arm64 fixes for -rc4

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The main thing here is a long-awaited workaround, developed in
  conjunction with engineers from Cavium/Marvell, for a CPU erratum on
  ThunderX2.

  At the moment, the workaround is unconditionally enabled for affected
  CPUs at runtime, but we may add a command-line option to disable it in
  future if performance numbers show a significant cost for real
  workloads.

  Summary:

   - Work around Cavium/Marvell ThunderX2 erratum #219

   - Fix regression in mlock() ABI caused by sign-extension of TTBR1
     addresses

   - More fixes to the spurious kernel fault detection logic

   - Fix pathological preemption race when enabling some CPU features
     at boot

   - Drop broken kcore macros in favour of generic implementations

   - Fix userspace view of ID_AA64ZFR0_EL1 when SVE is disabled

   - Avoid NULL dereference on allocation failure during hibernation"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: tags: Preserve tags for addresses translated via TTBR1
  arm64: mm: fix inverted PAR_EL1.F check
  arm64: sysreg: fix incorrect definition of SYS_PAR_EL1_F
  arm64: entry.S: Do not preempt from IRQ before all cpufeatures are enabled
  arm64: hibernate: check pgd table allocation
  arm64: cpufeature: Treat ID_AA64ZFR0_EL1 as RAZ when SVE is not enabled
  arm64: Fix kcore macros after 52-bit virtual addressing fallout
  arm64: Allow CAVIUM_TX2_ERRATUM_219 to be selected
  arm64: Avoid Cavium TX2 erratum 219 when switching TTBR
  arm64: Enable workaround for Cavium TX2 erratum 219 when running SMT
  arm64: KVM: Trap VM ops when ARM64_WORKAROUND_CAVIUM_TX2_219_TVM is set
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
@@ -107,6 +107,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX2 SMMUv3| #126            | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX2 Core  | #219            | CAVIUM_TX2_ERRATUM_219      |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
@@ -616,6 +616,23 @@ config CAVIUM_ERRATUM_30115

          If unsure, say Y.

+config CAVIUM_TX2_ERRATUM_219
+       bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+       default y
+       help
+         On Cavium ThunderX2, a load, store or prefetch instruction between a
+         TTBR update and the corresponding context synchronizing operation can
+         cause a spurious Data Abort to be delivered to any hardware thread in
+         the CPU core.
+
+         Work around the issue by avoiding the problematic code sequence and
+         trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+         trap handler performs the corresponding register access, skips the
+         instruction and ensures context synchronization by virtue of the
+         exception return.
+
+         If unsure, say Y.
+
 config QCOM_FALKOR_ERRATUM_1003
        bool "Falkor E1003: Incorrect translation due to ASID change"
        default y
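The help text above compresses the whole story. As a rough sketch of the constraint the workaround enforces (a hypothetical helper for illustration only, not the kernel's actual TTBR-switching code), the window between the TTBR write and its ISB has to stay free of loads, stores and prefetches on affected parts:

/* Illustrative sketch only; hypothetical helper, not kernel code. */
static inline void ttbr0_switch_sketch(unsigned long ttbr)
{
        asm volatile(
        "       msr     ttbr0_el1, %0\n"
        /*
         * Erratum 219: a load, store or PRFM issued here, before the
         * ISB below performs the context synchronization, can raise a
         * spurious Data Abort on any hardware thread of the core.
         */
        "       isb\n"
        : : "r" (ttbr) : "memory");
}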
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
@@ -78,10 +78,9 @@ alternative_else_nop_endif
 /*
  * Remove the address tag from a virtual address, if present.
  */
-       .macro  clear_address_tag, dst, addr
-       tst     \addr, #(1 << 55)
-       bic     \dst, \addr, #(0xff << 56)
-       csel    \dst, \dst, \addr, eq
+       .macro  untagged_addr, dst, addr
+       sbfx    \dst, \addr, #0, #56
+       and     \dst, \dst, \addr
        .endm

 #endif
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
@@ -52,7 +52,9 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING             42
 #define ARM64_HAS_DCPODP                       43
 #define ARM64_WORKAROUND_1463225               44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46

-#define ARM64_NCAPS                            45
+#define ARM64_NCAPS                            47

 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
@@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void)
  * up with a tagged userland pointer. Clear the tag to get a sane pointer to
  * pass on to access_ok(), for instance.
  */
-#define untagged_addr(addr)    \
+#define __untagged_addr(addr)  \
        ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

+#define untagged_addr(addr)    ({                                      \
+       u64 __addr = (__force u64)(addr);                               \
+       __addr &= __untagged_addr(__addr);                              \
+       (__force __typeof__(addr))__addr;                               \
+})
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define __tag_shifted(tag)     ((u64)(tag) << 56)
-#define __tag_reset(addr)      untagged_addr(addr)
+#define __tag_reset(addr)      __untagged_addr(addr)
 #define __tag_get(addr)        (__u8)((u64)(addr) >> 56)
 #else
 #define __tag_shifted(tag)     0UL
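The trick above deserves a word: sign_extend64(addr, 55) turns bit 55, which selects between the TTBR0 and TTBR1 halves of the address space, into a mask. For a user (TTBR0) pointer the mask's top byte is zero and the AND strips the tag; for a TTBR1 address the top byte is all ones and any tag survives, which is what the mlock() ABI fix relies on. A minimal userspace demonstration with made-up pointer values:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's sign_extend64() from <linux/bitops.h>. */
static uint64_t sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;
        return (uint64_t)((int64_t)(value << shift) >> shift);
}

/* Equivalent of the new untagged_addr(): addr & sign_extend64(addr, 55). */
static uint64_t untag(uint64_t addr)
{
        return addr & sign_extend64(addr, 55);
}

int main(void)
{
        /* Tagged TTBR0 (user) pointer: bit 55 clear, tag 0x5a is stripped. */
        printf("%016llx\n", (unsigned long long)untag(0x5a007fffdeadbeefULL));
        /* Tagged TTBR1 pointer: bit 55 set, the tag is preserved. */
        printf("%016llx\n", (unsigned long long)untag(0x5aff8000cafef00dULL));
        return 0;       /* prints 00007fffdeadbeef, then 5aff8000cafef00d */
}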
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
@@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,

 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

-#define kc_vaddr_to_offset(v)  ((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o)  ((o) | PAGE_END)
-
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)     (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
@@ -212,7 +212,7 @@
 #define SYS_FAR_EL1                    sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1                    sys_reg(3, 0, 7, 4, 0)

-#define SYS_PAR_EL1_F                  BIT(1)
+#define SYS_PAR_EL1_F                  BIT(0)
 #define SYS_PAR_EL1_FST                GENMASK(6, 1)

 /*** Statistical Profiling Extension ***/
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
@@ -12,6 +12,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>

 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
        return (need_wa > 0);
 }

+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+       {},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+                        int scope)
+{
+       int i;
+
+       if (!is_affected_midr_range_list(entry, scope) ||
+           !is_hyp_mode_available())
+               return false;
+
+       for_each_possible_cpu(i) {
+               if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+                       return true;
+       }
+
+       return false;
+}
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS

 static const struct midr_range arm64_harden_el2_vectors[] = {
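The matches hook enables the TVM trap only when SMT is actually in use: MPIDR_EL1.Aff0 is the thread index within a core on these parts, so a non-zero Aff0 on any possible CPU means at least one core runs more than one hardware thread and a sibling can be hit by the erratum window.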
@@ -851,6 +876,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
        },
 #endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+               .matches = needs_tx2_tvm_workaround,
+       },
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+       },
+#endif
        {
        }
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
@@ -176,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
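The userspace-visible effect can be checked directly, since EL0 reads of the ID registers trap and are emulated from this sanitised view. A small test program (assumes an arm64 kernel with this fix and CONFIG_ARM64_SVE=n; S3_0_C0_C4_4 is the raw encoding of ID_AA64ZFR0_EL1, spelled out for older assemblers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t zfr0;

        /* Trapped and emulated by the kernel; with SVE disabled every
         * field is now treated as RAZ, so this should print 0. */
        asm("mrs %0, S3_0_C0_C4_4" : "=r" (zfr0));
        printf("ID_AA64ZFR0_EL1 = %#llx\n", (unsigned long long)zfr0);
        return 0;
}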
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
@@ -604,7 +604,7 @@ el1_da:
         */
        mrs     x3, far_el1
        inherit_daif    pstate=x23, tmp=x2
-       clear_address_tag x0, x3
+       untagged_addr   x0, x3
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort
@@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        orr     x24, x24, x0
 alternative_else_nop_endif
        cbnz    x24, 1f                         // preempt count != 0 || NMI return path
-       bl      preempt_schedule_irq            // irq en/disable is done inside
+       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
 1:
 #endif
@@ -808,7 +808,7 @@ el0_da:
        mrs     x26, far_el1
        ct_user_exit_irqoff
        enable_daif
-       clear_address_tag x0, x26
+       untagged_addr   x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
@@ -1071,7 +1071,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 #else
        ldr     x30, =vectors
 #endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
        msr     vbar_el1, x30
        add     x30, x30, #(1b - tramp_vectors)
        isb
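In the KPTI trampoline this PRFM lands squarely in the erratum window: tramp_map_kernel has just switched TTBR1_EL1 and, on non-Falkor parts, no ISB executes until after the VBAR_EL1 write, so affected ThunderX2 CPUs simply patch the prefetch into a NOP.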
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
@@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
                                 gfp_t mask)
 {
        int rc = 0;
+       pgd_t *trans_pgd;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
@@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
        memcpy((void *)dst, src_start, length);
        __flush_icache_range(dst, dst + length);

-       pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+       trans_pgd = allocator(mask);
+       if (!trans_pgd) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       pgdp = pgd_offset_raw(trans_pgd, dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
                pudp = allocator(mask);
                if (!pudp) {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
@@ -44,6 +45,7 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
 #include <asm/compat.h>
+#include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -631,3 +633,19 @@ static int __init tagged_addr_init(void)

 core_initcall(tagged_addr_init);
 #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * Preempting a task from an IRQ means we leave copies of PSTATE
+        * on the stack. cpufeature's enable calls may modify PSTATE, but
+        * resuming one of these preempted tasks would undo those changes.
+        *
+        * Only allow a task to be preempted once cpufeatures have been
+        * enabled.
+        */
+       if (static_branch_likely(&arm64_const_caps_ready))
+               preempt_schedule_irq();
+}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 hcr = vcpu->arch.hcr_el2;

+       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+               hcr |= HCR_TVM;
+
        write_sysreg(hcr, hcr_el2);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
-       if (vcpu->arch.hcr_el2 & HCR_VSE)
-               vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+       if (vcpu->arch.hcr_el2 & HCR_VSE) {
+               vcpu->arch.hcr_el2 &= ~HCR_VSE;
+               vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+       }

        if (has_vhe())
                deactivate_traps_vhe();
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
|
||||
int rt = kvm_vcpu_sys_get_rt(vcpu);
|
||||
u64 val = vcpu_get_reg(vcpu, rt);
|
||||
|
||||
/*
|
||||
* The normal sysreg handling code expects to see the traps,
|
||||
* let's not do anything here.
|
||||
*/
|
||||
if (vcpu->arch.hcr_el2 & HCR_TVM)
|
||||
return false;
|
||||
|
||||
switch (sysreg) {
|
||||
case SYS_SCTLR_EL1:
|
||||
write_sysreg_el1(val, SYS_SCTLR);
|
||||
break;
|
||||
case SYS_TTBR0_EL1:
|
||||
write_sysreg_el1(val, SYS_TTBR0);
|
||||
break;
|
||||
case SYS_TTBR1_EL1:
|
||||
write_sysreg_el1(val, SYS_TTBR1);
|
||||
break;
|
||||
case SYS_TCR_EL1:
|
||||
write_sysreg_el1(val, SYS_TCR);
|
||||
break;
|
||||
case SYS_ESR_EL1:
|
||||
write_sysreg_el1(val, SYS_ESR);
|
||||
break;
|
||||
case SYS_FAR_EL1:
|
||||
write_sysreg_el1(val, SYS_FAR);
|
||||
break;
|
||||
case SYS_AFSR0_EL1:
|
||||
write_sysreg_el1(val, SYS_AFSR0);
|
||||
break;
|
||||
case SYS_AFSR1_EL1:
|
||||
write_sysreg_el1(val, SYS_AFSR1);
|
||||
break;
|
||||
case SYS_MAIR_EL1:
|
||||
write_sysreg_el1(val, SYS_MAIR);
|
||||
break;
|
||||
case SYS_AMAIR_EL1:
|
||||
write_sysreg_el1(val, SYS_AMAIR);
|
||||
break;
|
||||
case SYS_CONTEXTIDR_EL1:
|
||||
write_sysreg_el1(val, SYS_CONTEXTIDR);
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
__kvm_skip_instr(vcpu);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true when we were able to fixup the guest exit and should return to
|
||||
* the guest, false when we should restore the host state and return to the
|
||||
|
@ -399,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
|||
if (*exit_code != ARM_EXCEPTION_TRAP)
|
||||
goto exit;
|
||||
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
|
||||
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
|
||||
handle_tx2_tvm(vcpu))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* We trap the first access to the FP/SIMD to save the host context
|
||||
* and restore the guest context lazily.
|
||||
|
|
|
@ -268,8 +268,12 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
|
|||
par = read_sysreg(par_el1);
|
||||
local_irq_restore(flags);
|
||||
|
||||
/*
|
||||
* If we now have a valid translation, treat the translation fault as
|
||||
* spurious.
|
||||
*/
|
||||
if (!(par & SYS_PAR_EL1_F))
|
||||
return false;
|
||||
return true;
|
||||
|
||||
/*
|
||||
* If we got a different type of fault from the AT instruction,
|
||||
|
|
|
@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout);
|
|||
extern long schedule_timeout_idle(long timeout);
|
||||
asmlinkage void schedule(void);
|
||||
extern void schedule_preempt_disabled(void);
|
||||
asmlinkage void preempt_schedule_irq(void);
|
||||
|
||||
extern int __must_check io_schedule_prepare(void);
|
||||
extern void io_schedule_finish(int token);
|
||||
|
|