/*
 * The per-CPU TranslationBlock jump cache.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef ACCEL_TCG_TB_JMP_CACHE_H
|
|
|
|
#define ACCEL_TCG_TB_JMP_CACHE_H
|
|
|
|
|
|
|
|
#define TB_JMP_CACHE_BITS 12
|
|
|
|
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/*
|
2024-01-22 16:34:09 +01:00
|
|
|
* Invalidated in parallel; all accesses to 'tb' must be atomic.
|
|
|
|
* A valid entry is read/written by a single CPU, therefore there is
|
|
|
|
* no need for qatomic_rcu_read() and pc is always consistent with a
|
|
|
|
* non-NULL value of 'tb'. Strictly speaking pc is only needed for
|
|
|
|
* CF_PCREL, but it's used always for simplicity.
|
2022-08-15 22:13:05 +02:00
|
|
|
*/
|
|
|
|
struct CPUJumpCache {
|
2023-01-24 19:01:18 +01:00
|
|
|
struct rcu_head rcu;
|
2022-08-15 22:13:05 +02:00
|
|
|
struct {
|
|
|
|
TranslationBlock *tb;
|
2023-06-21 15:56:28 +02:00
|
|
|
vaddr pc;
|
2022-08-15 22:13:05 +02:00
|
|
|
} array[TB_JMP_CACHE_SIZE];
|
|
|
|
};

#endif /* ACCEL_TCG_TB_JMP_CACHE_H */