From bbb02509f2fece730350620a429276143a1e2232 Mon Sep 17 00:00:00 2001 From: Vitaly Cheptsov Date: Thu, 25 May 2023 10:37:48 +0100 Subject: [PATCH 01/21] fsl-imx6: Add SNVS support for i.MX6 boards SNVS is supported on both i.MX6 and i.MX6UL and is needed to support shutdown on the board. Cc: Peter Maydell (odd fixer:SABRELITE / i.MX6) Cc: Jean-Christophe Dubois (reviewer:SABRELITE / i.MX6) Cc: qemu-arm@nongnu.org (open list:SABRELITE / i.MX6) Cc: qemu-devel@nongnu.org (open list:All patches CC here) Signed-off-by: Vitaly Cheptsov Message-id: 20230515095015.66860-1-cheptsov@ispras.ru Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- hw/arm/fsl-imx6.c | 8 ++++++++ include/hw/arm/fsl-imx6.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 00dafe3f62..4fa7f0b95e 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -53,6 +53,8 @@ static void fsl_imx6_init(Object *obj) object_initialize_child(obj, "src", &s->src, TYPE_IMX6_SRC); + object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); + for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) { snprintf(name, NAME_SIZE, "uart%d", i + 1); object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); @@ -390,6 +392,12 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_ENET_MAC_1588_IRQ)); + /* + * SNVS + */ + sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6_SNVSHP_ADDR); + /* * Watchdog */ diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h index 83291457cf..5b4d48da08 100644 --- a/include/hw/arm/fsl-imx6.h +++ b/include/hw/arm/fsl-imx6.h @@ -21,6 +21,7 @@ #include "hw/cpu/a9mpcore.h" #include "hw/misc/imx6_ccm.h" #include "hw/misc/imx6_src.h" +#include "hw/misc/imx7_snvs.h" #include "hw/watchdog/wdt_imx2.h" #include "hw/char/imx_serial.h" #include "hw/timer/imx_gpt.h" @@ -59,6 +60,7 @@ struct FslIMX6State { A9MPPrivState a9mpcore; IMX6CCMState ccm; IMX6SRCState src; + IMX7SNVSState snvs; IMXSerialState uart[FSL_IMX6_NUM_UARTS]; IMXGPTState gpt; IMXEPITState epit[FSL_IMX6_NUM_EPITS]; From 263d0e48672c552c97cdbdbe2105d7b9fd0b133c Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:49 +0100 Subject: [PATCH 02/21] hw/arm/smmuv3: Add missing fields for IDR0 In preparation for adding stage-2 support. Add IDR0 fields related to stage-2. VMID16: 16-bit VMID supported. S2P: Stage-2 translation supported. They are described in 6.3.1 SMMU_IDR0. No functional change intended. 
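For illustration only, independent of the patch itself: the two new IDR0 bits let software discover stage-2 support, and the small stand-alone C sketch below decodes them using the bit positions from the FIELD(IDR0, S2P, 0, 1) and FIELD(IDR0, VMID16, 18, 1) declarations in the hunk; the macro names and the example register value are made up for the demonstration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions mirror the FIELD(IDR0, ...) declarations in the hunk. */
#define IDR0_S2P_BIT     0   /* stage-2 translation supported */
#define IDR0_VMID16_BIT  18  /* 16-bit VMID supported */

static bool idr0_bit(uint32_t idr0, unsigned bit)
{
    return (idr0 >> bit) & 0x1;
}

int main(void)
{
    /* Example IDR0 value with both new capability bits set. */
    uint32_t idr0 = (1u << IDR0_S2P_BIT) | (1u << IDR0_VMID16_BIT);

    printf("S2P=%d VMID16=%d\n",
           idr0_bit(idr0, IDR0_S2P_BIT), idr0_bit(idr0, IDR0_VMID16_BIT));
    return 0;
}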
Reviewed-by: Richard Henderson Reviewed-by: Eric Auger Signed-off-by: Mostafa Saleh Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-2-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmuv3-internal.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index e8f0ebf25e..183d5ac8dc 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -34,10 +34,12 @@ typedef enum SMMUTranslationStatus { /* MMIO Registers */ REG32(IDR0, 0x0) + FIELD(IDR0, S2P, 0 , 1) FIELD(IDR0, S1P, 1 , 1) FIELD(IDR0, TTF, 2 , 2) FIELD(IDR0, COHACC, 4 , 1) FIELD(IDR0, ASID16, 12, 1) + FIELD(IDR0, VMID16, 18, 1) FIELD(IDR0, TTENDIAN, 21, 2) FIELD(IDR0, STALL_MODEL, 24, 2) FIELD(IDR0, TERM_MODEL, 26, 1) From 3b736c61849ebc16776143a947a90a731b0aea55 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:49 +0100 Subject: [PATCH 03/21] hw/arm/smmuv3: Update translation config to hold stage-2 In preparation for adding stage-2 support, add a S2 config struct(SMMUS2Cfg), composed of the following fields and embedded in the main SMMUTransCfg: -tsz: Size of IPA input region (S2T0SZ) -sl0: Start level of translation (S2SL0) -affd: AF Fault Disable (S2AFFD) -record_faults: Record fault events (S2R) -granule_sz: Granule page shift (based on S2TG) -vmid: Virtual Machine ID (S2VMID) -vttb: Address of translation table base (S2TTB) -eff_ps: Effective PA output range (based on S2PS) They will be used in the next patches in stage-2 address translation. The fields in SMMUS2Cfg, are reordered to make the shared and stage-1 fields next to each other, this reordering didn't change the struct size (104 bytes before and after). Stage-1 only fields: aa64, asid, tt, ttb, tbi, record_faults, oas. oas is stage-1 output address size. However, it is used to check input address in case stage-1 is unimplemented or bypassed according to SMMUv3 manual IHI0070.E "3.4. Address sizes" Shared fields: stage, disabled, bypassed, aborted, iotlb_*. No functional change intended. Reviewed-by: Eric Auger Signed-off-by: Mostafa Saleh Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-3-smostafa@google.com Signed-off-by: Peter Maydell --- include/hw/arm/smmu-common.h | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 9fcff26357..9cf3f37929 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -58,25 +58,41 @@ typedef struct SMMUTLBEntry { uint8_t granule; } SMMUTLBEntry; +/* Stage-2 configuration. */ +typedef struct SMMUS2Cfg { + uint8_t tsz; /* Size of IPA input region (S2T0SZ) */ + uint8_t sl0; /* Start level of translation (S2SL0) */ + bool affd; /* AF Fault Disable (S2AFFD) */ + bool record_faults; /* Record fault events (S2R) */ + uint8_t granule_sz; /* Granule page shift (based on S2TG) */ + uint8_t eff_ps; /* Effective PA output range (based on S2PS) */ + uint16_t vmid; /* Virtual Machine ID (S2VMID) */ + uint64_t vttb; /* Address of translation table base (S2TTB) */ +} SMMUS2Cfg; + /* * Generic structure populated by derived SMMU devices * after decoding the configuration information and used as * input to the page table walk */ typedef struct SMMUTransCfg { + /* Shared fields between stage-1 and stage-2. 
*/ int stage; /* translation stage */ - bool aa64; /* arch64 or aarch32 translation table */ bool disabled; /* smmu is disabled */ bool bypassed; /* translation is bypassed */ bool aborted; /* translation is aborted */ + uint32_t iotlb_hits; /* counts IOTLB hits */ + uint32_t iotlb_misses; /* counts IOTLB misses*/ + /* Used by stage-1 only. */ + bool aa64; /* arch64 or aarch32 translation table */ bool record_faults; /* record fault events */ uint64_t ttb; /* TT base address */ uint8_t oas; /* output address width */ uint8_t tbi; /* Top Byte Ignore */ uint16_t asid; SMMUTransTableInfo tt[2]; - uint32_t iotlb_hits; /* counts IOTLB hits for this asid */ - uint32_t iotlb_misses; /* counts IOTLB misses for this asid */ + /* Used by stage-2 only. */ + struct SMMUS2Cfg s2cfg; } SMMUTransCfg; typedef struct SMMUDevice { From bcc919e756cd793e59038f59ad69d543b369f689 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:49 +0100 Subject: [PATCH 04/21] hw/arm/smmuv3: Refactor stage-1 PTW In preparation for adding stage-2 support, rename smmu_ptw_64 to smmu_ptw_64_s1 and refactor some of the code so it can be reused in stage-2 page table walk. Remove AA64 check from PTW as decode_cd already ensures that AA64 is used, otherwise it faults with C_BAD_CD. A stage member is added to SMMUPTWEventInfo to differentiate between stage-1 and stage-2 ptw faults. Add stage argument to trace_smmu_ptw_level be consistent with other trace events. Signed-off-by: Mostafa Saleh Reviewed-by: Eric Auger Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-4-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmu-common.c | 27 ++++++++++----------------- hw/arm/smmuv3.c | 2 ++ hw/arm/trace-events | 2 +- include/hw/arm/smmu-common.h | 16 +++++++++++++--- 4 files changed, 26 insertions(+), 21 deletions(-) diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index e7f1c1f219..50391a8c94 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -264,7 +264,7 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) } /** - * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA + * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA * @cfg: translation config * @iova: iova to translate * @perm: access type @@ -276,9 +276,9 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) * Upon success, @tlbe is filled with translated_addr and entry * permission rights. 
*/ -static int smmu_ptw_64(SMMUTransCfg *cfg, - dma_addr_t iova, IOMMUAccessFlags perm, - SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +static int smmu_ptw_64_s1(SMMUTransCfg *cfg, + dma_addr_t iova, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { dma_addr_t baseaddr, indexmask; int stage = cfg->stage; @@ -291,14 +291,14 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, } granule_sz = tt->granule_sz; - stride = granule_sz - 3; + stride = VMSA_STRIDE(granule_sz); inputsize = 64 - tt->tsz; level = 4 - (inputsize - 4) / stride; - indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; + indexmask = VMSA_IDXMSK(inputsize, stride, level); baseaddr = extract64(tt->ttb, 0, 48); baseaddr &= ~indexmask; - while (level <= 3) { + while (level < VMSA_LEVELS) { uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); uint64_t mask = subpage_size - 1; uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz); @@ -309,7 +309,7 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, if (get_pte(baseaddr, offset, &pte, info)) { goto error; } - trace_smmu_ptw_level(level, iova, subpage_size, + trace_smmu_ptw_level(stage, level, iova, subpage_size, baseaddr, offset, pte); if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { @@ -358,6 +358,7 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, info->type = SMMU_PTW_ERR_TRANSLATION; error: + info->stage = 1; tlbe->entry.perm = IOMMU_NONE; return -EINVAL; } @@ -376,15 +377,7 @@ error: int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm, SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { - if (!cfg->aa64) { - /* - * This code path is not entered as we check this while decoding - * the configuration data in the derived SMMU model. - */ - g_assert_not_reached(); - } - - return smmu_ptw_64(cfg, iova, perm, tlbe, info); + return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info); } /** diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 270c80b665..4e90343996 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -716,6 +716,8 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, cached_entry = g_new0(SMMUTLBEntry, 1); if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) { + /* All faults from PTW has S2 field. */ + event.u.f_walk_eabt.s2 = (ptw_info.stage == 2); g_free(cached_entry); switch (ptw_info.type) { case SMMU_PTW_ERR_WALK_EABT: diff --git a/hw/arm/trace-events b/hw/arm/trace-events index 2dee296c8f..205ac04573 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -5,7 +5,7 @@ virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out." 
# smmu-common.c smmu_add_mr(const char *name) "%s" -smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64 +smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64 smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64 smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64 smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB" diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 9cf3f37929..97cea8ea06 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -23,9 +23,18 @@ #include "hw/pci/pci.h" #include "qom/object.h" -#define SMMU_PCI_BUS_MAX 256 -#define SMMU_PCI_DEVFN_MAX 256 -#define SMMU_PCI_DEVFN(sid) (sid & 0xFF) +#define SMMU_PCI_BUS_MAX 256 +#define SMMU_PCI_DEVFN_MAX 256 +#define SMMU_PCI_DEVFN(sid) (sid & 0xFF) + +/* VMSAv8-64 Translation constants and functions */ +#define VMSA_LEVELS 4 + +#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1) +#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \ + (VMSA_LEVELS - (lvl))) +#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \ + VMSA_BIT_LVL(isz, strd, lvl)) - 1) /* * Page table walk error types @@ -40,6 +49,7 @@ typedef enum { } SMMUPTWEventType; typedef struct SMMUPTWEventInfo { + int stage; SMMUPTWEventType type; dma_addr_t addr; /* fetched address that induced an abort, if any */ } SMMUPTWEventInfo; From e703f7076a255cd13b1d9fc0934480a613614f14 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:50 +0100 Subject: [PATCH 05/21] hw/arm/smmuv3: Add page table walk for stage-2 In preparation for adding stage-2 support, add Stage-2 PTW code. Only Aarch64 format is supported as stage-1. Nesting stage-1 and stage-2 is not supported right now. HTTU is not supported, SW is expected to maintain the Access flag. This is described in the SMMUv3 manual(IHI 0070.E.a) "5.2. Stream Table Entry" in "[181] S2AFFD". This flag determines the behavior on access of a stage-2 page whose descriptor has AF == 0: - 0b0: An Access flag fault occurs (stall not supported). - 0b1: An Access flag fault never occurs. An Access fault takes priority over a Permission fault. There are 3 address size checks for stage-2 according to (IHI 0070.E.a) in "3.4. Address sizes". - As nesting is not supported, input address is passed directly to stage-2, and is checked against IAS. We use cfg->oas to hold the OAS when stage-1 is not used, this is set in the next patch. This check is done outside of smmu_ptw_64_s2 as it is not part of stage-2(it throws stage-1 fault), and the stage-2 function shouldn't change it's behavior when nesting is supported. 
When nesting is supported and we figure out how to combine TLB for stage-1 and stage-2 we can move this check into the stage-1 function as described in ARM DDI0487I.a in pseudocode aarch64/translation/vmsa_translation/AArch64.S1Translate aarch64/translation/vmsa_translation/AArch64.S1DisabledOutput - Input to stage-2 is checked against s2t0sz, and throws stage-2 transaltion fault if exceeds it. - Output of stage-2 is checked against effective PA output range. Reviewed-by: Eric Auger Signed-off-by: Mostafa Saleh Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-5-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmu-common.c | 142 ++++++++++++++++++++++++++++++++++++++++- hw/arm/smmu-internal.h | 35 ++++++++++ 2 files changed, 176 insertions(+), 1 deletion(-) diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 50391a8c94..3e82eab741 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -363,6 +363,127 @@ error: return -EINVAL; } +/** + * smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa + * for stage-2. + * @cfg: translation config + * @ipa: ipa to translate + * @perm: access type + * @tlbe: SMMUTLBEntry (out) + * @info: handle to an error info + * + * Return 0 on success, < 0 on error. In case of error, @info is filled + * and tlbe->perm is set to IOMMU_NONE. + * Upon success, @tlbe is filled with translated_addr and entry + * permission rights. + */ +static int smmu_ptw_64_s2(SMMUTransCfg *cfg, + dma_addr_t ipa, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +{ + const int stage = 2; + int granule_sz = cfg->s2cfg.granule_sz; + /* ARM DDI0487I.a: Table D8-7. */ + int inputsize = 64 - cfg->s2cfg.tsz; + int level = get_start_level(cfg->s2cfg.sl0, granule_sz); + int stride = VMSA_STRIDE(granule_sz); + int idx = pgd_concat_idx(level, granule_sz, ipa); + /* + * Get the ttb from concatenated structure. + * The offset is the idx * size of each ttb(number of ptes * (sizeof(pte)) + */ + uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) * + idx * sizeof(uint64_t); + dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level); + + baseaddr &= ~indexmask; + + /* + * On input, a stage 2 Translation fault occurs if the IPA is outside the + * range configured by the relevant S2T0SZ field of the STE. 
+ */ + if (ipa >= (1ULL << inputsize)) { + info->type = SMMU_PTW_ERR_TRANSLATION; + goto error; + } + + while (level < VMSA_LEVELS) { + uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); + uint64_t mask = subpage_size - 1; + uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz); + uint64_t pte, gpa; + dma_addr_t pte_addr = baseaddr + offset * sizeof(pte); + uint8_t s2ap; + + if (get_pte(baseaddr, offset, &pte, info)) { + goto error; + } + trace_smmu_ptw_level(stage, level, ipa, subpage_size, + baseaddr, offset, pte); + if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { + trace_smmu_ptw_invalid_pte(stage, level, baseaddr, + pte_addr, offset, pte); + break; + } + + if (is_table_pte(pte, level)) { + baseaddr = get_table_pte_address(pte, granule_sz); + level++; + continue; + } else if (is_page_pte(pte, level)) { + gpa = get_page_pte_address(pte, granule_sz); + trace_smmu_ptw_page_pte(stage, level, ipa, + baseaddr, pte_addr, pte, gpa); + } else { + uint64_t block_size; + + gpa = get_block_pte_address(pte, level, granule_sz, + &block_size); + trace_smmu_ptw_block_pte(stage, level, baseaddr, + pte_addr, pte, ipa, gpa, + block_size >> 20); + } + + /* + * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry) + * An Access fault takes priority over a Permission fault. + */ + if (!PTE_AF(pte) && !cfg->s2cfg.affd) { + info->type = SMMU_PTW_ERR_ACCESS; + goto error; + } + + s2ap = PTE_AP(pte); + if (is_permission_fault_s2(s2ap, perm)) { + info->type = SMMU_PTW_ERR_PERMISSION; + goto error; + } + + /* + * The address output from the translation causes a stage 2 Address + * Size fault if it exceeds the effective PA output range. + */ + if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) { + info->type = SMMU_PTW_ERR_ADDR_SIZE; + goto error; + } + + tlbe->entry.translated_addr = gpa; + tlbe->entry.iova = ipa & ~mask; + tlbe->entry.addr_mask = mask; + tlbe->entry.perm = s2ap; + tlbe->level = level; + tlbe->granule = granule_sz; + return 0; + } + info->type = SMMU_PTW_ERR_TRANSLATION; + +error: + info->stage = 2; + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; +} + /** * smmu_ptw - Walk the page tables for an IOVA, according to @cfg * @@ -377,7 +498,26 @@ error: int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm, SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { - return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info); + if (cfg->stage == 1) { + return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info); + } else if (cfg->stage == 2) { + /* + * If bypassing stage 1(or unimplemented), the input address is passed + * directly to stage 2 as IPA. If the input address of a transaction + * exceeds the size of the IAS, a stage 1 Address Size fault occurs. + * For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes" + */ + if (iova >= (1ULL << cfg->oas)) { + info->type = SMMU_PTW_ERR_ADDR_SIZE; + info->stage = 1; + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; + } + + return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info); + } + + g_assert_not_reached(); } /** diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h index 2d75b31953..a9454f914e 100644 --- a/hw/arm/smmu-internal.h +++ b/hw/arm/smmu-internal.h @@ -66,6 +66,8 @@ #define PTE_APTABLE(pte) \ (extract64(pte, 61, 2)) +#define PTE_AF(pte) \ + (extract64(pte, 10, 1)) /* * TODO: At the moment all transactions are considered as privileged (EL1) * as IOMMU translation callback does not pass user/priv attributes. 
@@ -73,6 +75,9 @@ #define is_permission_fault(ap, perm) \ (((perm) & IOMMU_WO) && ((ap) & 0x2)) +#define is_permission_fault_s2(s2ap, perm) \ + (!(((s2ap) & (perm)) == (perm))) + #define PTE_AP_TO_PERM(ap) \ (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2))) @@ -96,6 +101,36 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize, MAKE_64BIT_MASK(0, gsz - 3); } +/* FEAT_LPA2 and FEAT_TTST are not implemented. */ +static inline int get_start_level(int sl0 , int granule_sz) +{ + /* ARM DDI0487I.a: Table D8-12. */ + if (granule_sz == 12) { + return 2 - sl0; + } + /* ARM DDI0487I.a: Table D8-22 and Table D8-31. */ + return 3 - sl0; +} + +/* + * Index in a concatenated first level stage-2 page table. + * ARM DDI0487I.a: D8.2.2 Concatenated translation tables. + */ +static inline int pgd_concat_idx(int start_level, int granule_sz, + dma_addr_t ipa) +{ + uint64_t ret; + /* + * Get the number of bits handled by next levels, then any extra bits in + * the address should index the concatenated tables. This relation can be + * deduced from tables in ARM DDI0487I.a: D8.2.7-9 + */ + int shift = level_shift(start_level - 1, granule_sz); + + ret = ipa >> shift; + return ret; +} + #define SMMU_IOTLB_ASID(key) ((key).asid) typedef struct SMMUIOTLBPageInvInfo { From 21eb5b5cde7f6f75751837d3082ce8b36070af33 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:50 +0100 Subject: [PATCH 06/21] hw/arm/smmuv3: Parse STE config for stage-2 Parse stage-2 configuration from STE and populate it in SMMUS2Cfg. Validity of field values are checked when possible. Only AA64 tables are supported and Small Translation Tables (STT) are not supported. According to SMMUv3 UM(IHI0070E) "5.2 Stream Table Entry": All fields with an S2 prefix (with the exception of S2VMID) are IGNORED when stage-2 bypasses translation (Config[1] == 0). Which means that VMID can be used(for TLB tagging) even if stage-2 is bypassed, so we parse it unconditionally when S2P exists. Otherwise it is set to -1.(only S1P) As stall is not supported, if S2S is set the translation would abort. For S2R, we reuse the same code used for stage-1 with flag record_faults. However when nested translation is supported we would need to separate stage-1 and stage-2 faults. Fix wrong shift in STE_S2HD, STE_S2HA, STE_S2S. 
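For illustration only, not part of the patch: the SL0/T0SZ/granule validity rules mentioned above reduce to the start-level and table-concatenation arithmetic introduced in the previous patch (get_start_level(), pgd_concat_idx(), level_shift()). The stand-alone sketch below re-derives that arithmetic for one hypothetical STE configuration (4KB granule, SL0=1, S2T0SZ=24); the chosen values are examples only.

#include <stdint.h>
#include <stdio.h>

/* Address bits resolved below a given lookup level (mirrors level_shift()). */
static int level_shift(int level, int granule_sz)
{
    return granule_sz + (3 - level) * (granule_sz - 3);
}

/* Start level without FEAT_LPA2/FEAT_TTST (mirrors get_start_level()). */
static int start_level(int sl0, int granule_sz)
{
    return (granule_sz == 12) ? 2 - sl0 : 3 - sl0;
}

int main(void)
{
    int granule_sz = 12;                 /* 4KB granule (S2TG == 0b00)    */
    int sl0 = 1, t0sz = 24;              /* hypothetical STE field values */
    int lvl = start_level(sl0, granule_sz);
    uint64_t max_ipa = (1ULL << (64 - t0sz)) - 1;
    /* Concatenated start-level tables needed to cover max_ipa (must be <= 16). */
    uint64_t nr_concat = (max_ipa >> level_shift(lvl - 1, granule_sz)) + 1;

    printf("start level %d, %llu concatenated tables\n",
           lvl, (unsigned long long)nr_concat);   /* -> level 1, 2 tables */
    return 0;
}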
Signed-off-by: Mostafa Saleh Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Message-id: 20230516203327.2051088-6-smostafa@google.com [PMM: fixed format string] Signed-off-by: Peter Maydell --- hw/arm/smmuv3-internal.h | 10 +- hw/arm/smmuv3.c | 182 +++++++++++++++++++++++++++++++++-- include/hw/arm/smmu-common.h | 1 + include/hw/arm/smmuv3.h | 3 + 4 files changed, 186 insertions(+), 10 deletions(-) diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index 183d5ac8dc..6d1c1edab7 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -526,9 +526,13 @@ typedef struct CD { #define STE_S2TG(x) extract32((x)->word[5], 14, 2) #define STE_S2PS(x) extract32((x)->word[5], 16, 3) #define STE_S2AA64(x) extract32((x)->word[5], 19, 1) -#define STE_S2HD(x) extract32((x)->word[5], 24, 1) -#define STE_S2HA(x) extract32((x)->word[5], 25, 1) -#define STE_S2S(x) extract32((x)->word[5], 26, 1) +#define STE_S2ENDI(x) extract32((x)->word[5], 20, 1) +#define STE_S2AFFD(x) extract32((x)->word[5], 21, 1) +#define STE_S2HD(x) extract32((x)->word[5], 23, 1) +#define STE_S2HA(x) extract32((x)->word[5], 24, 1) +#define STE_S2S(x) extract32((x)->word[5], 25, 1) +#define STE_S2R(x) extract32((x)->word[5], 26, 1) + #define STE_CTXPTR(x) \ ({ \ unsigned long addr; \ diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 4e90343996..989afb1fda 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -33,6 +33,9 @@ #include "smmuv3-internal.h" #include "smmu-internal.h" +#define PTW_RECORD_FAULT(cfg) (((cfg)->stage == 1) ? (cfg)->record_faults : \ + (cfg)->s2cfg.record_faults) + /** * smmuv3_trigger_irq - pulse @irq if enabled and update * GERROR register in case of GERROR interrupt @@ -329,11 +332,142 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, return 0; } +/* + * Max valid value is 39 when SMMU_IDR3.STT == 0. + * In architectures after SMMUv3.0: + * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this + * field is MAX(16, 64-IAS) + * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field + * is (64-IAS). + * As we only support AA64, IAS = OAS. + */ +static bool s2t0sz_valid(SMMUTransCfg *cfg) +{ + if (cfg->s2cfg.tsz > 39) { + return false; + } + + if (cfg->s2cfg.granule_sz == 16) { + return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS)); + } + + return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16)); +} + +/* + * Return true if s2 page table config is valid. + * This checks with the configured start level, ias_bits and granularity we can + * have a valid page table as described in ARM ARM D8.2 Translation process. + * The idea here is to see for the highest possible number of IPA bits, how + * many concatenated tables we would need, if it is more than 16, then this is + * not possible. 
+ */ +static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran) +{ + int level = get_start_level(sl0, gran); + uint64_t ipa_bits = 64 - t0sz; + uint64_t max_ipa = (1ULL << ipa_bits) - 1; + int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1; + + return nr_concat <= VMSA_MAX_S2_CONCAT; +} + +static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste) +{ + cfg->stage = 2; + + if (STE_S2AA64(ste) == 0x0) { + qemu_log_mask(LOG_UNIMP, + "SMMUv3 AArch32 tables not supported\n"); + g_assert_not_reached(); + } + + switch (STE_S2TG(ste)) { + case 0x0: /* 4KB */ + cfg->s2cfg.granule_sz = 12; + break; + case 0x1: /* 64KB */ + cfg->s2cfg.granule_sz = 16; + break; + case 0x2: /* 16KB */ + cfg->s2cfg.granule_sz = 14; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste)); + goto bad_ste; + } + + cfg->s2cfg.vttb = STE_S2TTB(ste); + + cfg->s2cfg.sl0 = STE_S2SL0(ste); + /* FEAT_TTST not supported. */ + if (cfg->s2cfg.sl0 == 0x3) { + qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n"); + goto bad_ste; + } + + /* For AA64, The effective S2PS size is capped to the OAS. */ + cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS)); + /* + * It is ILLEGAL for the address in S2TTB to be outside the range + * described by the effective S2PS value. + */ + if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) { + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 S2TTB too large 0x%" PRIx64 + ", effective PS %d bits\n", + cfg->s2cfg.vttb, cfg->s2cfg.eff_ps); + goto bad_ste; + } + + cfg->s2cfg.tsz = STE_S2T0SZ(ste); + + if (!s2t0sz_valid(cfg)) { + qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n", + cfg->s2cfg.tsz); + goto bad_ste; + } + + if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz, + cfg->s2cfg.granule_sz)) { + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 STE stage 2 config not valid!\n"); + goto bad_ste; + } + + /* Only LE supported(IDR0.TTENDIAN). */ + if (STE_S2ENDI(ste)) { + qemu_log_mask(LOG_GUEST_ERROR, + "SMMUv3 STE_S2ENDI only supports LE!\n"); + goto bad_ste; + } + + cfg->s2cfg.affd = STE_S2AFFD(ste); + + cfg->s2cfg.record_faults = STE_S2R(ste); + /* As stall is not supported. */ + if (STE_S2S(ste)) { + qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n"); + goto bad_ste; + } + + /* This is still here as stage 2 has not been fully enabled yet. */ + qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n"); + goto bad_ste; + + return 0; + +bad_ste: + return -EINVAL; +} + /* Returns < 0 in case of invalid STE, 0 otherwise */ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, STE *ste, SMMUEventInfo *event) { uint32_t config; + int ret; if (!STE_VALID(ste)) { if (!event->inval_ste_allowed) { @@ -354,10 +488,38 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, return 0; } - if (STE_CFG_S2_ENABLED(config)) { - qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n"); + /* + * If a stage is enabled in SW while not advertised, throw bad ste + * according to user manual(IHI0070E) "5.2 Stream Table Entry". + */ + if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) { + qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n"); goto bad_ste; } + if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) { + qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n"); + goto bad_ste; + } + + if (STAGE2_SUPPORTED(s)) { + /* VMID is considered even if s2 is disabled. 
*/ + cfg->s2cfg.vmid = STE_S2VMID(ste); + } else { + /* Default to -1 */ + cfg->s2cfg.vmid = -1; + } + + if (STE_CFG_S2_ENABLED(config)) { + /* + * Stage-1 OAS defaults to OAS even if not enabled as it would be used + * in input address check for stage-2. + */ + cfg->oas = oas2bits(SMMU_IDR5_OAS); + ret = decode_ste_s2_cfg(cfg, ste); + if (ret) { + goto bad_ste; + } + } if (STE_S1CDMAX(ste) != 0) { qemu_log_mask(LOG_UNIMP, @@ -702,7 +864,13 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, if (cached_entry) { if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) { status = SMMU_TRANS_ERROR; - if (cfg->record_faults) { + /* + * We know that the TLB only contains either stage-1 or stage-2 as + * nesting is not supported. So it is sufficient to check the + * translation stage to know the TLB stage for now. + */ + event.u.f_walk_eabt.s2 = (cfg->stage == 2); + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -728,28 +896,28 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, event.u.f_walk_eabt.addr2 = ptw_info.addr; break; case SMMU_PTW_ERR_TRANSLATION: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_TRANSLATION; event.u.f_translation.addr = addr; event.u.f_translation.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ADDR_SIZE: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_ADDR_SIZE; event.u.f_addr_size.addr = addr; event.u.f_addr_size.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ACCESS: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_ACCESS; event.u.f_access.addr = addr; event.u.f_access.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_PERMISSION: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 97cea8ea06..4f1405d4e4 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -29,6 +29,7 @@ /* VMSAv8-64 Translation constants and functions */ #define VMSA_LEVELS 4 +#define VMSA_MAX_S2_CONCAT 16 #define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1) #define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \ diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h index a0c026402e..6031d7d325 100644 --- a/include/hw/arm/smmuv3.h +++ b/include/hw/arm/smmuv3.h @@ -83,4 +83,7 @@ struct SMMUv3Class { #define TYPE_ARM_SMMUV3 "arm-smmuv3" OBJECT_DECLARE_TYPE(SMMUv3State, SMMUv3Class, ARM_SMMUV3) +#define STAGE1_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S1P) +#define STAGE2_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S2P) + #endif From cd617556aded2528664a3673d66d5cd0864f5341 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:50 +0100 Subject: [PATCH 07/21] hw/arm/smmuv3: Make TLB lookup work for stage-2 Right now, either stage-1 or stage-2 are supported, this simplifies how we can deal with TLBs. This patch makes TLB lookup work if stage-2 is enabled instead of stage-1. TLB lookup is done before a PTW, if a valid entry is found we won't do the PTW. To be able to do TLB lookup, we need the correct tagging info, as granularity and input size, so we get this based on the supported translation stage. The TLB entries are added correctly from each stage PTW. 
When nested translation is supported, this would need to change, for example if we go with a combined TLB implementation, we would need to use the min of the granularities in TLB. As stage-2 shouldn't be tagged by ASID, it will be set to -1 if S1P is not enabled. Signed-off-by: Mostafa Saleh Reviewed-by: Eric Auger Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-7-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmuv3.c | 44 +++++++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 989afb1fda..3fb5ed512b 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -721,6 +721,9 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, STE ste; CD cd; + /* ASID defaults to -1 (if s1 is not supported). */ + cfg->asid = -1; + ret = smmu_find_ste(s, sid, &ste, event); if (ret) { return ret; @@ -818,6 +821,11 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, .addr_mask = ~(hwaddr)0, .perm = IOMMU_NONE, }; + /* + * Combined attributes used for TLB lookup, as only one stage is supported, + * it will hold attributes based on the enabled stage. + */ + SMMUTransTableInfo tt_combined; qemu_mutex_lock(&s->mutex); @@ -846,21 +854,35 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, goto epilogue; } - tt = select_tt(cfg, addr); - if (!tt) { - if (cfg->record_faults) { - event.type = SMMU_EVT_F_TRANSLATION; - event.u.f_translation.addr = addr; - event.u.f_translation.rnw = flag & 0x1; + if (cfg->stage == 1) { + /* Select stage1 translation table. */ + tt = select_tt(cfg, addr); + if (!tt) { + if (cfg->record_faults) { + event.type = SMMU_EVT_F_TRANSLATION; + event.u.f_translation.addr = addr; + event.u.f_translation.rnw = flag & 0x1; + } + status = SMMU_TRANS_ERROR; + goto epilogue; } - status = SMMU_TRANS_ERROR; - goto epilogue; - } + tt_combined.granule_sz = tt->granule_sz; + tt_combined.tsz = tt->tsz; - page_mask = (1ULL << (tt->granule_sz)) - 1; + } else { + /* Stage2. */ + tt_combined.granule_sz = cfg->s2cfg.granule_sz; + tt_combined.tsz = cfg->s2cfg.tsz; + } + /* + * TLB lookup looks for granule and input size for a translation stage, + * as only one stage is supported right now, choose the right values + * from the configuration. + */ + page_mask = (1ULL << tt_combined.granule_sz) - 1; aligned_addr = addr & ~page_mask; - cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr); + cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr); if (cached_entry) { if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) { status = SMMU_TRANS_ERROR; From 2eaeb7d593254a34f3e551865b687886c2698f96 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:50 +0100 Subject: [PATCH 08/21] hw/arm/smmuv3: Add VMID to TLB tagging Allow TLB to be tagged with VMID. If stage-1 is only supported, VMID is set to -1 and ignored from STE and CMD_TLBI_NH* cmds. Update smmu_iotlb_insert trace event to have vmid. 
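For illustration only, not part of the patch: the TLB key now carries the VMID alongside the ASID, so entries inserted on behalf of one guest can never satisfy a lookup for another. The stand-alone sketch below models the key with the same fields as SMMUIOTLBKey and shows that two keys differing only in VMID no longer compare equal; the values are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same fields as SMMUIOTLBKey after this patch. */
struct key {
    uint64_t iova;
    uint16_t asid;
    uint16_t vmid;
    uint8_t  tg;
    uint8_t  level;
};

static bool key_equal(struct key a, struct key b)
{
    return a.iova == b.iova && a.asid == b.asid && a.vmid == b.vmid &&
           a.tg == b.tg && a.level == b.level;
}

int main(void)
{
    struct key a = { .iova = 0x8000, .asid = 1, .vmid = 1, .tg = 0, .level = 3 };
    struct key b = a;

    b.vmid = 2;   /* same IOVA and ASID, but a different virtual machine */
    printf("same vmid matches: %d, different vmid matches: %d\n",
           key_equal(a, a), key_equal(a, b));
    return 0;
}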
Signed-off-by: Mostafa Saleh Reviewed-by: Eric Auger Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-8-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmu-common.c | 36 ++++++++++++++++++++++-------------- hw/arm/smmu-internal.h | 2 ++ hw/arm/smmuv3.c | 12 +++++++++--- hw/arm/trace-events | 6 +++--- include/hw/arm/smmu-common.h | 5 +++-- 5 files changed, 39 insertions(+), 22 deletions(-) diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 3e82eab741..6109beaa70 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -38,7 +38,7 @@ static guint smmu_iotlb_key_hash(gconstpointer v) /* Jenkins hash */ a = b = c = JHASH_INITVAL + sizeof(*key); - a += key->asid + key->level + key->tg; + a += key->asid + key->vmid + key->level + key->tg; b += extract64(key->iova, 0, 32); c += extract64(key->iova, 32, 32); @@ -53,13 +53,15 @@ static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2) SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2; return (k1->asid == k2->asid) && (k1->iova == k2->iova) && - (k1->level == k2->level) && (k1->tg == k2->tg); + (k1->level == k2->level) && (k1->tg == k2->tg) && + (k1->vmid == k2->vmid); } -SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova, +SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level) { - SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level}; + SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova, + .tg = tg, .level = level}; return key; } @@ -78,7 +80,8 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, uint64_t mask = subpage_size - 1; SMMUIOTLBKey key; - key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level); + key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, + iova & ~mask, tg, level); entry = g_hash_table_lookup(bs->iotlb, &key); if (entry) { break; @@ -88,13 +91,13 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, if (entry) { cfg->iotlb_hits++; - trace_smmu_iotlb_lookup_hit(cfg->asid, iova, + trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova, cfg->iotlb_hits, cfg->iotlb_misses, 100 * cfg->iotlb_hits / (cfg->iotlb_hits + cfg->iotlb_misses)); } else { cfg->iotlb_misses++; - trace_smmu_iotlb_lookup_miss(cfg->asid, iova, + trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova, cfg->iotlb_hits, cfg->iotlb_misses, 100 * cfg->iotlb_hits / (cfg->iotlb_hits + cfg->iotlb_misses)); @@ -111,8 +114,10 @@ void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new) smmu_iotlb_inv_all(bs); } - *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level); - trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level); + *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, + tg, new->level); + trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, + tg, new->level); g_hash_table_insert(bs->iotlb, key, new); } @@ -130,8 +135,7 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, return SMMU_IOTLB_ASID(*iotlb_key) == asid; } - -static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, +static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value, gpointer user_data) { SMMUTLBEntry *iter = (SMMUTLBEntry *)value; @@ -142,18 +146,21 @@ static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) { return false; } + if 
(info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) { + return false; + } return ((info->iova & ~entry->addr_mask) == entry->iova) || ((entry->iova & ~info->mask) == info->iova); } -void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, +void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl) { /* if tg is not set we use 4KB range invalidation */ uint8_t granule = tg ? tg * 2 + 10 : 12; if (ttl && (num_pages == 1) && (asid >= 0)) { - SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl); + SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl); if (g_hash_table_remove(s->iotlb, &key)) { return; @@ -166,10 +173,11 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, SMMUIOTLBPageInvInfo info = { .asid = asid, .iova = iova, + .vmid = vmid, .mask = (num_pages * 1 << granule) - 1}; g_hash_table_foreach_remove(s->iotlb, - smmu_hash_remove_by_asid_iova, + smmu_hash_remove_by_asid_vmid_iova, &info); } diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h index a9454f914e..843bebb185 100644 --- a/hw/arm/smmu-internal.h +++ b/hw/arm/smmu-internal.h @@ -132,9 +132,11 @@ static inline int pgd_concat_idx(int start_level, int granule_sz, } #define SMMU_IOTLB_ASID(key) ((key).asid) +#define SMMU_IOTLB_VMID(key) ((key).vmid) typedef struct SMMUIOTLBPageInvInfo { int asid; + int vmid; uint64_t iova; uint64_t mask; } SMMUIOTLBPageInvInfo; diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 3fb5ed512b..e59630806b 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1067,7 +1067,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) { dma_addr_t end, addr = CMD_ADDR(cmd); uint8_t type = CMD_TYPE(cmd); - uint16_t vmid = CMD_VMID(cmd); + int vmid = -1; uint8_t scale = CMD_SCALE(cmd); uint8_t num = CMD_NUM(cmd); uint8_t ttl = CMD_TTL(cmd); @@ -1076,6 +1076,12 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) uint64_t num_pages; uint8_t granule; int asid = -1; + SMMUv3State *smmuv3 = ARM_SMMUV3(s); + + /* Only consider VMID if stage-2 is supported. 
*/ + if (STAGE2_SUPPORTED(smmuv3)) { + vmid = CMD_VMID(cmd); + } if (type == SMMU_CMD_TLBI_NH_VA) { asid = CMD_ASID(cmd); @@ -1084,7 +1090,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) if (!tg) { trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1); - smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl); + smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl); return; } @@ -1102,7 +1108,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) num_pages = (mask + 1) >> granule; trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages); - smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl); + smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl); addr += mask + 1; } } diff --git a/hw/arm/trace-events b/hw/arm/trace-events index 205ac04573..705104e58b 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -14,9 +14,9 @@ smmu_iotlb_inv_all(void) "IOTLB invalidate all" smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" -smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" -smmu_iotlb_lookup_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" -smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d" +smmu_iotlb_lookup_hit(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_lookup_miss(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_insert(uint16_t asid, uint16_t vmid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d vmid=%d addr=0x%"PRIx64" tg=%d level=%d" # smmuv3.c smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 4f1405d4e4..3cbb4998ad 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -125,6 +125,7 @@ typedef struct SMMUPciBus { typedef struct SMMUIOTLBKey { uint64_t iova; uint16_t asid; + uint16_t vmid; uint8_t tg; uint8_t level; } SMMUIOTLBKey; @@ -188,11 +189,11 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid); SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, SMMUTransTableInfo *tt, hwaddr iova); void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry); -SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova, +SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level); void smmu_iotlb_inv_all(SMMUState *s); void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid); -void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, +void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl); /* Unmap the range of all the notifiers registered to any IOMMU mr */ From ccc3ee38713381a823a6b9229b36a0a1b36d919d Mon 
Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:51 +0100 Subject: [PATCH 09/21] hw/arm/smmuv3: Add CMDs related to stage-2 CMD_TLBI_S2_IPA: As S1+S2 is not enabled, for now this can be the same as CMD_TLBI_NH_VAA. CMD_TLBI_S12_VMALL: Added new function to invalidate TLB by VMID. For stage-1 only commands, add a check to throw CERROR_ILL if used when stage-1 is not supported. Reviewed-by: Eric Auger Signed-off-by: Mostafa Saleh Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-9-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmu-common.c | 16 +++++++++++ hw/arm/smmuv3.c | 55 ++++++++++++++++++++++++++++++------ hw/arm/trace-events | 4 ++- include/hw/arm/smmu-common.h | 1 + 4 files changed, 67 insertions(+), 9 deletions(-) diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 6109beaa70..5ab9d45d58 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -135,6 +135,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, return SMMU_IOTLB_ASID(*iotlb_key) == asid; } + +static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value, + gpointer user_data) +{ + uint16_t vmid = *(uint16_t *)user_data; + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; + + return SMMU_IOTLB_VMID(*iotlb_key) == vmid; +} + static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value, gpointer user_data) { @@ -187,6 +197,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); } +inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) +{ + trace_smmu_iotlb_inv_vmid(vmid); + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); +} + /* VMSAv8-64 Translation */ /** diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index e59630806b..536bb9e340 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1063,7 +1063,7 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, } } -static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) +static void smmuv3_range_inval(SMMUState *s, Cmd *cmd) { dma_addr_t end, addr = CMD_ADDR(cmd); uint8_t type = CMD_TYPE(cmd); @@ -1088,7 +1088,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) } if (!tg) { - trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); + trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1); smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl); return; @@ -1106,7 +1106,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) uint64_t mask = dma_aligned_pow2_mask(addr, end, 64); num_pages = (mask + 1) >> granule; - trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); + trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages); smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl); addr += mask + 1; @@ -1240,12 +1240,22 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) { uint16_t asid = CMD_ASID(&cmd); + if (!STAGE1_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + trace_smmuv3_cmdq_tlbi_nh_asid(asid); smmu_inv_notifiers_all(&s->smmu_state); smmu_iotlb_inv_asid(bs, asid); break; } case SMMU_CMD_TLBI_NH_ALL: + if (!STAGE1_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + QEMU_FALLTHROUGH; case SMMU_CMD_TLBI_NSNH_ALL: trace_smmuv3_cmdq_tlbi_nh(); smmu_inv_notifiers_all(&s->smmu_state); @@ -1253,7 +1263,36 @@ static int 
smmuv3_cmdq_consume(SMMUv3State *s) break; case SMMU_CMD_TLBI_NH_VAA: case SMMU_CMD_TLBI_NH_VA: - smmuv3_s1_range_inval(bs, &cmd); + if (!STAGE1_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + smmuv3_range_inval(bs, &cmd); + break; + case SMMU_CMD_TLBI_S12_VMALL: + { + uint16_t vmid = CMD_VMID(&cmd); + + if (!STAGE2_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + trace_smmuv3_cmdq_tlbi_s12_vmid(vmid); + smmu_inv_notifiers_all(&s->smmu_state); + smmu_iotlb_inv_vmid(bs, vmid); + break; + } + case SMMU_CMD_TLBI_S2_IPA: + if (!STAGE2_SUPPORTED(s)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + /* + * As currently only either s1 or s2 are supported + * we can reuse same function for s2. + */ + smmuv3_range_inval(bs, &cmd); break; case SMMU_CMD_TLBI_EL3_ALL: case SMMU_CMD_TLBI_EL3_VA: @@ -1261,8 +1300,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) case SMMU_CMD_TLBI_EL2_ASID: case SMMU_CMD_TLBI_EL2_VA: case SMMU_CMD_TLBI_EL2_VAA: - case SMMU_CMD_TLBI_S12_VMALL: - case SMMU_CMD_TLBI_S2_IPA: case SMMU_CMD_ATC_INV: case SMMU_CMD_PRI_RESP: case SMMU_CMD_RESUME: @@ -1271,12 +1308,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) break; default: cmd_error = SMMU_CERROR_ILL; - qemu_log_mask(LOG_GUEST_ERROR, - "Illegal command type: %d\n", CMD_TYPE(&cmd)); break; } qemu_mutex_unlock(&s->mutex); if (cmd_error) { + if (cmd_error == SMMU_CERROR_ILL) { + qemu_log_mask(LOG_GUEST_ERROR, + "Illegal command type: %d\n", CMD_TYPE(&cmd)); + } break; } /* diff --git a/hw/arm/trace-events b/hw/arm/trace-events index 705104e58b..f8fdf1ca9f 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -12,6 +12,7 @@ smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, ui smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 smmu_iotlb_inv_all(void) "IOTLB invalidate all" smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" +smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d" smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" smmu_iotlb_lookup_hit(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" @@ -45,9 +46,10 @@ smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x - end=0x%x" smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x" smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" -smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" +smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" smmuv3_cmdq_tlbi_nh(void) "" smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d" smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node 
for iommu mr=%s" diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 3cbb4998ad..fd8d772da1 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -193,6 +193,7 @@ SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level); void smmu_iotlb_inv_all(SMMUState *s); void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid); +void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid); void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl); From 32bd7baec2991bba1d801c348a74fc531dcae6f1 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:51 +0100 Subject: [PATCH 10/21] hw/arm/smmuv3: Add stage-2 support in iova notifier In smmuv3_notify_iova, read the granule based on translation stage and use VMID if valid value is sent. Signed-off-by: Mostafa Saleh Reviewed-by: Eric Auger Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-10-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmuv3.c | 39 ++++++++++++++++++++++++++------------- hw/arm/trace-events | 2 +- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 536bb9e340..f51561ebaf 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1000,18 +1000,21 @@ epilogue: * @mr: IOMMU mr region handle * @n: notifier to be called * @asid: address space ID or negative value if we don't care + * @vmid: virtual machine ID or negative value if we don't care * @iova: iova * @tg: translation granule (if communicated through range invalidation) * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1 */ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, IOMMUNotifier *n, - int asid, dma_addr_t iova, - uint8_t tg, uint64_t num_pages) + int asid, int vmid, + dma_addr_t iova, uint8_t tg, + uint64_t num_pages) { SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); IOMMUTLBEvent event; uint8_t granule; + SMMUv3State *s = sdev->smmu; if (!tg) { SMMUEventInfo event = {.inval_ste_allowed = true}; @@ -1026,11 +1029,20 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, return; } - tt = select_tt(cfg, iova); - if (!tt) { + if (vmid >= 0 && cfg->s2cfg.vmid != vmid) { return; } - granule = tt->granule_sz; + + if (STAGE1_SUPPORTED(s)) { + tt = select_tt(cfg, iova); + if (!tt) { + return; + } + granule = tt->granule_sz; + } else { + granule = cfg->s2cfg.granule_sz; + } + } else { granule = tg * 2 + 10; } @@ -1044,9 +1056,10 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, memory_region_notify_iommu_one(n, &event); } -/* invalidate an asid/iova range tuple in all mr's */ -static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, - uint8_t tg, uint64_t num_pages) +/* invalidate an asid/vmid/iova range tuple in all mr's */ +static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid, + dma_addr_t iova, uint8_t tg, + uint64_t num_pages) { SMMUDevice *sdev; @@ -1054,11 +1067,11 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, IOMMUMemoryRegion *mr = &sdev->iommu; IOMMUNotifier *n; - trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova, - tg, num_pages); + trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid, + iova, tg, num_pages); IOMMU_NOTIFIER_FOREACH(n, mr) { - smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages); + smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages); } } } @@ 
-1089,7 +1102,7 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd) if (!tg) { trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); - smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1); + smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1); smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl); return; } @@ -1107,7 +1120,7 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd) num_pages = (mask + 1) >> granule; trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); - smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages); + smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages); smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl); addr += mask + 1; } diff --git a/hw/arm/trace-events b/hw/arm/trace-events index f8fdf1ca9f..cdc1ea06a8 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -53,5 +53,5 @@ smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d" smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" -smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 +smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 From 8cefcc3b7127f1c497aa832378fe69453fb9db2c Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Thu, 25 May 2023 10:37:51 +0100 Subject: [PATCH 11/21] hw/arm/smmuv3: Add knob to choose translation stage and enable stage-2 As everything is in place, we can use a new system property to advertise which stage is supported and remove bad_ste from STE stage2 config. The property added arm-smmuv3.stage can have 3 values: - "1": Stage-1 only is advertised. - "2": Stage-2 only is advertised. If not passed or an unsupported value is passed, it will default to stage-1. Advertise VMID16. Don't try to decode CD, if stage-2 is configured. 
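For illustration only, not part of the patch: with this property in place, stage-2 can be exercised on the virt machine by enabling the SMMU and setting the property globally, for example with something like "qemu-system-aarch64 -M virt,iommu=smmuv3 -global arm-smmuv3.stage=2 ..." (other options elided); leaving the property unset, or passing any other value, keeps the stage-1-only behaviour described above.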
Reviewed-by: Eric Auger Signed-off-by: Mostafa Saleh Tested-by: Eric Auger Tested-by: Jean-Philippe Brucker Message-id: 20230516203327.2051088-11-smostafa@google.com Signed-off-by: Peter Maydell --- hw/arm/smmuv3.c | 32 ++++++++++++++++++++++---------- include/hw/arm/smmuv3.h | 1 + 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index f51561ebaf..932f009697 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -21,6 +21,7 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "migration/vmstate.h" +#include "hw/qdev-properties.h" #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "cpu.h" @@ -241,14 +242,17 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info) static void smmuv3_init_regs(SMMUv3State *s) { - /** - * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID, - * multi-level stream table - */ - s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */ + /* Based on sys property, the stages supported in smmu will be advertised.*/ + if (s->stage && !strcmp("2", s->stage)) { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1); + } else { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); + } + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */ /* terminated transaction will always be aborted/error returned */ @@ -452,10 +456,6 @@ static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste) goto bad_ste; } - /* This is still here as stage 2 has not been fully enabled yet. */ - qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n"); - goto bad_ste; - return 0; bad_ste: @@ -734,7 +734,7 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, return ret; } - if (cfg->aborted || cfg->bypassed) { + if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) { return 0; } @@ -1805,6 +1805,17 @@ static const VMStateDescription vmstate_smmuv3 = { } }; +static Property smmuv3_properties[] = { + /* + * Stages of translation advertised. + * "1": Stage 1 + * "2": Stage 2 + * Defaults to stage 1 + */ + DEFINE_PROP_STRING("stage", SMMUv3State, stage), + DEFINE_PROP_END_OF_LIST() +}; + static void smmuv3_instance_init(Object *obj) { /* Nothing much to do here as of now */ @@ -1821,6 +1832,7 @@ static void smmuv3_class_init(ObjectClass *klass, void *data) &c->parent_phases); c->parent_realize = dc->realize; dc->realize = smmu_realize; + device_class_set_props(dc, smmuv3_properties); } static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h index 6031d7d325..d183a62766 100644 --- a/include/hw/arm/smmuv3.h +++ b/include/hw/arm/smmuv3.h @@ -62,6 +62,7 @@ struct SMMUv3State { qemu_irq irq[4]; QemuMutex mutex; + char *stage; }; typedef enum { From 31afe04586efeccb80cc36ffafcd0e32a3245ffb Mon Sep 17 00:00:00 2001 From: Tommy Wu Date: Thu, 25 May 2023 10:37:51 +0100 Subject: [PATCH 12/21] hw/dma/xilinx_axidma: Check DMASR.HALTED to prevent infinite loop. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we receive a packet from the xilinx_axienet and then try to s2mem through the xilinx_axidma, if the descriptor ring buffer is full in the xilinx axidma driver, we’ll assert the DMASR.HALTED in the function : stream_process_s2mem and return 0. In the end, we’ll be stuck in an infinite loop in axienet_eth_rx_notify. This patch checks the DMASR.HALTED state when we try to push data from xilinx axi-enet to xilinx axi-dma. When the DMASR.HALTED is asserted, we will not keep pushing the data and then prevent the infinte loop. Signed-off-by: Tommy Wu Reviewed-by: Edgar E. Iglesias Reviewed-by: Frank Chang Message-id: 20230519062137.1251741-1-tommy.wu@sifive.com Signed-off-by: Peter Maydell --- hw/dma/xilinx_axidma.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index 6030c76435..12c90267df 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s) return !!(s->regs[R_DMASR] & DMASR_IDLE); } +static inline int stream_halted(struct Stream *s) +{ + return !!(s->regs[R_DMASR] & DMASR_HALTED); +} + static void stream_reset(struct Stream *s) { s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */ @@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev, uint64_t addr; bool eop; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return; } @@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, unsigned int rxlen; size_t pos = 0; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return 0; } @@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj, XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj); struct Stream *s = &ds->dma->streams[1]; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { ds->dma->notify = notify; ds->dma->notify_opaque = notify_opaque; return false; From c9ba1c9f02cfede5329f504cdda6fd3a256e0434 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Chigot?= Date: Wed, 24 May 2023 16:37:14 +0200 Subject: [PATCH 13/21] hw/arm/xlnx-zynqmp: fix unsigned error when checking the RPUs number MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When passing --smp with a number lower than XLNX_ZYNQMP_NUM_APU_CPUS, the expression (ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS) will result in a positive number as ms->smp.cpus is a unsigned int. This will raise the following error afterwards, as Qemu will try to instantiate some additional RPUs. 
| $ qemu-system-aarch64 --smp 1 -M xlnx-zcu102 | ** | ERROR:../src/tcg/tcg.c:777:tcg_register_thread: | assertion failed: (n < tcg_max_ctxs) Signed-off-by: Clément Chigot Reviewed-by: Francisco Iglesias Tested-by: Francisco Iglesias Message-id: 20230524143714.565792-1-chigot@adacore.com Signed-off-by: Peter Maydell --- hw/arm/xlnx-zynqmp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 335cfc417d..5905a33015 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -213,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), XLNX_ZYNQMP_NUM_RPU_CPUS); if (num_rpus <= 0) { From e19d24e6d91ec92383c989d942c61a91b2a6bdac Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 24 May 2023 10:06:00 +0200 Subject: [PATCH 14/21] tests/qtest: Run arm-specific tests only if the required machine is available pflash-cfi02-test.c always uses the "musicpal" machine for testing, test-arm-mptimer.c always uses the "vexpress-a9" machine, and microbit-test.c requires the "microbit" machine, so we should only run these tests if the machines have been enabled in the configuration. Signed-off-by: Thomas Huth Reviewed-by: Fabiano Rosas Message-id: 20230524080600.1618137-1-thuth@redhat.com Signed-off-by: Peter Maydell --- tests/qtest/meson.build | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 4c5585ac0f..087f2dc9d7 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -198,14 +198,15 @@ qtests_arm = \ (config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_TIMER') ? ['cmsdk-apb-timer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_WATCHDOG') ? ['cmsdk-apb-watchdog-test'] : []) + \ - (config_all_devices.has_key('CONFIG_PFLASH_CFI02') ? ['pflash-cfi02-test'] : []) + \ + (config_all_devices.has_key('CONFIG_PFLASH_CFI02') and + config_all_devices.has_key('CONFIG_MUSICPAL') ? ['pflash-cfi02-test'] : []) + \ (config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed : []) + \ (config_all_devices.has_key('CONFIG_NPCM7XX') ? qtests_npcm7xx : []) + \ (config_all_devices.has_key('CONFIG_GENERIC_LOADER') ? ['hexloader-test'] : []) + \ (config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VEXPRESS') ? ['test-arm-mptimer'] : []) + \ + (config_all_devices.has_key('CONFIG_MICROBIT') ? ['microbit-test'] : []) + \ ['arm-cpu-features', - 'microbit-test', - 'test-arm-mptimer', 'boot-serial-test'] # TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional From d7fe699be54b2cbb8e4ee37b63588b3458a49da7 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 23 May 2023 14:17:26 +0100 Subject: [PATCH 15/21] target/arm: Explicitly select short-format FSR for M-profile For M-profile, there is no guest-facing A-profile format FSR, but we still use the env->exception.fsr field to pass fault information from the point where a fault is raised to the code in arm_v7m_cpu_do_interrupt() which interprets it and sets the M-profile specific fault status registers. 
So it doesn't matter whether we fill in env->exception.fsr in the short format or the LPAE format, as long as both sides agree. As it happens arm_v7m_cpu_do_interrupt() assumes short-form. In compute_fsr_fsc() we weren't explicitly choosing short-form for M-profile, but instead relied on it falling out in the wash because arm_s1_regime_using_lpae_format() would be false. This was broken in commit 452c67a4 when we added v8R support, because we said "PMSAv8 is always LPAE format" (as it is for v8R), forgetting that we were implicitly using this code path on M-profile. At that point we would hit a g_assert_not_reached(): ERROR:../../target/arm/internals.h:549:arm_fi_to_lfsc: code should not be reached #7 0x0000555555e055f7 in arm_fi_to_lfsc (fi=0x7fffecff9a90) at ../../target/arm/internals.h:549 #8 0x0000555555e05a27 in compute_fsr_fsc (env=0x555557356670, fi=0x7fffecff9a90, target_el=1, mmu_idx=1, ret_fsc=0x7fffecff9a1c) at ../../target/arm/tlb_helper.c:95 #9 0x0000555555e05b62 in arm_deliver_fault (cpu=0x555557354800, addr=268961344, access_type=MMU_INST_FETCH, mmu_idx=1, fi=0x7fffecff9a90) at ../../target/arm/tlb_helper.c:132 #10 0x0000555555e06095 in arm_cpu_tlb_fill (cs=0x555557354800, address=268961344, size=1, access_type=MMU_INST_FETCH, mmu_idx=1, probe=false, retaddr=0) at ../../target/arm/tlb_helper.c:260 The specific assertion changed when commit fcc7404eff24b4c added "assert not M-profile" to arm_is_secure_below_el3(), because the conditions being checked in compute_fsr_fsc() include arm_el_is_aa64(), which will end up calling arm_is_secure_below_el3() and asserting before we try to call arm_fi_to_lfsc(): #7 0x0000555555efaf43 in arm_is_secure_below_el3 (env=0x5555574665a0) at ../../target/arm/cpu.h:2396 #8 0x0000555555efb103 in arm_is_el2_enabled (env=0x5555574665a0) at ../../target/arm/cpu.h:2448 #9 0x0000555555efb204 in arm_el_is_aa64 (env=0x5555574665a0, el=1) at ../../target/arm/cpu.h:2509 #10 0x0000555555efbdfd in compute_fsr_fsc (env=0x5555574665a0, fi=0x7fffecff99e0, target_el=1, mmu_idx=1, ret_fsc=0x7fffecff996c) Avoid the assertion and the incorrect FSR format selection by explicitly making M-profile use the short-format in this function. Fixes: 452c67a42704 ("target/arm: Enable TTBCR_EAE for ARMv8-R AArch32")a Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1658 Cc: qemu-stable@nongnu.org Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20230523131726.866635-1-peter.maydell@linaro.org --- target/arm/tcg/tlb_helper.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index d5a89bc514..8df36c2cbf 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -75,8 +75,17 @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi, ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx); uint32_t fsr, fsc; - if (target_el == 2 || arm_el_is_aa64(env, target_el) || - arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) { + /* + * For M-profile there is no guest-facing FSR. We compute a + * short-form value for env->exception.fsr which we will then + * examine in arm_v7m_cpu_do_interrupt(). In theory we could + * use the LPAE format instead as long as both bits of code agree + * (and arm_fi_to_lfsc() handled the M-profile specific + * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases). 
+ */ + if (!arm_feature(env, ARM_FEATURE_M) && + (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) { /* * LPAE format fault status register : bottom 6 bits are * status code in the same form as needed for syndrome From 257a5ec524374bd00da38b30dddfef1d96ca96f4 Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Tue, 23 May 2023 15:05:23 -0300 Subject: [PATCH 16/21] target/arm: Explain why we need to select ARM_V7M MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We currently need to select ARM_V7M unconditionally when TCG is present in the build because some translate.c helpers and the whole of m_helpers.c are not yet under CONFIG_ARM_V7M. Suggested-by: Philippe Mathieu-Daudé Signed-off-by: Fabiano Rosas Reviewed-by: Philippe Mathieu-Daudé Message-id: 20230523180525.29994-2-farosas@suse.de Signed-off-by: Peter Maydell --- target/arm/Kconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/target/arm/Kconfig b/target/arm/Kconfig index 5947366f6e..bf57d739cd 100644 --- a/target/arm/Kconfig +++ b/target/arm/Kconfig @@ -1,6 +1,9 @@ config ARM bool select ARM_COMPATIBLE_SEMIHOSTING if TCG + + # We need to select this until we move m_helper.c and the + # translate.c v7m helpers under ARM_V7M. select ARM_V7M if TCG config AARCH64 From da324efb179cc86e6c8751a59dc215e510836b6d Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Tue, 23 May 2023 15:05:24 -0300 Subject: [PATCH 17/21] arm/Kconfig: Keep Kconfig default entries in default.mak as documentation When we moved the arm default CONFIGs into Kconfig and removed them from default.mak, we made it harder to identify which CONFIGs are selected by default in case users want to disable them. Bring back the default entries into default.mak, but keep them commented out. This way users can keep their workflows of editing default.mak to remove build options without needing to search through Kconfig. Reported-by: Thomas Huth Signed-off-by: Fabiano Rosas Reviewed-by: Thomas Huth Message-id: 20230523180525.29994-3-farosas@suse.de Signed-off-by: Peter Maydell --- configs/devices/aarch64-softmmu/default.mak | 6 ++++ configs/devices/arm-softmmu/default.mak | 40 +++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/configs/devices/aarch64-softmmu/default.mak b/configs/devices/aarch64-softmmu/default.mak index 70e05a197d..f82a04c27d 100644 --- a/configs/devices/aarch64-softmmu/default.mak +++ b/configs/devices/aarch64-softmmu/default.mak @@ -2,3 +2,9 @@ # We support all the 32 bit boards so need all their config include ../arm-softmmu/default.mak + +# These are selected by default when TCG is enabled, uncomment them to +# keep out of the build. +# CONFIG_XLNX_ZYNQMP_ARM=n +# CONFIG_XLNX_VERSAL=n +# CONFIG_SBSA_REF=n diff --git a/configs/devices/arm-softmmu/default.mak b/configs/devices/arm-softmmu/default.mak index 647fbce88d..980c48a7d9 100644 --- a/configs/devices/arm-softmmu/default.mak +++ b/configs/devices/arm-softmmu/default.mak @@ -4,3 +4,43 @@ # CONFIG_TEST_DEVICES=n CONFIG_ARM_VIRT=y + +# These are selected by default when TCG is enabled, uncomment them to +# keep out of the build. 
+# CONFIG_CUBIEBOARD=n +# CONFIG_EXYNOS4=n +# CONFIG_HIGHBANK=n +# CONFIG_INTEGRATOR=n +# CONFIG_FSL_IMX31=n +# CONFIG_MUSICPAL=n +# CONFIG_MUSCA=n +# CONFIG_CHEETAH=n +# CONFIG_SX1=n +# CONFIG_NSERIES=n +# CONFIG_STELLARIS=n +# CONFIG_STM32VLDISCOVERY=n +# CONFIG_REALVIEW=n +# CONFIG_VERSATILE=n +# CONFIG_VEXPRESS=n +# CONFIG_ZYNQ=n +# CONFIG_MAINSTONE=n +# CONFIG_GUMSTIX=n +# CONFIG_SPITZ=n +# CONFIG_TOSA=n +# CONFIG_Z2=n +# CONFIG_NPCM7XX=n +# CONFIG_COLLIE=n +# CONFIG_ASPEED_SOC=n +# CONFIG_NETDUINO2=n +# CONFIG_NETDUINOPLUS2=n +# CONFIG_OLIMEX_STM32_H405=n +# CONFIG_MPS2=n +# CONFIG_RASPI=n +# CONFIG_DIGIC=n +# CONFIG_SABRELITE=n +# CONFIG_EMCRAFT_SF2=n +# CONFIG_MICROBIT=n +# CONFIG_FSL_IMX25=n +# CONFIG_FSL_IMX7=n +# CONFIG_FSL_IMX6UL=n +# CONFIG_ALLWINNER_H3=n From 441d701db75a6aa0c5e7c21836282e1555924a2d Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Tue, 23 May 2023 15:05:25 -0300 Subject: [PATCH 18/21] arm/Kconfig: Make TCG dependence explicit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the 'default y if TCG' pattern with 'default y; depends on TCG'. That makes explict that there is a dependence on TCG and enabling these CONFIGs via .mak files without TCG present will fail earlier. Suggested-by: Paolo Bonzini Signed-off-by: Fabiano Rosas Reviewed-by: Thomas Huth Reviewed-by: Philippe Mathieu-Daudé Message-id: 20230523180525.29994-4-farosas@suse.de Signed-off-by: Peter Maydell --- hw/arm/Kconfig | 123 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 82 insertions(+), 41 deletions(-) diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 0f42c556d7..acc4371a4a 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -35,24 +35,28 @@ config ARM_VIRT config CHEETAH bool - default y if TCG && ARM + default y + depends on TCG && ARM select OMAP select TSC210X config CUBIEBOARD bool - default y if TCG && ARM + default y + depends on TCG && ARM select ALLWINNER_A10 config DIGIC bool - default y if TCG && ARM + default y + depends on TCG && ARM select PTIMER select PFLASH_CFI02 config EXYNOS4 bool - default y if TCG && ARM + default y + depends on TCG && ARM imply I2C_DEVICES select A9MPCORE select I2C @@ -65,7 +69,8 @@ config EXYNOS4 config HIGHBANK bool - default y if TCG && ARM + default y + depends on TCG && ARM select A9MPCORE select A15MPCORE select AHCI @@ -80,7 +85,8 @@ config HIGHBANK config INTEGRATOR bool - default y if TCG && ARM + default y + depends on TCG && ARM select ARM_TIMER select INTEGRATOR_DEBUG select PL011 # UART @@ -93,14 +99,16 @@ config INTEGRATOR config MAINSTONE bool - default y if TCG && ARM + default y + depends on TCG && ARM select PXA2XX select PFLASH_CFI01 select SMC91C111 config MUSCA bool - default y if TCG && ARM + default y + depends on TCG && ARM select ARMSSE select PL011 select PL031 @@ -112,7 +120,8 @@ config MARVELL_88W8618 config MUSICPAL bool - default y if TCG && ARM + default y + depends on TCG && ARM select OR_IRQ select BITBANG_I2C select MARVELL_88W8618 @@ -123,22 +132,26 @@ config MUSICPAL config NETDUINO2 bool - default y if TCG && ARM + default y + depends on TCG && ARM select STM32F205_SOC config NETDUINOPLUS2 bool - default y if TCG && ARM + default y + depends on TCG && ARM select STM32F405_SOC config OLIMEX_STM32_H405 bool - default y if TCG && ARM + default y + depends on TCG && ARM select STM32F405_SOC config NSERIES bool - default y if TCG && ARM + default y + depends on TCG && ARM select OMAP select TMP105 # temperature sensor select BLIZZARD # LCD/TV controller 
@@ -171,14 +184,16 @@ config PXA2XX config GUMSTIX bool - default y if TCG && ARM + default y + depends on TCG && ARM select PFLASH_CFI01 select SMC91C111 select PXA2XX config TOSA bool - default y if TCG && ARM + default y + depends on TCG && ARM select ZAURUS # scoop select MICRODRIVE select PXA2XX @@ -186,7 +201,8 @@ config TOSA config SPITZ bool - default y if TCG && ARM + default y + depends on TCG && ARM select ADS7846 # touch-screen controller select MAX111X # A/D converter select WM8750 # audio codec @@ -199,7 +215,8 @@ config SPITZ config Z2 bool - default y if TCG && ARM + default y + depends on TCG && ARM select PFLASH_CFI01 select WM8750 select PL011 # UART @@ -207,7 +224,8 @@ config Z2 config REALVIEW bool - default y if TCG && ARM + default y + depends on TCG && ARM imply PCI_DEVICES imply PCI_TESTDEV imply I2C_DEVICES @@ -236,7 +254,8 @@ config REALVIEW config SBSA_REF bool - default y if TCG && AARCH64 + default y + depends on TCG && AARCH64 imply PCI_DEVICES select AHCI select ARM_SMMUV3 @@ -252,13 +271,15 @@ config SBSA_REF config SABRELITE bool - default y if TCG && ARM + default y + depends on TCG && ARM select FSL_IMX6 select SSI_M25P80 config STELLARIS bool - default y if TCG && ARM + default y + depends on TCG && ARM imply I2C_DEVICES select ARM_V7M select CMSDK_APB_WATCHDOG @@ -276,7 +297,8 @@ config STELLARIS config STM32VLDISCOVERY bool - default y if TCG && ARM + default y + depends on TCG && ARM select STM32F100_SOC config STRONGARM @@ -285,19 +307,22 @@ config STRONGARM config COLLIE bool - default y if TCG && ARM + default y + depends on TCG && ARM select PFLASH_CFI01 select ZAURUS # scoop select STRONGARM config SX1 bool - default y if TCG && ARM + default y + depends on TCG && ARM select OMAP config VERSATILE bool - default y if TCG && ARM + default y + depends on TCG && ARM select ARM_TIMER # sp804 select PFLASH_CFI01 select LSI_SCSI_PCI @@ -309,7 +334,8 @@ config VERSATILE config VEXPRESS bool - default y if TCG && ARM + default y + depends on TCG && ARM select A9MPCORE select A15MPCORE select ARM_MPTIMER @@ -325,7 +351,8 @@ config VEXPRESS config ZYNQ bool - default y if TCG && ARM + default y + depends on TCG && ARM select A9MPCORE select CADENCE # UART select PFLASH_CFI02 @@ -342,7 +369,8 @@ config ZYNQ config ARM_V7M bool # currently v7M must be included in a TCG build due to translate.c - default y if TCG && ARM + default y + depends on TCG && ARM select PTIMER config ALLWINNER_A10 @@ -361,7 +389,8 @@ config ALLWINNER_A10 config ALLWINNER_H3 bool - default y if TCG && ARM + default y + depends on TCG && ARM select ALLWINNER_A10_PIT select ALLWINNER_SUN8I_EMAC select ALLWINNER_I2C @@ -376,7 +405,8 @@ config ALLWINNER_H3 config RASPI bool - default y if TCG && ARM + default y + depends on TCG && ARM select FRAMEBUFFER select PL011 # UART select SDHCI @@ -407,7 +437,8 @@ config STM32F405_SOC config XLNX_ZYNQMP_ARM bool - default y if TCG && AARCH64 + default y + depends on TCG && AARCH64 select AHCI select ARM_GIC select CADENCE @@ -425,7 +456,8 @@ config XLNX_ZYNQMP_ARM config XLNX_VERSAL bool - default y if TCG && AARCH64 + default y + depends on TCG && AARCH64 select ARM_GIC select PL011 select CADENCE @@ -440,7 +472,8 @@ config XLNX_VERSAL config NPCM7XX bool - default y if TCG && ARM + default y + depends on TCG && ARM select A9MPCORE select ADM1272 select ARM_GIC @@ -457,7 +490,8 @@ config NPCM7XX config FSL_IMX25 bool - default y if TCG && ARM + default y + depends on TCG && ARM imply I2C_DEVICES select IMX select IMX_FEC @@ -467,7 +501,8 @@ 
config FSL_IMX25 config FSL_IMX31 bool - default y if TCG && ARM + default y + depends on TCG && ARM imply I2C_DEVICES select SERIAL select IMX @@ -488,7 +523,8 @@ config FSL_IMX6 config ASPEED_SOC bool - default y if TCG && ARM + default y + depends on TCG && ARM select DS1338 select FTGMAC100 select I2C @@ -509,7 +545,8 @@ config ASPEED_SOC config MPS2 bool - default y if TCG && ARM + default y + depends on TCG && ARM imply I2C_DEVICES select ARMSSE select LAN9118 @@ -525,7 +562,8 @@ config MPS2 config FSL_IMX7 bool - default y if TCG && ARM + default y + depends on TCG && ARM imply PCI_DEVICES imply TEST_DEVICES imply I2C_DEVICES @@ -544,7 +582,8 @@ config ARM_SMMUV3 config FSL_IMX6UL bool - default y if TCG && ARM + default y + depends on TCG && ARM imply I2C_DEVICES select A15MPCORE select IMX @@ -556,7 +595,8 @@ config FSL_IMX6UL config MICROBIT bool - default y if TCG && ARM + default y + depends on TCG && ARM select NRF51_SOC config NRF51_SOC @@ -568,7 +608,8 @@ config NRF51_SOC config EMCRAFT_SF2 bool - default y if TCG && ARM + default y + depends on TCG && ARM select MSF2 select SSI_M25P80 From 5d6c687c9d2249de3f41d8d71baa1f15cc5cf112 Mon Sep 17 00:00:00 2001 From: Enze Li Date: Thu, 25 May 2023 14:43:45 +0800 Subject: [PATCH 19/21] Update copyright dates to 2023 I noticed that in the latest version, the copyright string is still 2022, even though 2023 is halfway through. This patch fixes that and fixes the documentation along with it. Signed-off-by: Enze Li Reviewed-by: Peter Maydell Message-id: 20230525064345.1152801-1-lienze@kylinos.cn Signed-off-by: Peter Maydell --- docs/conf.py | 2 +- include/qemu/help-texts.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index c687ff2663..e84a95e71c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -89,7 +89,7 @@ default_role = 'any' # General information about the project. project = u'QEMU' -copyright = u'2022, The QEMU Project Developers' +copyright = u'2023, The QEMU Project Developers' author = u'The QEMU Project Developers' # The version info for the project you're documenting, acts as replacement for diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h index 4f265fed8d..d0359f82e0 100644 --- a/include/qemu/help-texts.h +++ b/include/qemu/help-texts.h @@ -2,7 +2,7 @@ #define QEMU_HELP_TEXTS_H /* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2022 " \ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2023 " \ "Fabrice Bellard and the QEMU Project developers" /* Bug reporting information for --help arguments, About dialogs, etc */ From 0c08d4f310ebf3f621ee5b7447ded0736decbb43 Mon Sep 17 00:00:00 2001 From: Marcin Juszkiewicz Date: Wed, 24 May 2023 13:33:06 +0200 Subject: [PATCH 20/21] hw/arm/sbsa-ref: add GIC node into DT Let add GIC information into DeviceTree as part of SBSA-REF versioning. Trusted Firmware will read it and provide to next firmware level. Bumps platform version to 0.1 one so we can check is node is present. 
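Purely as an illustration (the node layout is inferred from the hunk in this patch; GIC_DIST_BASE/SIZE and GIC_REDIST_BASE/SIZE are placeholder symbols, not literal addresses), the generated node is expected to look roughly like:

  intc {
          reg = <0x0 GIC_DIST_BASE 0x0 GIC_DIST_SIZE
                 0x0 GIC_REDIST_BASE 0x0 GIC_REDIST_SIZE>;
  };

with the distributor and redistributor regions taken from sbsa_ref_memmap, each encoded as two address cells plus two size cells.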
Signed-off-by: Marcin Juszkiewicz Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- hw/arm/sbsa-ref.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index 9c3e670ec6..de21200ff9 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -29,6 +29,7 @@ #include "exec/hwaddr.h" #include "kvm_arm.h" #include "hw/arm/boot.h" +#include "hw/arm/fdt.h" #include "hw/arm/smmuv3.h" #include "hw/block/flash.h" #include "hw/boards.h" @@ -168,6 +169,20 @@ static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) return arm_cpu_mp_affinity(idx, clustersz); } +static void sbsa_fdt_add_gic_node(SBSAMachineState *sms) +{ + char *nodename; + + nodename = g_strdup_printf("/intc"); + qemu_fdt_add_subnode(sms->fdt, nodename); + qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg", + 2, sbsa_ref_memmap[SBSA_GIC_DIST].base, + 2, sbsa_ref_memmap[SBSA_GIC_DIST].size, + 2, sbsa_ref_memmap[SBSA_GIC_REDIST].base, + 2, sbsa_ref_memmap[SBSA_GIC_REDIST].size); + + g_free(nodename); +} /* * Firmware on this machine only uses ACPI table to load OS, these limited * device tree nodes are just to let firmware know the info which varies from @@ -204,7 +219,7 @@ static void create_fdt(SBSAMachineState *sms) * fw compatibility. */ qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0); - qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0); + qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 1); if (ms->numa_state->have_numa_distance) { int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); @@ -260,6 +275,8 @@ static void create_fdt(SBSAMachineState *sms) g_free(nodename); } + + sbsa_fdt_add_gic_node(sms); } #define SBSA_FLASH_SECTOR_SIZE (256 * KiB) From ec683110def96b16be3931ec87baba65a3dc5ad0 Mon Sep 17 00:00:00 2001 From: Marcin Juszkiewicz Date: Wed, 24 May 2023 13:33:07 +0200 Subject: [PATCH 21/21] docs: sbsa: correct graphics card name We moved from VGA to Bochs to have PCIe card. Signed-off-by: Marcin Juszkiewicz Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- docs/system/arm/sbsa.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/system/arm/sbsa.rst b/docs/system/arm/sbsa.rst index b499d7e927..016776aed8 100644 --- a/docs/system/arm/sbsa.rst +++ b/docs/system/arm/sbsa.rst @@ -27,6 +27,6 @@ The sbsa-ref board supports: - System bus EHCI controller - CDROM and hard disc on AHCI bus - E1000E ethernet card on PCIe bus - - VGA display adaptor on PCIe bus + - Bochs display adapter on PCIe bus - A generic SBSA watchdog device