/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

#ifdef CONFIG_64BIT
#define smmu_writeq	writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif
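/*
 * Note: on 32-bit builds the fallback above emits two 32-bit writes (the
 * high word at addr + 4 first, then the low word) in place of a single
 * 64-bit store.
 */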
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
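/*
 * Illustrative usage (not part of the driver itself): with the driver built
 * in, the parameter can be set on the kernel command line, e.g.
 * "arm-smmu.force_stage=2"; when built as a module it can be passed as
 * "force_stage=2" at module load time.
 */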
enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
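/*
 * The ASID/VMID used for TLB maintenance is derived directly from the
 * context bank index, so each context bank gets a unique tag without a
 * separate allocator.
 */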
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL },
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}
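/*
 * Simple bitmap allocator shared by the context-bank and SMR bookkeeping:
 * find a clear bit in [start, end) and claim it atomically, retrying if
 * another caller raced us to it.
 */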
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
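/*
 * These callbacks are handed to the io-pgtable code via io_pgtable_cfg
 * below; it invokes them as mappings are removed or changed so that the
 * SMMU TLBs never hold stale translations.
 */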
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR thanks to VMID16
		 * architectural oversight affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	if (dev->archdata.iommu) {
		dev_err(dev, "already attached to IOMMU domain\n");
		return -EEXIST;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS,
			gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
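/*
 * The mapping above follows the IAS/OAS/UBS field encodings of the ID
 * registers, which express address sizes as a small index (0 = 32 bits,
 * ..., 5 = 48 bits) rather than as a raw bit count.
 */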
static int arm_smmu_device_cfg_probe ( struct arm_smmu_device * smmu )
{
unsigned long size ;
void __iomem * gr0_base = ARM_SMMU_GR0 ( smmu ) ;
u32 id ;
2015-07-29 20:46:05 +02:00
bool cttw_dt , cttw_reg ;
2013-06-24 19:31:25 +02:00
dev_notice ( smmu - > dev , " probing hardware configuration... \n " ) ;
dev_notice ( smmu - > dev , " SMMUv%d with: \n " , smmu - > version ) ;
/* ID0 */
id = readl_relaxed ( gr0_base + ARM_SMMU_GR0_ID0 ) ;
2014-07-14 20:47:39 +02:00
/* Restrict available stages based on module parameter */
if ( force_stage = = 1 )
id & = ~ ( ID0_S2TS | ID0_NTS ) ;
else if ( force_stage = = 2 )
id & = ~ ( ID0_S1TS | ID0_NTS ) ;
2013-06-24 19:31:25 +02:00
if ( id & ID0_S1TS ) {
smmu - > features | = ARM_SMMU_FEAT_TRANS_S1 ;
dev_notice ( smmu - > dev , " \t stage 1 translation \n " ) ;
}
if ( id & ID0_S2TS ) {
smmu - > features | = ARM_SMMU_FEAT_TRANS_S2 ;
dev_notice ( smmu - > dev , " \t stage 2 translation \n " ) ;
}
if ( id & ID0_NTS ) {
smmu - > features | = ARM_SMMU_FEAT_TRANS_NESTED ;
dev_notice ( smmu - > dev , " \t nested translation \n " ) ;
}
if ( ! ( smmu - > features &
2014-07-14 20:47:39 +02:00
( ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 ) ) ) {
2013-06-24 19:31:25 +02:00
dev_err ( smmu - > dev , " \t no translation support! \n " ) ;
return - ENODEV ;
}
2015-06-29 18:47:42 +02:00
if ( ( id & ID0_S1TS ) & & ( ( smmu - > version = = 1 ) | | ! ( id & ID0_ATOSNS ) ) ) {
2014-10-29 22:13:40 +01:00
smmu - > features | = ARM_SMMU_FEAT_TRANS_OPS ;
dev_notice ( smmu - > dev , " \t address translation ops \n " ) ;
}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}
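
		/*
		 * Probe how many SMR mask/ID bits the hardware actually
		 * implements: write all-ones to both fields of SMR0 and read
		 * the value back; unimplemented bits come back as zero, and
		 * the surviving mask must be able to cover every ID bit.
		 */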
		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
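	/*
	 * Worked example (values chosen for illustration): with 4KB register
	 * pages (pgshift = 12) and NUMPAGENDXB = 3, the computation below
	 * gives 1 << 4 = 16 context-bank pages and a total address space of
	 * 16 * (2 << 12) = 16 * 8KB = 128KB, which should match the size of
	 * the "reg" region handed to the driver.
	 */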
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}
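
	/*
	 * Worked example: an SMMUv2 advertising the 4KB and 64KB granules
	 * (ID2_PTFS_4K | ID2_PTFS_64K) ends up with
	 * size = SZ_4K | SZ_2M | SZ_1G | SZ_64K | SZ_512M = 0x60211000,
	 * and the intersection below restricts arm_smmu_ops.pgsize_bitmap to
	 * the page sizes that both the driver default and this hardware
	 * support.
	 */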
	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
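
/*
 * Illustrative device-tree node (addresses, interrupt specifiers and stream
 * IDs are made up) showing the properties the probe routine below consumes:
 *
 *	smmu@ba5e0000 {
 *		compatible = "arm,smmu-v2";
 *		reg = <0xba5e0000 0x10000>;
 *		#global-interrupts = <2>;
 *		interrupts = <0 32 4>, <0 33 4>, <0 34 4>, <0 35 4>;
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 */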

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}
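
	/*
	 * Count the IRQ resources: the first num_global_irqs entries are the
	 * global fault interrupts, and everything listed after them is
	 * treated as a context-bank interrupt, so at least one context
	 * interrupt must be present on top of the global ones.
	 */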
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
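
	/*
	 * Illustrative binding for the loop below (node name and value made
	 * up): each "mmu-masters" entry on the SMMU node is a phandle to a
	 * master device node followed by that master's stream IDs, and the
	 * master node's "#stream-id-cells" property says how many IDs follow
	 * its phandle:
	 *
	 *	dma0: dma-controller@40000000 {
	 *		...
	 *		#stream-id-cells = <2>;
	 *	};
	 */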
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}
		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}
	return err;
}
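
/*
 * Undo arm_smmu_device_dt_probe(): unlink the SMMU from the global list,
 * drop the master of_node references, free the global fault IRQs and then
 * turn translation off by writing sCR0 with only CLIENTPD set.
 */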
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");