Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6: (26 commits)
  selinux: include vmalloc.h for vmalloc_user
  secmark: fix config problem when CONFIG_NF_CONNTRACK_SECMARK is not set
  selinux: implement mmap on /selinux/policy
  SELinux: allow userspace to read policy back out of the kernel
  SELinux: drop useless (and incorrect) AVTAB_MAX_SIZE
  SELinux: deterministic ordering of range transition rules
  kernel: roundup should only reference arguments once
  kernel: rounddown helper function
  secmark: export secctx, drop secmark in procfs
  conntrack: export lsm context rather than internal secid via netlink
  security: secid_to_secctx returns len when data is NULL
  secmark: make secmark object handling generic
  secmark: do not return early if there was no error
  AppArmor: Ensure the size of the copy is < the buffer allocated to hold it
  TOMOYO: Print URL information before panic().
  security: remove unused parameter from security_task_setscheduler()
  tpm: change 'tpm_suspend_pcr' to be module parameter
  selinux: fix up style problem on /selinux/status
  selinux: change to new flag variable
  selinux: really fix dependency causing parallel compile failure.
  ...
commit a8fe150098
Author: Linus Torvalds
Date:   2010-10-21 12:41:19 -07:00

38 changed files with 1804 additions and 245 deletions


@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
retval = security_task_setscheduler(p, 0, NULL);
retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;


@ -47,6 +47,16 @@ enum tpm_duration {
#define TPM_MAX_PROTECTED_ORDINAL 12
#define TPM_PROTECTED_ORDINAL_MASK 0xFF
/*
* Bug workaround - some TPM's don't flush the most
* recently changed pcr on suspend, so force the flush
* with an extend to the selected _unused_ non-volatile pcr.
*/
static int tpm_suspend_pcr;
module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
MODULE_PARM_DESC(suspend_pcr,
"PCR to use for dummy writes to faciltate flush on suspend.");
static LIST_HEAD(tpm_chip_list);
static DEFINE_SPINLOCK(driver_lock);
static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
@ -1077,18 +1087,6 @@ static struct tpm_input_header savestate_header = {
.ordinal = TPM_ORD_SAVESTATE
};
/* Bug workaround - some TPM's don't flush the most
* recently changed pcr on suspend, so force the flush
* with an extend to the selected _unused_ non-volatile pcr.
*/
static int tpm_suspend_pcr;
static int __init tpm_suspend_setup(char *str)
{
get_option(&str, &tpm_suspend_pcr);
return 1;
}
__setup("tpm_suspend_pcr=", tpm_suspend_setup);
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.

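A quick note on the TPM change above: the old tpm_suspend_pcr= __setup() hook becomes a writable (0644) module parameter, so the dummy PCR can be picked at runtime as well as at boot. A minimal userspace sketch, assuming the TPM core is built as the "tpm" module so the knob appears at /sys/module/tpm/parameters/suspend_pcr (the module name, path and PCR number here are assumptions, not part of the diff):

/* Sketch only: select the unused PCR that the suspend path will extend.
 * Assumes /sys/module/tpm/parameters/suspend_pcr exists (module_param_named
 * with mode 0644); the exact path depends on how the TPM core is built. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/module/tpm/parameters/suspend_pcr";
	const char *pcr = argc > 1 ? argv[1] : "16";	/* hypothetical default */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fprintf(f, "%s\n", pcr);
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}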

@ -58,7 +58,18 @@ extern const char linux_proc_banner[];
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define roundup(x, y) ( \
{ \
typeof(y) __y = y; \
(((x) + (__y - 1)) / __y) * __y; \
} \
)
#define rounddown(x, y) ( \
{ \
typeof(x) __x = (x); \
__x - (__x % (y)); \
} \
)
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
typeof(divisor) __divisor = divisor; \

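The statement-expression form of roundup() above, and the new rounddown(), capture the divisor (respectively the value) in a local, so each macro argument is referenced only once; the old roundup() expanded y three times. A small sketch of the intended use, with illustrative values:

/* Illustrative use of the kernel.h helpers above (kernel context assumed). */
#include <linux/kernel.h>	/* roundup(), rounddown() */

static size_t ring_slot_span(size_t offset)
{
	size_t next  = roundup(offset, 64);	/* e.g. 100 -> 128 */
	size_t start = rounddown(offset, 64);	/* e.g. 100 ->  64 */

	/* Single evaluation matters: roundup(len++, 8) now bumps len once. */
	return next - start;
}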

@ -39,8 +39,9 @@ enum ctattr_type {
CTA_TUPLE_MASTER,
CTA_NAT_SEQ_ADJ_ORIG,
CTA_NAT_SEQ_ADJ_REPLY,
CTA_SECMARK,
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@ -172,4 +173,11 @@ enum ctattr_help {
};
#define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
enum ctattr_secctx {
CTA_SECCTX_UNSPEC,
CTA_SECCTX_NAME,
__CTA_SECCTX_MAX
};
#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1)
#endif /* _IPCONNTRACK_NETLINK_H */
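CTA_SECCTX is a nested attribute carrying a CTA_SECCTX_NAME string. This merge only adds the dump side; as a hedged sketch, a kernel-side consumer could unpack it roughly as below (the policy table and helper are illustrative, not from the commit):

/* Illustrative only: extract the security context name from a nested
 * CTA_SECCTX attribute. */
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy secctx_policy[CTA_SECCTX_MAX + 1] = {
	[CTA_SECCTX_NAME] = { .type = NLA_NUL_STRING },
};

static const char *get_secctx_name(const struct nlattr *attr)
{
	struct nlattr *tb[CTA_SECCTX_MAX + 1];

	if (nla_parse_nested(tb, CTA_SECCTX_MAX, attr, secctx_policy) < 0)
		return NULL;
	if (!tb[CTA_SECCTX_NAME])
		return NULL;
	return nla_data(tb[CTA_SECCTX_NAME]);
}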


@ -11,18 +11,12 @@
* packets are being marked for.
*/
#define SECMARK_MODE_SEL 0x01 /* SELinux */
#define SECMARK_SELCTX_MAX 256
struct xt_secmark_target_selinux_info {
__u32 selsid;
char selctx[SECMARK_SELCTX_MAX];
};
#define SECMARK_SECCTX_MAX 256
struct xt_secmark_target_info {
__u8 mode;
union {
struct xt_secmark_target_selinux_info sel;
} u;
__u32 secid;
char secctx[SECMARK_SECCTX_MAX];
};
#endif /*_XT_SECMARK_H_target */
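With the SELinux-specific union gone, a ruleset tool only supplies a context string; the kernel resolves the secid itself in checkentry_lsm() at rule insertion time. A hypothetical userspace sketch of filling the structure (the helper name is made up; the field names come from the header above):

/* Hypothetical helper: prepare target info for a SECMARK rule. The kernel
 * ignores any secid set here and recomputes it from secctx at checkentry. */
#include <string.h>
#include <linux/netfilter/xt_SECMARK.h>

static int fill_secmark_info(struct xt_secmark_target_info *info,
			     const char *ctx)
{
	if (strlen(ctx) >= SECMARK_SECCTX_MAX)
		return -1;
	memset(info, 0, sizeof(*info));
	info->mode = SECMARK_MODE_SEL;
	strcpy(info->secctx, ctx);
	return 0;
}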


@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot,
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setscheduler(struct task_struct *p);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
extern int cap_syslog(int type, bool from_file);
@ -959,6 +959,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Sets the new child socket's sid to the openreq sid.
* @inet_conn_established:
* Sets the connection's peersid to the secmark on skb.
* @secmark_relabel_packet:
* check if the process should be allowed to relabel packets to the given secid
* @security_secmark_refcount_inc
* tells the LSM to increment the number of secmark labeling rules loaded
* @security_secmark_refcount_dec
* tells the LSM to decrement the number of secmark labeling rules loaded
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
* @tun_dev_create:
@ -1279,9 +1285,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Return 0 if permission is granted.
*
* @secid_to_secctx:
* Convert secid to security context.
* Convert secid to security context. If secdata is NULL the length of
* the result will be returned in seclen, but no secdata will be returned.
* This does mean that the length could change between calls to check the
* length and the next call which actually allocates and returns the secdata.
* @secid contains the security ID.
* @secdata contains the pointer that stores the converted security context.
* @seclen pointer which contains the length of the data
* @secctx_to_secid:
* Convert security context to secid.
* @secid contains the pointer to the generated security ID.
@ -1501,8 +1511,7 @@ struct security_operations {
int (*task_getioprio) (struct task_struct *p);
int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
int (*task_setscheduler) (struct task_struct *p, int policy,
struct sched_param *lp);
int (*task_setscheduler) (struct task_struct *p);
int (*task_getscheduler) (struct task_struct *p);
int (*task_movememory) (struct task_struct *p);
int (*task_kill) (struct task_struct *p,
@ -1594,6 +1603,9 @@ struct security_operations {
struct request_sock *req);
void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
int (*secmark_relabel_packet) (u32 secid);
void (*secmark_refcount_inc) (void);
void (*secmark_refcount_dec) (void);
void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
int (*tun_dev_create)(void);
void (*tun_dev_post_create)(struct sock *sk);
@ -1752,8 +1764,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
int security_task_setscheduler(struct task_struct *p,
int policy, struct sched_param *lp);
int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
int security_task_kill(struct task_struct *p, struct siginfo *info,
@ -2320,11 +2331,9 @@ static inline int security_task_setrlimit(struct task_struct *p,
return 0;
}
static inline int security_task_setscheduler(struct task_struct *p,
int policy,
struct sched_param *lp)
static inline int security_task_setscheduler(struct task_struct *p)
{
return cap_task_setscheduler(p, policy, lp);
return cap_task_setscheduler(p);
}
static inline int security_task_getscheduler(struct task_struct *p)
@ -2551,6 +2560,9 @@ void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb);
int security_secmark_relabel_packet(u32 secid);
void security_secmark_refcount_inc(void);
void security_secmark_refcount_dec(void);
int security_tun_dev_create(void);
void security_tun_dev_post_create(struct sock *sk);
int security_tun_dev_attach(struct sock *sk);
@ -2705,6 +2717,19 @@ static inline void security_inet_conn_established(struct sock *sk,
{
}
static inline int security_secmark_relabel_packet(u32 secid)
{
return 0;
}
static inline void security_secmark_refcount_inc(void)
{
}
static inline void security_secmark_refcount_dec(void)
{
}
static inline int security_tun_dev_create(void)
{
return 0;

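The revised @secid_to_secctx contract above (length-only query when secdata is NULL) is what ctnetlink_nlmsg_secctx_size() later in this merge relies on for netlink message sizing. A minimal kernel-side sketch of the two-call pattern, with a hypothetical caller-supplied buffer:

/* Sketch of the two-call pattern the hook documentation above describes.
 * The secid and destination buffer come from a hypothetical caller. */
#include <linux/security.h>
#include <linux/string.h>
#include <linux/errno.h>

static int copy_secctx(u32 secid, char *dst, u32 dst_len)
{
	char *ctx;
	u32 len;
	int rc;

	rc = security_secid_to_secctx(secid, NULL, &len);	/* length only */
	if (rc)
		return rc;

	rc = security_secid_to_secctx(secid, &ctx, &len);	/* actual data */
	if (rc)
		return rc;
	/* the length may have changed between calls; trust the second one */
	if (len > dst_len) {
		security_release_secctx(ctx, len);
		return -ENOSPC;
	}
	memcpy(dst, ctx, len);
	security_release_secctx(ctx, len);
	return 0;
}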

@ -20,75 +20,12 @@ struct kern_ipc_perm;
#ifdef CONFIG_SECURITY_SELINUX
/**
* selinux_string_to_sid - map a security context string to a security ID
* @str: the security context string to be mapped
* @sid: ID value returned via this.
*
* Returns 0 if successful, with the SID stored in sid. A value
* of zero for sid indicates no SID could be determined (but no error
* occurred).
*/
int selinux_string_to_sid(char *str, u32 *sid);
/**
* selinux_secmark_relabel_packet_permission - secmark permission check
* @sid: SECMARK ID value to be applied to network packet
*
* Returns 0 if the current task is allowed to set the SECMARK label of
* packets with the supplied security ID. Note that it is implicit that
* the packet is always being relabeled from the default unlabeled value,
* and that the access control decision is made in the AVC.
*/
int selinux_secmark_relabel_packet_permission(u32 sid);
/**
* selinux_secmark_refcount_inc - increments the secmark use counter
*
* SELinux keeps track of the current SECMARK targets in use so it knows
* when to apply SECMARK label access checks to network packets. This
function increments this reference count to indicate that a new SECMARK
* target has been configured.
*/
void selinux_secmark_refcount_inc(void);
/**
* selinux_secmark_refcount_dec - decrements the secmark use counter
*
* SELinux keeps track of the current SECMARK targets in use so it knows
* when to apply SECMARK label access checks to network packets. This
function decrements this reference count to indicate that one of the
* existing SECMARK targets has been removed/flushed.
*/
void selinux_secmark_refcount_dec(void);
/**
* selinux_is_enabled - is SELinux enabled?
*/
bool selinux_is_enabled(void);
#else
static inline int selinux_string_to_sid(const char *str, u32 *sid)
{
*sid = 0;
return 0;
}
static inline int selinux_secmark_relabel_packet_permission(u32 sid)
{
return 0;
}
static inline void selinux_secmark_refcount_inc(void)
{
return;
}
static inline void selinux_secmark_refcount_dec(void)
{
return;
}
static inline bool selinux_is_enabled(void)
{
return false;


@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
if (tsk->flags & PF_THREAD_BOUND)
return -EINVAL;
ret = security_task_setscheduler(tsk, 0, NULL);
ret = security_task_setscheduler(tsk);
if (ret)
return ret;
if (threadgroup) {
@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
rcu_read_lock();
list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
ret = security_task_setscheduler(c, 0, NULL);
ret = security_task_setscheduler(c);
if (ret) {
rcu_read_unlock();
return ret;


@ -4645,7 +4645,7 @@ recheck:
}
if (user) {
retval = security_task_setscheduler(p, policy, param);
retval = security_task_setscheduler(p);
if (retval)
return retval;
}
@ -4887,7 +4887,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
retval = security_task_setscheduler(p, 0, NULL);
retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;


@ -11,6 +11,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <linux/netfilter.h>
@ -87,6 +88,29 @@ static void ct_seq_stop(struct seq_file *s, void *v)
rcu_read_unlock();
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
int ret;
u32 len;
char *secctx;
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
return ret;
ret = seq_printf(s, "secctx=%s ", secctx);
security_release_secctx(secctx, len);
return ret;
}
#else
static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
return 0;
}
#endif
static int ct_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_tuple_hash *hash = v;
@ -148,10 +172,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (seq_printf(s, "secmark=%u ", ct->secmark))
if (ct_show_secctx(s, ct))
goto release;
#endif
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;


@ -22,6 +22,7 @@
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
@ -245,16 +246,31 @@ nla_put_failure:
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static inline int
ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct)
ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark));
return 0;
struct nlattr *nest_secctx;
int len, ret;
char *secctx;
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
return ret;
ret = -1;
nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
if (!nest_secctx)
goto nla_put_failure;
NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
nla_nest_end(skb, nest_secctx);
ret = 0;
nla_put_failure:
return -1;
security_release_secctx(secctx, len);
return ret;
}
#else
#define ctnetlink_dump_secmark(a, b) (0)
#define ctnetlink_dump_secctx(a, b) (0)
#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@ -391,7 +407,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
ctnetlink_dump_secmark(skb, ct) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
ctnetlink_dump_master(skb, ct) < 0 ||
@ -437,6 +453,17 @@ ctnetlink_counters_size(const struct nf_conn *ct)
;
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct)
{
int len;
security_secid_to_secctx(ct->secmark, NULL, &len);
return sizeof(char) * len;
}
#endif
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
@ -453,7 +480,8 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ nla_total_size(0) /* CTA_HELP */
+ nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */
+ nla_total_size(0) /* CTA_SECCTX */
+ nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */
#endif
#ifdef CONFIG_NF_NAT_NEEDED
+ 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
@ -556,7 +584,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if ((events & (1 << IPCT_SECMARK) || ct->secmark)
&& ctnetlink_dump_secmark(skb, ct) < 0)
&& ctnetlink_dump_secctx(skb, ct) < 0)
goto nla_put_failure;
#endif


@ -15,6 +15,7 @@
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@ -108,6 +109,29 @@ static void ct_seq_stop(struct seq_file *s, void *v)
rcu_read_unlock();
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
int ret;
u32 len;
char *secctx;
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
return ret;
ret = seq_printf(s, "secctx=%s ", secctx);
security_release_secctx(secctx, len);
return ret;
}
#else
static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
return 0;
}
#endif
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
@ -168,10 +192,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (seq_printf(s, "secmark=%u ", ct->secmark))
if (ct_show_secctx(s, ct))
goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))


@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/selinux.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>


@ -14,8 +14,8 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/selinux.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SECMARK.h>
@ -39,9 +39,8 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
switch (mode) {
case SECMARK_MODE_SEL:
secmark = info->u.sel.selsid;
secmark = info->secid;
break;
default:
BUG();
}
@ -50,33 +49,33 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
static int checkentry_selinux(struct xt_secmark_target_info *info)
static int checkentry_lsm(struct xt_secmark_target_info *info)
{
int err;
struct xt_secmark_target_selinux_info *sel = &info->u.sel;
sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';
info->secctx[SECMARK_SECCTX_MAX - 1] = '\0';
info->secid = 0;
err = selinux_string_to_sid(sel->selctx, &sel->selsid);
err = security_secctx_to_secid(info->secctx, strlen(info->secctx),
&info->secid);
if (err) {
if (err == -EINVAL)
pr_info("invalid SELinux context \'%s\'\n",
sel->selctx);
pr_info("invalid security context \'%s\'\n", info->secctx);
return err;
}
if (!sel->selsid) {
pr_info("unable to map SELinux context \'%s\'\n", sel->selctx);
if (!info->secid) {
pr_info("unable to map security context \'%s\'\n", info->secctx);
return -ENOENT;
}
err = selinux_secmark_relabel_packet_permission(sel->selsid);
err = security_secmark_relabel_packet(info->secid);
if (err) {
pr_info("unable to obtain relabeling permission\n");
return err;
}
selinux_secmark_refcount_inc();
security_secmark_refcount_inc();
return 0;
}
@ -100,16 +99,16 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
switch (info->mode) {
case SECMARK_MODE_SEL:
err = checkentry_selinux(info);
if (err <= 0)
return err;
break;
default:
pr_info("invalid mode: %hu\n", info->mode);
return -EINVAL;
}
err = checkentry_lsm(info);
if (err)
return err;
if (!mode)
mode = info->mode;
return 0;
@ -119,7 +118,7 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
{
switch (mode) {
case SECMARK_MODE_SEL:
selinux_secmark_refcount_dec();
security_secmark_refcount_dec();
}
}


@ -3,3 +3,4 @@
#
af_names.h
capability_names.h
rlim_names.h


@ -29,7 +29,7 @@
* aa_simple_write_to_buffer - common routine for getting policy from user
* @op: operation doing the user buffer copy
* @userbuf: user buffer to copy data from (NOT NULL)
* @alloc_size: size of user buffer
* @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
* @copy_size: size of data to copy from user buffer
* @pos: position write is at in the file (NOT NULL)
*
@ -42,6 +42,8 @@ static char *aa_simple_write_to_buffer(int op, const char __user *userbuf,
{
char *data;
BUG_ON(copy_size > alloc_size);
if (*pos != 0)
/* only writes from pos 0, that is complete writes */
return ERR_PTR(-ESPIPE);


@ -677,7 +677,18 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb)
{
}
static int cap_secmark_relabel_packet(u32 secid)
{
return 0;
}
static void cap_secmark_refcount_inc(void)
{
}
static void cap_secmark_refcount_dec(void)
{
}
static void cap_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
@ -777,7 +788,8 @@ static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
return -EOPNOTSUPP;
*secid = 0;
return 0;
}
static void cap_release_secctx(char *secdata, u32 seclen)
@ -1018,6 +1030,9 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, inet_conn_request);
set_to_cap_if_null(ops, inet_csk_clone);
set_to_cap_if_null(ops, inet_conn_established);
set_to_cap_if_null(ops, secmark_relabel_packet);
set_to_cap_if_null(ops, secmark_refcount_inc);
set_to_cap_if_null(ops, secmark_refcount_dec);
set_to_cap_if_null(ops, req_classify_flow);
set_to_cap_if_null(ops, tun_dev_create);
set_to_cap_if_null(ops, tun_dev_post_create);


@ -719,14 +719,11 @@ static int cap_safe_nice(struct task_struct *p)
/**
* cap_task_setscheduler - Determine if scheduler policy change is permitted
* @p: The task to affect
* @policy: The policy to effect
* @lp: The parameters to the scheduling policy
*
* Determine if the requested scheduler policy change is permitted for the
* specified task, returning 0 if permission is granted, -ve if denied.
*/
int cap_task_setscheduler(struct task_struct *p, int policy,
struct sched_param *lp)
int cap_task_setscheduler(struct task_struct *p)
{
return cap_safe_nice(p);
}


@ -89,20 +89,12 @@ __setup("security=", choose_lsm);
* Return true if:
* -The passed LSM is the one chosen by user at boot time,
* -or the passed LSM is configured as the default and the user did not
* choose an alternate LSM at boot time,
* -or there is no default LSM set and the user didn't specify a
* specific LSM and we're the first to ask for registration permission,
* -or the passed LSM is currently loaded.
* choose an alternate LSM at boot time.
* Otherwise, return false.
*/
int __init security_module_enable(struct security_operations *ops)
{
if (!*chosen_lsm)
strncpy(chosen_lsm, ops->name, SECURITY_NAME_MAX);
else if (strncmp(ops->name, chosen_lsm, SECURITY_NAME_MAX))
return 0;
return 1;
return !strcmp(ops->name, chosen_lsm);
}
/**
@ -786,10 +778,9 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
return security_ops->task_setrlimit(p, resource, new_rlim);
}
int security_task_setscheduler(struct task_struct *p,
int policy, struct sched_param *lp)
int security_task_setscheduler(struct task_struct *p)
{
return security_ops->task_setscheduler(p, policy, lp);
return security_ops->task_setscheduler(p);
}
int security_task_getscheduler(struct task_struct *p)
@ -1145,6 +1136,24 @@ void security_inet_conn_established(struct sock *sk,
security_ops->inet_conn_established(sk, skb);
}
int security_secmark_relabel_packet(u32 secid)
{
return security_ops->secmark_relabel_packet(secid);
}
EXPORT_SYMBOL(security_secmark_relabel_packet);
void security_secmark_refcount_inc(void)
{
security_ops->secmark_refcount_inc();
}
EXPORT_SYMBOL(security_secmark_refcount_inc);
void security_secmark_refcount_dec(void)
{
security_ops->secmark_refcount_dec();
}
EXPORT_SYMBOL(security_secmark_refcount_dec);
int security_tun_dev_create(void)
{
return security_ops->tun_dev_create();


@ -2,25 +2,20 @@
# Makefile for building the SELinux module as part of the kernel tree.
#
obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
selinux-y := avc.o \
hooks.o \
selinuxfs.o \
netlink.o \
nlmsgtab.o \
netif.o \
netnode.o \
netport.o \
exports.o
selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
netnode.o netport.o exports.o \
ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
selinux-$(CONFIG_NETLABEL) += netlabel.o
EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
$(obj)/avc.o: $(obj)/flask.h
$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h


@ -11,58 +11,9 @@
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/selinux.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/atomic.h>
#include "security.h"
#include "objsec.h"
/* SECMARK reference count */
extern atomic_t selinux_secmark_refcount;
int selinux_string_to_sid(char *str, u32 *sid)
{
if (selinux_enabled)
return security_context_to_sid(str, strlen(str), sid);
else {
*sid = 0;
return 0;
}
}
EXPORT_SYMBOL_GPL(selinux_string_to_sid);
int selinux_secmark_relabel_packet_permission(u32 sid)
{
if (selinux_enabled) {
const struct task_security_struct *__tsec;
u32 tsid;
__tsec = current_security();
tsid = __tsec->sid;
return avc_has_perm(tsid, sid, SECCLASS_PACKET,
PACKET__RELABELTO, NULL);
}
return 0;
}
EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission);
void selinux_secmark_refcount_inc(void)
{
atomic_inc(&selinux_secmark_refcount);
}
EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc);
void selinux_secmark_refcount_dec(void)
{
atomic_dec(&selinux_secmark_refcount);
}
EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
bool selinux_is_enabled(void)
{


@ -3354,11 +3354,11 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
return 0;
}
static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp)
static int selinux_task_setscheduler(struct task_struct *p)
{
int rc;
rc = cap_task_setscheduler(p, policy, lp);
rc = cap_task_setscheduler(p);
if (rc)
return rc;
@ -4279,6 +4279,27 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
static int selinux_secmark_relabel_packet(u32 sid)
{
const struct task_security_struct *__tsec;
u32 tsid;
__tsec = current_security();
tsid = __tsec->sid;
return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
}
static void selinux_secmark_refcount_inc(void)
{
atomic_inc(&selinux_secmark_refcount);
}
static void selinux_secmark_refcount_dec(void)
{
atomic_dec(&selinux_secmark_refcount);
}
static void selinux_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
@ -5533,6 +5554,9 @@ static struct security_operations selinux_ops = {
.inet_conn_request = selinux_inet_conn_request,
.inet_csk_clone = selinux_inet_csk_clone,
.inet_conn_established = selinux_inet_conn_established,
.secmark_relabel_packet = selinux_secmark_relabel_packet,
.secmark_refcount_inc = selinux_secmark_refcount_inc,
.secmark_refcount_dec = selinux_secmark_refcount_dec,
.req_classify_flow = selinux_req_classify_flow,
.tun_dev_create = selinux_tun_dev_create,
.tun_dev_post_create = selinux_tun_dev_post_create,


@ -17,7 +17,7 @@ struct security_class_mapping secclass_map[] = {
{ "compute_av", "compute_create", "compute_member",
"check_context", "load_policy", "compute_relabel",
"compute_user", "setenforce", "setbool", "setsecparam",
"setcheckreqprot", NULL } },
"setcheckreqprot", "read_policy", NULL } },
{ "process",
{ "fork", "transition", "sigchld", "sigkill",
"sigstop", "signull", "signal", "ptrace", "getsched", "setsched",


@ -9,6 +9,7 @@
#define _SELINUX_SECURITY_H_
#include <linux/magic.h>
#include <linux/types.h>
#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
@ -82,6 +83,8 @@ extern int selinux_policycap_openperm;
int security_mls_enabled(void);
int security_load_policy(void *data, size_t len);
int security_read_policy(void **data, ssize_t *len);
size_t security_policydb_len(void);
int security_policycap_supported(unsigned int req_cap);
@ -191,5 +194,25 @@ static inline int security_netlbl_sid_to_secattr(u32 sid,
const char *security_get_initial_sid_context(u32 sid);
/*
* status notifier using mmap interface
*/
extern struct page *selinux_kernel_status_page(void);
#define SELINUX_KERNEL_STATUS_VERSION 1
struct selinux_kernel_status {
u32 version; /* version number of this structure */
u32 sequence; /* sequence number of seqlock logic */
u32 enforcing; /* current setting of enforcing mode */
u32 policyload; /* times of policy reloaded */
u32 deny_unknown; /* current setting of deny_unknown */
/*
* The version > 0 supports above members.
*/
} __attribute__((packed));
extern void selinux_status_update_setenforce(int enforcing);
extern void selinux_status_update_policyload(int seqno);
#endif /* _SELINUX_SECURITY_H_ */
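The status page declared here is exported read-only via mmap() on /selinux/status (see the selinuxfs changes below), so a monitor can check enforcing/policyload without a system call per poll. A hedged userspace sketch, assuming selinuxfs is mounted at /selinux and the usual odd-sequence-means-update seqlock convention:

/* Illustrative userspace reader for /selinux/status. The mount point and the
 * retry loop are assumptions; the struct mirrors selinux_kernel_status above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct sel_status {
	uint32_t version, sequence, enforcing, policyload, deny_unknown;
} __attribute__((packed));

int main(void)
{
	int fd = open("/selinux/status", O_RDONLY);
	volatile struct sel_status *st;
	uint32_t seq, enforcing;

	if (fd < 0)
		return 1;
	st = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (st == MAP_FAILED)
		return 1;
	do {			/* retry while an update is in flight */
		seq = st->sequence;
		enforcing = st->enforcing;
	} while ((seq & 1) || seq != st->sequence);

	printf("enforcing=%u policyload=%u\n", enforcing, st->policyload);
	return 0;
}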


@ -68,6 +68,8 @@ static int *bool_pending_values;
static struct dentry *class_dir;
static unsigned long last_class_ino;
static char policy_opened;
/* global data for policy capabilities */
static struct dentry *policycap_dir;
@ -110,6 +112,8 @@ enum sel_inos {
SEL_COMPAT_NET, /* whether to use old compat network packet controls */
SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
SEL_STATUS, /* export current status using mmap() */
SEL_POLICY, /* allow userspace to read the in kernel policy */
SEL_INO_NEXT, /* The next inode number to use */
};
@ -171,6 +175,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
if (selinux_enforcing)
avc_ss_reset(0);
selnl_notify_setenforce(selinux_enforcing);
selinux_status_update_setenforce(selinux_enforcing);
}
length = count;
out:
@ -205,6 +210,59 @@ static const struct file_operations sel_handle_unknown_ops = {
.llseek = generic_file_llseek,
};
static int sel_open_handle_status(struct inode *inode, struct file *filp)
{
struct page *status = selinux_kernel_status_page();
if (!status)
return -ENOMEM;
filp->private_data = status;
return 0;
}
static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
struct page *status = filp->private_data;
BUG_ON(!status);
return simple_read_from_buffer(buf, count, ppos,
page_address(status),
sizeof(struct selinux_kernel_status));
}
static int sel_mmap_handle_status(struct file *filp,
struct vm_area_struct *vma)
{
struct page *status = filp->private_data;
unsigned long size = vma->vm_end - vma->vm_start;
BUG_ON(!status);
/* only allows one page from the head */
if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
return -EIO;
/* disallow writable mapping */
if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* disallow mprotect() turns it into writable */
vma->vm_flags &= ~VM_MAYWRITE;
return remap_pfn_range(vma, vma->vm_start,
page_to_pfn(status),
size, vma->vm_page_prot);
}
static const struct file_operations sel_handle_status_ops = {
.open = sel_open_handle_status,
.read = sel_read_handle_status,
.mmap = sel_mmap_handle_status,
.llseek = generic_file_llseek,
};
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static ssize_t sel_write_disable(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@ -296,6 +354,141 @@ static const struct file_operations sel_mls_ops = {
.llseek = generic_file_llseek,
};
struct policy_load_memory {
size_t len;
void *data;
};
static int sel_open_policy(struct inode *inode, struct file *filp)
{
struct policy_load_memory *plm = NULL;
int rc;
BUG_ON(filp->private_data);
mutex_lock(&sel_mutex);
rc = task_has_security(current, SECURITY__READ_POLICY);
if (rc)
goto err;
rc = -EBUSY;
if (policy_opened)
goto err;
rc = -ENOMEM;
plm = kzalloc(sizeof(*plm), GFP_KERNEL);
if (!plm)
goto err;
if (i_size_read(inode) != security_policydb_len()) {
mutex_lock(&inode->i_mutex);
i_size_write(inode, security_policydb_len());
mutex_unlock(&inode->i_mutex);
}
rc = security_read_policy(&plm->data, &plm->len);
if (rc)
goto err;
policy_opened = 1;
filp->private_data = plm;
mutex_unlock(&sel_mutex);
return 0;
err:
mutex_unlock(&sel_mutex);
if (plm)
vfree(plm->data);
kfree(plm);
return rc;
}
static int sel_release_policy(struct inode *inode, struct file *filp)
{
struct policy_load_memory *plm = filp->private_data;
BUG_ON(!plm);
policy_opened = 0;
vfree(plm->data);
kfree(plm);
return 0;
}
static ssize_t sel_read_policy(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
struct policy_load_memory *plm = filp->private_data;
int ret;
mutex_lock(&sel_mutex);
ret = task_has_security(current, SECURITY__READ_POLICY);
if (ret)
goto out;
ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
out:
mutex_unlock(&sel_mutex);
return ret;
}
static int sel_mmap_policy_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct policy_load_memory *plm = vma->vm_file->private_data;
unsigned long offset;
struct page *page;
if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
return VM_FAULT_SIGBUS;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= roundup(plm->len, PAGE_SIZE))
return VM_FAULT_SIGBUS;
page = vmalloc_to_page(plm->data + offset);
get_page(page);
vmf->page = page;
return 0;
}
static struct vm_operations_struct sel_mmap_policy_ops = {
.fault = sel_mmap_policy_fault,
.page_mkwrite = sel_mmap_policy_fault,
};
int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED) {
/* do not allow mprotect to make mapping writable */
vma->vm_flags &= ~VM_MAYWRITE;
if (vma->vm_flags & VM_WRITE)
return -EACCES;
}
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &sel_mmap_policy_ops;
return 0;
}
static const struct file_operations sel_policy_ops = {
.open = sel_open_policy,
.read = sel_read_policy,
.mmap = sel_mmap_policy,
.release = sel_release_policy,
};
static ssize_t sel_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@ -1612,6 +1805,8 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
[SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
[SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
[SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR},
/* last one */ {""}
};
ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);

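The new SEL_POLICY node lets userspace pull the running policy back out either with plain read() or with a read-only mmap() backed by sel_mmap_policy_fault(). A hedged userspace sketch, assuming selinuxfs is mounted at /selinux and that the caller holds the security { read_policy } permission checked above:

/* Illustrative: dump the in-kernel policy to stdout via /selinux/policy.
 * The mount point is an assumption; the file size is set from
 * security_policydb_len() at open time. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/selinux/policy", O_RDONLY);

	if (fd < 0) {
		perror("/selinux/policy");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return n < 0 ? 1 : 0;
}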

@ -1,9 +0,0 @@
#
# Makefile for building the SELinux security server as part of the kernel tree.
#
EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
obj-y := ss.o
ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o


@ -266,8 +266,8 @@ int avtab_alloc(struct avtab *h, u32 nrules)
if (shift > 2)
shift = shift - 2;
nslot = 1 << shift;
if (nslot > MAX_AVTAB_SIZE)
nslot = MAX_AVTAB_SIZE;
if (nslot > MAX_AVTAB_HASH_BUCKETS)
nslot = MAX_AVTAB_HASH_BUCKETS;
mask = nslot - 1;
h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
@ -501,6 +501,48 @@ bad:
goto out;
}
int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
{
__le16 buf16[4];
__le32 buf32[1];
int rc;
buf16[0] = cpu_to_le16(cur->key.source_type);
buf16[1] = cpu_to_le16(cur->key.target_type);
buf16[2] = cpu_to_le16(cur->key.target_class);
buf16[3] = cpu_to_le16(cur->key.specified);
rc = put_entry(buf16, sizeof(u16), 4, fp);
if (rc)
return rc;
buf32[0] = cpu_to_le32(cur->datum.data);
rc = put_entry(buf32, sizeof(u32), 1, fp);
if (rc)
return rc;
return 0;
}
int avtab_write(struct policydb *p, struct avtab *a, void *fp)
{
unsigned int i;
int rc = 0;
struct avtab_node *cur;
__le32 buf[1];
buf[0] = cpu_to_le32(a->nel);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (i = 0; i < a->nslot; i++) {
for (cur = a->htable[i]; cur; cur = cur->next) {
rc = avtab_write_item(p, cur, fp);
if (rc)
return rc;
}
}
return rc;
}
void avtab_cache_init(void)
{
avtab_node_cachep = kmem_cache_create("avtab_node",


@ -71,6 +71,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
void *p);
int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
int avtab_write(struct policydb *p, struct avtab *a, void *fp);
struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
struct avtab_datum *datum);
@ -85,7 +87,6 @@ void avtab_cache_destroy(void);
#define MAX_AVTAB_HASH_BITS 11
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
#endif /* _SS_AVTAB_H_ */


@ -490,6 +490,129 @@ err:
return rc;
}
int cond_write_bool(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct cond_bool_datum *booldatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
__le32 buf[3];
u32 len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(booldatum->value);
buf[1] = cpu_to_le32(booldatum->state);
buf[2] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
return 0;
}
/*
* cond_write_cond_av_list doesn't write out the av_list nodes.
* Instead it writes out the key/value pairs from the avtab. This
is necessary because there is no way to uniquely identify rules
* in the avtab so it is not possible to associate individual rules
* in the avtab with a conditional without saving them as part of
* the conditional. This means that the avtab with the conditional
* rules will not be saved but will be rebuilt on policy load.
*/
static int cond_write_av_list(struct policydb *p,
struct cond_av_list *list, struct policy_file *fp)
{
__le32 buf[1];
struct cond_av_list *cur_list;
u32 len;
int rc;
len = 0;
for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
if (len == 0)
return 0;
for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
rc = avtab_write_item(p, cur_list->node, fp);
if (rc)
return rc;
}
return 0;
}
int cond_write_node(struct policydb *p, struct cond_node *node,
struct policy_file *fp)
{
struct cond_expr *cur_expr;
__le32 buf[2];
int rc;
u32 len = 0;
buf[0] = cpu_to_le32(node->cur_state);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
buf[0] = cpu_to_le32(cur_expr->expr_type);
buf[1] = cpu_to_le32(cur_expr->bool);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
}
rc = cond_write_av_list(p, node->true_list, fp);
if (rc)
return rc;
rc = cond_write_av_list(p, node->false_list, fp);
if (rc)
return rc;
return 0;
}
int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
{
struct cond_node *cur;
u32 len;
__le32 buf[1];
int rc;
len = 0;
for (cur = list; cur != NULL; cur = cur->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur = list; cur != NULL; cur = cur->next) {
rc = cond_write_node(p, cur, fp);
if (rc)
return rc;
}
return 0;
}
/* Determine whether additional permissions are granted by the conditional
* av table, and if so, add them to the result
*/


@ -69,6 +69,8 @@ int cond_index_bool(void *key, void *datum, void *datap);
int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
int cond_read_list(struct policydb *p, void *fp);
int cond_write_bool(void *key, void *datum, void *ptr);
int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);


@ -22,6 +22,8 @@
#include "ebitmap.h"
#include "policydb.h"
#define BITS_PER_U64 (sizeof(u64) * 8)
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
struct ebitmap_node *n1, *n2;
@ -363,10 +365,10 @@ int ebitmap_read(struct ebitmap *e, void *fp)
e->highbit = le32_to_cpu(buf[1]);
count = le32_to_cpu(buf[2]);
if (mapunit != sizeof(u64) * 8) {
if (mapunit != BITS_PER_U64) {
printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
"match my size %Zd (high bit was %d)\n",
mapunit, sizeof(u64) * 8, e->highbit);
mapunit, BITS_PER_U64, e->highbit);
goto bad;
}
@ -446,3 +448,78 @@ bad:
ebitmap_destroy(e);
goto out;
}
int ebitmap_write(struct ebitmap *e, void *fp)
{
struct ebitmap_node *n;
u32 count;
__le32 buf[3];
u64 map;
int bit, last_bit, last_startbit, rc;
buf[0] = cpu_to_le32(BITS_PER_U64);
count = 0;
last_bit = 0;
last_startbit = -1;
ebitmap_for_each_positive_bit(e, n, bit) {
if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
count++;
last_startbit = rounddown(bit, BITS_PER_U64);
}
last_bit = roundup(bit + 1, BITS_PER_U64);
}
buf[1] = cpu_to_le32(last_bit);
buf[2] = cpu_to_le32(count);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
map = 0;
last_startbit = INT_MIN;
ebitmap_for_each_positive_bit(e, n, bit) {
if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
__le64 buf64[1];
/* this is the very first bit */
if (!map) {
last_startbit = rounddown(bit, BITS_PER_U64);
map = (u64)1 << (bit - last_startbit);
continue;
}
/* write the last node */
buf[0] = cpu_to_le32(last_startbit);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
buf64[0] = cpu_to_le64(map);
rc = put_entry(buf64, sizeof(u64), 1, fp);
if (rc)
return rc;
/* set up for the next node */
map = 0;
last_startbit = rounddown(bit, BITS_PER_U64);
}
map |= (u64)1 << (bit - last_startbit);
}
/* write the last node */
if (map) {
__le64 buf64[1];
/* write the last node */
buf[0] = cpu_to_le32(last_startbit);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
buf64[0] = cpu_to_le64(map);
rc = put_entry(buf64, sizeof(u64), 1, fp);
if (rc)
return rc;
}
return 0;
}


@ -123,6 +123,7 @@ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
int ebitmap_read(struct ebitmap *e, void *fp);
int ebitmap_write(struct ebitmap *e, void *fp);
#ifdef CONFIG_NETLABEL
int ebitmap_netlbl_export(struct ebitmap *ebmap,


@ -37,6 +37,7 @@
#include "policydb.h"
#include "conditional.h"
#include "mls.h"
#include "services.h"
#define _DEBUG_HASHES
@ -185,9 +186,19 @@ static u32 rangetr_hash(struct hashtab *h, const void *k)
static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
{
const struct range_trans *key1 = k1, *key2 = k2;
return (key1->source_type != key2->source_type ||
key1->target_type != key2->target_type ||
key1->target_class != key2->target_class);
int v;
v = key1->source_type - key2->source_type;
if (v)
return v;
v = key1->target_type - key2->target_type;
if (v)
return v;
v = key1->target_class - key2->target_class;
return v;
}
/*
@ -1624,11 +1635,11 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
static int type_bounds_sanity_check(void *key, void *datum, void *datap)
{
struct type_datum *upper, *type;
struct type_datum *upper;
struct policydb *p = datap;
int depth = 0;
upper = type = datum;
upper = datum;
while (upper->bounds) {
if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
printk(KERN_ERR "SELinux: type %s: "
@ -2306,3 +2317,843 @@ bad:
policydb_destroy(p);
goto out;
}
/*
* Write a MLS level structure to a policydb binary
* representation file.
*/
static int mls_write_level(struct mls_level *l, void *fp)
{
__le32 buf[1];
int rc;
buf[0] = cpu_to_le32(l->sens);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = ebitmap_write(&l->cat, fp);
if (rc)
return rc;
return 0;
}
/*
* Write a MLS range structure to a policydb binary
* representation file.
*/
static int mls_write_range_helper(struct mls_range *r, void *fp)
{
__le32 buf[3];
size_t items;
int rc, eq;
eq = mls_level_eq(&r->level[1], &r->level[0]);
if (eq)
items = 2;
else
items = 3;
buf[0] = cpu_to_le32(items-1);
buf[1] = cpu_to_le32(r->level[0].sens);
if (!eq)
buf[2] = cpu_to_le32(r->level[1].sens);
BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
rc = put_entry(buf, sizeof(u32), items, fp);
if (rc)
return rc;
rc = ebitmap_write(&r->level[0].cat, fp);
if (rc)
return rc;
if (!eq) {
rc = ebitmap_write(&r->level[1].cat, fp);
if (rc)
return rc;
}
return 0;
}
static int sens_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct level_datum *levdatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
__le32 buf[2];
size_t len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(len);
buf[1] = cpu_to_le32(levdatum->isalias);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
rc = mls_write_level(levdatum->level, fp);
if (rc)
return rc;
return 0;
}
static int cat_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct cat_datum *catdatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
__le32 buf[3];
size_t len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(len);
buf[1] = cpu_to_le32(catdatum->value);
buf[2] = cpu_to_le32(catdatum->isalias);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
return 0;
}
static int role_trans_write(struct role_trans *r, void *fp)
{
struct role_trans *tr;
u32 buf[3];
size_t nel;
int rc;
nel = 0;
for (tr = r; tr; tr = tr->next)
nel++;
buf[0] = cpu_to_le32(nel);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (tr = r; tr; tr = tr->next) {
buf[0] = cpu_to_le32(tr->role);
buf[1] = cpu_to_le32(tr->type);
buf[2] = cpu_to_le32(tr->new_role);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
}
return 0;
}
static int role_allow_write(struct role_allow *r, void *fp)
{
struct role_allow *ra;
u32 buf[2];
size_t nel;
int rc;
nel = 0;
for (ra = r; ra; ra = ra->next)
nel++;
buf[0] = cpu_to_le32(nel);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (ra = r; ra; ra = ra->next) {
buf[0] = cpu_to_le32(ra->role);
buf[1] = cpu_to_le32(ra->new_role);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
}
return 0;
}
/*
* Write a security context structure
* to a policydb binary representation file.
*/
static int context_write(struct policydb *p, struct context *c,
void *fp)
{
int rc;
__le32 buf[3];
buf[0] = cpu_to_le32(c->user);
buf[1] = cpu_to_le32(c->role);
buf[2] = cpu_to_le32(c->type);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
rc = mls_write_range_helper(&c->range, fp);
if (rc)
return rc;
return 0;
}
/*
* The following *_write functions are used to
* write the symbol data to a policy database
* binary representation file.
*/
static int perm_write(void *vkey, void *datum, void *fp)
{
char *key = vkey;
struct perm_datum *perdatum = datum;
__le32 buf[2];
size_t len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(len);
buf[1] = cpu_to_le32(perdatum->value);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
return 0;
}
static int common_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct common_datum *comdatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
__le32 buf[4];
size_t len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(len);
buf[1] = cpu_to_le32(comdatum->value);
buf[2] = cpu_to_le32(comdatum->permissions.nprim);
buf[3] = cpu_to_le32(comdatum->permissions.table->nel);
rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
rc = hashtab_map(comdatum->permissions.table, perm_write, fp);
if (rc)
return rc;
return 0;
}
static int write_cons_helper(struct policydb *p, struct constraint_node *node,
void *fp)
{
struct constraint_node *c;
struct constraint_expr *e;
__le32 buf[3];
u32 nel;
int rc;
for (c = node; c; c = c->next) {
nel = 0;
for (e = c->expr; e; e = e->next)
nel++;
buf[0] = cpu_to_le32(c->permissions);
buf[1] = cpu_to_le32(nel);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
for (e = c->expr; e; e = e->next) {
buf[0] = cpu_to_le32(e->expr_type);
buf[1] = cpu_to_le32(e->attr);
buf[2] = cpu_to_le32(e->op);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
switch (e->expr_type) {
case CEXPR_NAMES:
rc = ebitmap_write(&e->names, fp);
if (rc)
return rc;
break;
default:
break;
}
}
}
return 0;
}
static int class_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct class_datum *cladatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
struct policydb *p = pd->p;
struct constraint_node *c;
__le32 buf[6];
u32 ncons;
size_t len, len2;
int rc;
len = strlen(key);
if (cladatum->comkey)
len2 = strlen(cladatum->comkey);
else
len2 = 0;
ncons = 0;
for (c = cladatum->constraints; c; c = c->next)
ncons++;
buf[0] = cpu_to_le32(len);
buf[1] = cpu_to_le32(len2);
buf[2] = cpu_to_le32(cladatum->value);
buf[3] = cpu_to_le32(cladatum->permissions.nprim);
if (cladatum->permissions.table)
buf[4] = cpu_to_le32(cladatum->permissions.table->nel);
else
buf[4] = 0;
buf[5] = cpu_to_le32(ncons);
rc = put_entry(buf, sizeof(u32), 6, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
if (cladatum->comkey) {
rc = put_entry(cladatum->comkey, 1, len2, fp);
if (rc)
return rc;
}
rc = hashtab_map(cladatum->permissions.table, perm_write, fp);
if (rc)
return rc;
rc = write_cons_helper(p, cladatum->constraints, fp);
if (rc)
return rc;
/* write out the validatetrans rule */
ncons = 0;
for (c = cladatum->validatetrans; c; c = c->next)
ncons++;
buf[0] = cpu_to_le32(ncons);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = write_cons_helper(p, cladatum->validatetrans, fp);
if (rc)
return rc;
return 0;
}
static int role_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct role_datum *role = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
struct policydb *p = pd->p;
__le32 buf[3];
size_t items, len;
int rc;
len = strlen(key);
items = 0;
buf[items++] = cpu_to_le32(len);
buf[items++] = cpu_to_le32(role->value);
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
buf[items++] = cpu_to_le32(role->bounds);
BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
rc = put_entry(buf, sizeof(u32), items, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
rc = ebitmap_write(&role->dominates, fp);
if (rc)
return rc;
rc = ebitmap_write(&role->types, fp);
if (rc)
return rc;
return 0;
}
static int type_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct type_datum *typdatum = datum;
struct policy_data *pd = ptr;
struct policydb *p = pd->p;
void *fp = pd->fp;
__le32 buf[4];
int rc;
size_t items, len;
len = strlen(key);
items = 0;
buf[items++] = cpu_to_le32(len);
buf[items++] = cpu_to_le32(typdatum->value);
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
u32 properties = 0;
if (typdatum->primary)
properties |= TYPEDATUM_PROPERTY_PRIMARY;
if (typdatum->attribute)
properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
buf[items++] = cpu_to_le32(properties);
buf[items++] = cpu_to_le32(typdatum->bounds);
} else {
buf[items++] = cpu_to_le32(typdatum->primary);
}
BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
rc = put_entry(buf, sizeof(u32), items, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
return 0;
}
static int user_write(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct user_datum *usrdatum = datum;
struct policy_data *pd = ptr;
struct policydb *p = pd->p;
void *fp = pd->fp;
__le32 buf[3];
size_t items, len;
int rc;
len = strlen(key);
items = 0;
buf[items++] = cpu_to_le32(len);
buf[items++] = cpu_to_le32(usrdatum->value);
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
buf[items++] = cpu_to_le32(usrdatum->bounds);
BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
rc = put_entry(buf, sizeof(u32), items, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
rc = ebitmap_write(&usrdatum->roles, fp);
if (rc)
return rc;
rc = mls_write_range_helper(&usrdatum->range, fp);
if (rc)
return rc;
rc = mls_write_level(&usrdatum->dfltlevel, fp);
if (rc)
return rc;
return 0;
}
static int (*write_f[SYM_NUM]) (void *key, void *datum,
void *datap) =
{
common_write,
class_write,
role_write,
type_write,
user_write,
cond_write_bool,
sens_write,
cat_write,
};
static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
void *fp)
{
unsigned int i, j, rc;
size_t nel, len;
__le32 buf[3];
u32 nodebuf[8];
struct ocontext *c;
for (i = 0; i < info->ocon_num; i++) {
nel = 0;
for (c = p->ocontexts[i]; c; c = c->next)
nel++;
buf[0] = cpu_to_le32(nel);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (c = p->ocontexts[i]; c; c = c->next) {
switch (i) {
case OCON_ISID:
buf[0] = cpu_to_le32(c->sid[0]);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
break;
case OCON_FS:
case OCON_NETIF:
len = strlen(c->u.name);
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = put_entry(c->u.name, 1, len, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
rc = context_write(p, &c->context[1], fp);
if (rc)
return rc;
break;
case OCON_PORT:
buf[0] = cpu_to_le32(c->u.port.protocol);
buf[1] = cpu_to_le32(c->u.port.low_port);
buf[2] = cpu_to_le32(c->u.port.high_port);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
break;
case OCON_NODE:
nodebuf[0] = c->u.node.addr; /* network order */
nodebuf[1] = c->u.node.mask; /* network order */
rc = put_entry(nodebuf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
break;
case OCON_FSUSE:
buf[0] = cpu_to_le32(c->v.behavior);
len = strlen(c->u.name);
buf[1] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = put_entry(c->u.name, 1, len, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
break;
case OCON_NODE6:
for (j = 0; j < 4; j++)
nodebuf[j] = c->u.node6.addr[j]; /* network order */
for (j = 0; j < 4; j++)
nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
rc = put_entry(nodebuf, sizeof(u32), 8, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
break;
}
}
}
return 0;
}
static int genfs_write(struct policydb *p, void *fp)
{
struct genfs *genfs;
struct ocontext *c;
size_t len;
__le32 buf[1];
int rc;
len = 0;
for (genfs = p->genfs; genfs; genfs = genfs->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (genfs = p->genfs; genfs; genfs = genfs->next) {
len = strlen(genfs->fstype);
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = put_entry(genfs->fstype, 1, len, fp);
if (rc)
return rc;
len = 0;
for (c = genfs->head; c; c = c->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (c = genfs->head; c; c = c->next) {
len = strlen(c->u.name);
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = put_entry(c->u.name, 1, len, fp);
if (rc)
return rc;
buf[0] = cpu_to_le32(c->v.sclass);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
if (rc)
return rc;
}
}
return 0;
}
static int range_count(void *key, void *data, void *ptr)
{
int *cnt = ptr;
*cnt = *cnt + 1;
return 0;
}
static int range_write_helper(void *key, void *data, void *ptr)
{
__le32 buf[2];
struct range_trans *rt = key;
struct mls_range *r = data;
struct policy_data *pd = ptr;
void *fp = pd->fp;
struct policydb *p = pd->p;
int rc;
buf[0] = cpu_to_le32(rt->source_type);
buf[1] = cpu_to_le32(rt->target_type);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
buf[0] = cpu_to_le32(rt->target_class);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
}
rc = mls_write_range_helper(r, fp);
if (rc)
return rc;
return 0;
}
static int range_write(struct policydb *p, void *fp)
{
size_t nel;
__le32 buf[1];
int rc;
struct policy_data pd;
pd.p = p;
pd.fp = fp;
/* count the number of entries in the hashtab */
nel = 0;
rc = hashtab_map(p->range_tr, range_count, &nel);
if (rc)
return rc;
buf[0] = cpu_to_le32(nel);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
/* actually write all of the entries */
rc = hashtab_map(p->range_tr, range_write_helper, &pd);
if (rc)
return rc;
return 0;
}
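
range_write() above makes two passes over the same hash table iterator: range_count() only tallies entries so the element count can be emitted ahead of the records, and range_write_helper() then serializes each entry. A minimal standalone sketch of that count-then-write pattern follows; the list type and callbacks are illustrative stand-ins, not kernel APIs.

/* Sketch of the two-pass "count, then emit" serialization pattern. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct entry {
	uint32_t key;
	uint32_t val;
	struct entry *next;
};

static int count_cb(struct entry *e, void *arg)
{
	size_t *n = arg;

	(void)e;
	(*n)++;
	return 0;
}

static int write_cb(struct entry *e, void *arg)
{
	FILE *fp = arg;
	uint32_t rec[2] = { e->key, e->val };

	return fwrite(rec, sizeof(uint32_t), 2, fp) == 2 ? 0 : -1;
}

static int for_each(struct entry *head,
		    int (*fn)(struct entry *, void *), void *arg)
{
	int rc;

	for (; head; head = head->next) {
		rc = fn(head, arg);
		if (rc)
			return rc;
	}
	return 0;
}

static int write_table(struct entry *head, FILE *fp)
{
	size_t nel = 0;
	uint32_t n;

	for_each(head, count_cb, &nel);		/* pass 1: count entries */
	n = (uint32_t)nel;
	if (fwrite(&n, sizeof(n), 1, fp) != 1)	/* write count first */
		return -1;
	return for_each(head, write_cb, fp);	/* pass 2: emit the records */
}
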
/*
* Write the configuration data in a policy database
* structure to a policy database binary representation
* file.
*/
int policydb_write(struct policydb *p, void *fp)
{
unsigned int i, num_syms;
int rc;
__le32 buf[4];
u32 config;
size_t len;
struct policydb_compat_info *info;
/*
* Refuse to write policy older than the compressed avtab format,
* to keep the writer simple. Other checks have been dropped because
* this assumption is made throughout the writer code, so be careful
* if you ever try to remove this restriction.
*/
if (p->policyvers < POLICYDB_VERSION_AVTAB) {
printk(KERN_ERR "SELinux: refusing to write policy version %d."
" Because it is less than version %d\n", p->policyvers,
POLICYDB_VERSION_AVTAB);
return -EINVAL;
}
config = 0;
if (p->mls_enabled)
config |= POLICYDB_CONFIG_MLS;
if (p->reject_unknown)
config |= REJECT_UNKNOWN;
if (p->allow_unknown)
config |= ALLOW_UNKNOWN;
/* Write the magic number and string identifiers. */
buf[0] = cpu_to_le32(POLICYDB_MAGIC);
len = strlen(POLICYDB_STRING);
buf[1] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = put_entry(POLICYDB_STRING, 1, len, fp);
if (rc)
return rc;
/* Write the version, config, and table sizes. */
info = policydb_lookup_compat(p->policyvers);
if (!info) {
printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
"version %d", p->policyvers);
return rc;
}
buf[0] = cpu_to_le32(p->policyvers);
buf[1] = cpu_to_le32(config);
buf[2] = cpu_to_le32(info->sym_num);
buf[3] = cpu_to_le32(info->ocon_num);
rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
return rc;
if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
rc = ebitmap_write(&p->policycaps, fp);
if (rc)
return rc;
}
if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
rc = ebitmap_write(&p->permissive_map, fp);
if (rc)
return rc;
}
num_syms = info->sym_num;
for (i = 0; i < num_syms; i++) {
struct policy_data pd;
pd.fp = fp;
pd.p = p;
buf[0] = cpu_to_le32(p->symtab[i].nprim);
buf[1] = cpu_to_le32(p->symtab[i].table->nel);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
if (rc)
return rc;
}
rc = avtab_write(p, &p->te_avtab, fp);
if (rc)
return rc;
rc = cond_write_list(p, p->cond_list, fp);
if (rc)
return rc;
rc = role_trans_write(p->role_tr, fp);
if (rc)
return rc;
rc = role_allow_write(p->role_allow, fp);
if (rc)
return rc;
rc = ocontext_write(p, info, fp);
if (rc)
return rc;
rc = genfs_write(p, fp);
if (rc)
return rc;
rc = range_write(p, fp);
if (rc)
return rc;
for (i = 0; i < p->p_types.nprim; i++) {
struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
BUG_ON(!e);
rc = ebitmap_write(e, fp);
if (rc)
return rc;
}
return 0;
}
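
policydb_write() begins the image with a fixed header: the magic word, the length-prefixed identifier string, then version, config flags, and the symbol/ocontext table counts taken from the compat table. Below is a hedged userspace sketch that parses just that header; the expected "SE Linux" identifier string and little-endian host byte order are assumptions to verify against the kernel headers.

/*
 * Illustrative userspace parser for the fixed header emitted above.
 * Field order follows policydb_write(); endianness handling is omitted
 * for brevity (le32 on disk, little-endian host assumed).
 */
#include <stdint.h>
#include <stdio.h>

struct policy_header {
	uint32_t magic;
	uint32_t version;
	uint32_t config;
	uint32_t sym_num;
	uint32_t ocon_num;
};

static int parse_policy_header(FILE *fp, struct policy_header *hdr)
{
	uint32_t buf[4];
	uint32_t len;
	char str[64];

	/* magic number and identifier string length */
	if (fread(buf, sizeof(uint32_t), 2, fp) != 2)
		return -1;
	hdr->magic = buf[0];
	len = buf[1];

	/* identifier string, expected to be "SE Linux" (assumption) */
	if (len >= sizeof(str) || fread(str, 1, len, fp) != len)
		return -1;
	str[len] = '\0';

	/* version, config flags, and the two table counts */
	if (fread(buf, sizeof(uint32_t), 4, fp) != 4)
		return -1;
	hdr->version = buf[0];
	hdr->config = buf[1];
	hdr->sym_num = buf[2];
	hdr->ocon_num = buf[3];
	return 0;
}
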

View File

@ -254,6 +254,9 @@ struct policydb {
struct ebitmap permissive_map;
/* length of this policy when it was loaded */
size_t len;
unsigned int policyvers;
unsigned int reject_unknown : 1;
@ -270,6 +273,7 @@ extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
extern int policydb_read(struct policydb *p, void *fp);
extern int policydb_write(struct policydb *p, void *fp);
#define PERM_SYMTAB_SIZE 32
@ -290,6 +294,11 @@ struct policy_file {
size_t len;
};
struct policy_data {
struct policydb *p;
void *fp;
};
static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
{
if (bytes > fp->len)
@ -301,6 +310,17 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
return 0;
}
static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp)
{
size_t len = bytes * num;
memcpy(fp->data, buf, len);
fp->data += len;
fp->len -= len;
return 0;
}
extern u16 string_to_security_class(struct policydb *p, const char *name);
extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);

View File

@ -51,6 +51,7 @@
#include <linux/mutex.h>
#include <linux/selinux.h>
#include <linux/flex_array.h>
#include <linux/vmalloc.h>
#include <net/netlabel.h>
#include "flask.h"
@ -991,7 +992,8 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
{
char *scontextp;
*scontext = NULL;
if (scontext)
*scontext = NULL;
*scontext_len = 0;
if (context->len) {
@ -1008,6 +1010,9 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
*scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1;
*scontext_len += mls_compute_context_len(context);
if (!scontext)
return 0;
/* Allocate space for the context; caller must free this space. */
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp)
@ -1047,7 +1052,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
struct context *context;
int rc = 0;
*scontext = NULL;
if (scontext)
*scontext = NULL;
*scontext_len = 0;
if (!ss_initialized) {
@ -1055,6 +1061,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
char *scontextp;
*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
if (!scontext)
goto out;
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp) {
rc = -ENOMEM;
@ -1769,6 +1777,7 @@ int security_load_policy(void *data, size_t len)
return rc;
}
policydb.len = len;
rc = selinux_set_mapping(&policydb, secclass_map,
&current_mapping,
&current_mapping_size);
@ -1791,6 +1800,7 @@ int security_load_policy(void *data, size_t len)
selinux_complete_init();
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
return 0;
@ -1804,6 +1814,7 @@ int security_load_policy(void *data, size_t len)
if (rc)
return rc;
newpolicydb.len = len;
/* If switching between different policy types, log MLS status */
if (policydb.mls_enabled && !newpolicydb.mls_enabled)
printk(KERN_INFO "SELinux: Disabling MLS support...\n");
@ -1870,6 +1881,7 @@ int security_load_policy(void *data, size_t len)
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
@ -1883,6 +1895,17 @@ err:
}
size_t security_policydb_len(void)
{
size_t len;
read_lock(&policy_rwlock);
len = policydb.len;
read_unlock(&policy_rwlock);
return len;
}
/**
* security_port_sid - Obtain the SID for a port.
* @protocol: protocol number
@ -2374,6 +2397,7 @@ out:
if (!rc) {
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_status_update_policyload(seqno);
selinux_xfrm_notify_policyload();
}
return rc;
@ -3129,3 +3153,38 @@ netlbl_sid_to_secattr_failure:
return rc;
}
#endif /* CONFIG_NETLABEL */
/**
* security_read_policy - read the policy.
* @data: binary policy data
* @len: length of data in bytes
*
*/
int security_read_policy(void **data, ssize_t *len)
{
int rc;
struct policy_file fp;
if (!ss_initialized)
return -EINVAL;
*len = security_policydb_len();
*data = vmalloc_user(*len);
if (!*data)
return -ENOMEM;
fp.data = *data;
fp.len = *len;
read_lock(&policy_rwlock);
rc = policydb_write(&policydb, &fp);
read_unlock(&policy_rwlock);
if (rc)
return rc;
*len = (unsigned long)fp.data - (unsigned long)*data;
return 0;
}
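
security_read_policy() snapshots the serialized image into a vmalloc_user() buffer so selinuxfs can hand it back to userspace through the /selinux/policy node added elsewhere in this series. A minimal userspace consumer might look like the sketch below; the mount point and the assumption that the node's reported file size matches security_policydb_len() are not guaranteed here.

/* Hypothetical userspace consumer of /selinux/policy. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/selinux/policy", O_RDONLY);
	struct stat st;
	void *img;

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat /selinux/policy");
		return 1;
	}
	img = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (img == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	printf("running policy image: %lld bytes\n", (long long)st.st_size);
	/* ... hand img/st.st_size to a policy parser here ... */
	munmap(img, st.st_size);
	close(fd);
	return 0;
}
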

View File

@ -0,0 +1,126 @@
/*
* mmap based event notifications for SELinux
*
* Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* Copyright (C) 2010 NEC corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include "avc.h"
#include "services.h"
/*
* The selinux_status_page is exposed to userspace applications through
* the mmap interface on /selinux/status.
* It allows the kernel to notify applications of events that invalidate
* their userspace access vector cache, without requiring a context switch.
*
* The selinux_kernel_status structure at the head of the status page is
* protected against concurrent access with seqlock-style logic, so
* userspace applications must read the status page according to that
* protocol.
*
* Typically, an application checks status->sequence at the start of its
* access control routine. If the value is odd, the kernel is updating the
* status, so the application should retry shortly. If the value differs
* from the last observed sequence number, something has changed and the
* application should reset its userspace avc if needed.
* In the common case, the application can confirm that the kernel status
* is unchanged without making any system calls.
*/
static struct page *selinux_status_page;
static DEFINE_MUTEX(selinux_status_lock);
/*
* selinux_kernel_status_page
*
* Returns a reference to selinux_status_page, allocating and initializing
* the page on first use if it has not been allocated yet.
*/
struct page *selinux_kernel_status_page(void)
{
struct selinux_kernel_status *status;
struct page *result = NULL;
mutex_lock(&selinux_status_lock);
if (!selinux_status_page) {
selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (selinux_status_page) {
status = page_address(selinux_status_page);
status->version = SELINUX_KERNEL_STATUS_VERSION;
status->sequence = 0;
status->enforcing = selinux_enforcing;
/*
* NOTE: the next policyload event will set
* status->policyload to a positive value; it
* may not be 1, but it is never zero, so an
* application can tell that it was updated.
*/
status->policyload = 0;
status->deny_unknown = !security_get_allow_unknown();
}
}
result = selinux_status_page;
mutex_unlock(&selinux_status_lock);
return result;
}
/*
* selinux_status_update_setenforce
*
* Updates the status page with the current enforcing/permissive mode.
*/
void selinux_status_update_setenforce(int enforcing)
{
struct selinux_kernel_status *status;
mutex_lock(&selinux_status_lock);
if (selinux_status_page) {
status = page_address(selinux_status_page);
status->sequence++;
smp_wmb();
status->enforcing = enforcing;
smp_wmb();
status->sequence++;
}
mutex_unlock(&selinux_status_lock);
}
/*
* selinux_status_update_policyload
*
* Updates the status page with the policy load count and the current
* deny_unknown setting.
*/
void selinux_status_update_policyload(int seqno)
{
struct selinux_kernel_status *status;
mutex_lock(&selinux_status_lock);
if (selinux_status_page) {
status = page_address(selinux_status_page);
status->sequence++;
smp_wmb();
status->policyload = seqno;
status->deny_unknown = !security_get_allow_unknown();
smp_wmb();
status->sequence++;
}
mutex_unlock(&selinux_status_lock);
}
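
Both update helpers above bracket the change with two sequence increments separated by smp_wmb(), so a reader observes an odd sequence while an update is in flight. The sketch below shows the matching userspace reader loop; the struct layout mirrors the fields initialized in this file and, like the /selinux/status path, is an assumption to check against the exported kernel header.

/* Illustrative userspace reader for the status page (field order assumed). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct selinux_kernel_status {
	uint32_t version;
	uint32_t sequence;	/* odd while the kernel is updating */
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
};

/* Take a consistent snapshot using the sequence counter protocol. */
static struct selinux_kernel_status
read_status(volatile struct selinux_kernel_status *s)
{
	struct selinux_kernel_status snap;
	uint32_t seq;

	do {
		do {
			seq = s->sequence;
		} while (seq & 1);		/* update in progress, retry */
		__sync_synchronize();		/* pairs with the kernel's smp_wmb() */
		snap = *(struct selinux_kernel_status *)s;
		__sync_synchronize();
	} while (s->sequence != seq);		/* changed underneath us, retry */
	return snap;
}

int main(void)
{
	int fd = open("/selinux/status", O_RDONLY);
	struct selinux_kernel_status *st;
	struct selinux_kernel_status snap;

	if (fd < 0)
		return 1;
	st = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
	if (st == MAP_FAILED)
		return 1;
	snap = read_status(st);
	printf("enforcing=%u policyload=%u deny_unknown=%u\n",
	       snap.enforcing, snap.policyload, snap.deny_unknown);
	return 0;
}
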

View File

@ -1281,12 +1281,11 @@ static int smack_task_getioprio(struct task_struct *p)
*
* Return 0 if write access is permitted
*/
static int smack_task_setscheduler(struct task_struct *p, int policy,
struct sched_param *lp)
static int smack_task_setscheduler(struct task_struct *p)
{
int rc;
rc = cap_task_setscheduler(p, policy, lp);
rc = cap_task_setscheduler(p);
if (rc == 0)
rc = smk_curacc_on_task(p, MAY_WRITE);
return rc;
@ -3005,7 +3004,8 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
char *sp = smack_from_secid(secid);
*secdata = sp;
if (secdata)
*secdata = sp;
*seclen = strlen(sp);
return 0;
}
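
The added NULL check lets secid_to_secctx() double as a pure length query: a caller passes NULL for the data pointer and only the length is filled in, which the conntrack netlink export uses to size its attribute before fetching the context. A hedged sketch of that two-step pattern, using the usual LSM entry points (the report_secctx() helper itself is hypothetical):

/* Sketch of the length-query-then-fetch pattern enabled by this change. */
#include <linux/kernel.h>
#include <linux/security.h>

static int report_secctx(u32 secid)
{
	char *ctx;
	u32 len;
	int rc;

	/* First call: NULL data pointer, only the length is returned. */
	rc = security_secid_to_secctx(secid, NULL, &len);
	if (rc)
		return rc;

	/* ... reserve len bytes in the output buffer / netlink skb ... */

	/* Second call: actually fetch the context string. */
	rc = security_secid_to_secctx(secid, &ctx, &len);
	if (rc)
		return rc;

	pr_info("security context: %.*s\n", len, ctx);
	security_release_secctx(ctx, len);
	return 0;
}
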

View File

@ -768,8 +768,10 @@ static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data)
return true; /* Do nothing if open(O_WRONLY). */
memset(&head->r, 0, sizeof(head->r));
head->r.print_this_domain_only = true;
head->r.eof = !domain;
head->r.domain = &domain->list;
if (domain)
head->r.domain = &domain->list;
else
head->r.eof = 1;
tomoyo_io_printf(head, "# select %s\n", data);
if (domain && domain->is_deleted)
tomoyo_io_printf(head, "# This is a deleted domain.\n");
@ -2051,13 +2053,22 @@ void tomoyo_check_profile(void)
const u8 profile = domain->profile;
if (tomoyo_profile_ptr[profile])
continue;
printk(KERN_ERR "You need to define profile %u before using it.\n",
profile);
printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
"for more information.\n");
panic("Profile %u (used by '%s') not defined.\n",
profile, domain->domainname->name);
}
tomoyo_read_unlock(idx);
if (tomoyo_profile_version != 20090903)
if (tomoyo_profile_version != 20090903) {
printk(KERN_ERR "You need to install userland programs for "
"TOMOYO 2.3 and initialize policy configuration.\n");
printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
"for more information.\n");
panic("Profile version %u is not supported.\n",
tomoyo_profile_version);
}
printk(KERN_INFO "TOMOYO: 2.3.0\n");
printk(KERN_INFO "Mandatory Access Control activated.\n");
}