/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 * 			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
|
|
|
|
|
2006-01-11 21:17:46 +01:00
|
|
|
#include <linux/capability.h>
|
2005-04-17 00:20:36 +02:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/mount.h>
|
|
|
|
#include <linux/namei.h>
|
|
|
|
#include <linux/sysctl.h>
|
|
|
|
#include <linux/poll.h>
|
|
|
|
#include <linux/mqueue.h>
|
|
|
|
#include <linux/msg.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/netlink.h>
|
|
|
|
#include <linux/syscalls.h>
|
2006-05-24 23:09:55 +02:00
|
|
|
#include <linux/audit.h>
|
2005-05-01 17:59:14 +02:00
|
|
|
#include <linux/signal.h>
|
2006-03-26 11:37:17 +02:00
|
|
|
#include <linux/mutex.h>
|
2007-10-19 08:40:14 +02:00
|
|
|
#include <linux/nsproxy.h>
|
|
|
|
#include <linux/pid.h>
|
2006-03-26 11:37:17 +02:00
|
|
|
|
2005-04-17 00:20:36 +02:00
|
|
|
#include <net/sock.h>
|
|
|
|
#include "util.h"
|
|
|
|
|
|
|
|
#define MQUEUE_MAGIC	0x19800202	/* superblock magic of the mqueue fs */
#define DIRENT_SIZE	20		/* nominal size charged per directory entry */
#define FILENT_SIZE	80		/* size of the text read back from a queue file */

/* Indices into mqueue_inode_info.e_wait_q[] */
#define SEND		0
#define RECV		1

/* States of an ext_wait_queue entry; see wq_sleep() and pipelined_send()/
 * pipelined_receive() for the lockless handoff protocol. */
#define STATE_NONE	0
#define STATE_PENDING	1	/* woken, but the peer is still writing the entry */
#define STATE_READY	2	/* handoff complete, entry may be consumed */

/* default values */
#define DFLT_QUEUESMAX	256	/* max number of message queues */
#define DFLT_MSGMAX 	10	/* max number of messages in each queue */
/* Absolute cap on messages per queue: bounds the message pointer table
 * (info->messages) allocated by kmalloc() to 128 KiB. */
#define HARD_MSGMAX 	(131072/sizeof(void*))
#define DFLT_MSGSIZEMAX 8192	/* max message size */

/*
 * Define the ranges various user-specified maximum values can
 * be set to.
 */
#define MIN_MSGMAX	1		/* min value for msg_max */
#define MAX_MSGMAX	HARD_MSGMAX	/* max value for msg_max */
#define MIN_MSGSIZEMAX	128		/* min value for msgsize_max */
#define MAX_MSGSIZEMAX	(8192*128)	/* max value for msgsize_max */
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
/*
 * One entry per task sleeping in a send/receive call, linked into
 * mqueue_inode_info.e_wait_q[SEND or RECV] (priority-ordered by wq_add()).
 */
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
|
|
|
|
|
|
|
|
/* Per-queue state, embedded around the VFS inode (see MQUEUE_I()). */
struct mqueue_inode_info {
	spinlock_t lock;		/* protects the fields below */
	struct inode vfs_inode;		/* embedded inode; container_of() target */
	wait_queue_head_t wait_q;	/* poll()/select() waiters */

	struct msg_msg **messages;	/* pointer table, kept sorted by priority */
	struct mq_attr attr;		/* mq_maxmsg/mq_msgsize/mq_curmsgs */

	struct sigevent notify;		/* registered notification request */
	struct pid* notify_owner;	/* who registered it; NULL if none */
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;	/* netlink sock for SIGEV_THREAD notify */
	struct sk_buff *notify_cookie;	/* cookie skb sent on SIGEV_THREAD notify */

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
|
|
|
|
|
2007-02-12 09:55:39 +01:00
|
|
|
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

/* Protects queues_count and the per-user mq_bytes accounting. */
static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;	/* slab for mqueue inodes */
static struct vfsmount *mqueue_mnt;		/* internal mount of the mqueue fs */

static unsigned int queues_count;		/* queues currently in existence */
static unsigned int queues_max 	= DFLT_QUEUESMAX;	/* sysctl-tunable limits */
static unsigned int msg_max 	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header * mq_sysctl_table;
|
|
|
|
|
|
|
|
/* Map a VFS inode back to its containing mqueue_inode_info. */
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
|
|
|
|
|
|
|
|
/*
 * Allocate and initialize an inode for the mqueue filesystem.
 *
 * For regular files (a queue) this also sizes the message pointer table
 * from @attr (or the msg_max/msgsize_max defaults), charges the memory
 * against the creator's RLIMIT_MSGQUEUE, and allocates the table.
 * For directories it sets up the (single) root directory.
 * Returns NULL on allocation failure or when the rlimit is exceeded.
 */
static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
					struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = msg_max;
			info->attr.mq_msgsize = msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			/* Bytes charged = pointer table + worst-case payload.
			 * mqueue_delete_inode() must use the same formula. */
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			/* First clause detects unsigned wraparound of the sum. */
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		 	    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				/* Roll back the accounting charge. */
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}
|
|
|
|
|
|
|
|
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
|
|
|
|
{
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
sb->s_blocksize = PAGE_CACHE_SIZE;
|
|
|
|
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
|
|
|
|
sb->s_magic = MQUEUE_MAGIC;
|
|
|
|
sb->s_op = &mqueue_super_ops;
|
|
|
|
|
|
|
|
inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
|
|
|
|
if (!inode)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
sb->s_root = d_alloc_root(inode);
|
|
|
|
if (!sb->s_root) {
|
|
|
|
iput(inode);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 11:02:57 +02:00
|
|
|
/*
 * get_sb callback: all mounts of the mqueue fs share one superblock,
 * so delegate to get_sb_single() with our fill_super routine.
 */
static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}
|
|
|
|
|
2008-07-26 04:45:34 +02:00
|
|
|
/* Slab constructor: initialize the embedded VFS inode once per slab object. */
static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}
|
|
|
|
|
|
|
|
static struct inode *mqueue_alloc_inode(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct mqueue_inode_info *ei;
|
|
|
|
|
2006-12-07 05:33:17 +01:00
|
|
|
ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
|
2005-04-17 00:20:36 +02:00
|
|
|
if (!ei)
|
|
|
|
return NULL;
|
|
|
|
return &ei->vfs_inode;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the slab object to the cache (counterpart of mqueue_alloc_inode). */
static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
|
|
|
|
|
|
|
|
/*
 * Final inode teardown: free every queued message and the pointer
 * table, then return the charged bytes to the owning user, drop the
 * global queue count and release the user reference.
 */
static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	/* Same formula as the charge taken in mqueue_get_inode(). */
	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}
|
|
|
|
|
|
|
|
/*
 * ->create() for the mqueue root directory.  Enforces the global
 * queues_max limit (bypassable with CAP_SYS_RESOURCE) and creates the
 * queue inode.  Optional creation attributes arrive via
 * dentry->d_fsdata, stashed there by do_create().
 */
static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;		/* undo the reservation taken above */
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}
|
|
|
|
|
|
|
|
/* ->unlink(): update the parent's bookkeeping and drop the queue's link. */
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is routine for system read from queue file.
|
|
|
|
* To avoid mess with doing here some sort of mq_receive we allow
|
|
|
|
* to read only queue size & notification info (the only values
|
|
|
|
* that are interesting from user point of view and aren't accessible
|
|
|
|
* through std routines)
|
|
|
|
*/
|
|
|
|
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	/* Snapshot the queue status into a fixed-size text line. */
	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	/* Only bump the timestamps when data was actually copied out. */
	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}
|
|
|
|
|
2006-06-23 11:05:12 +02:00
|
|
|
/*
 * ->flush(): if the closing thread group owns the queue's notification
 * registration, remove it (POSIX: registration dies with the owner).
 */
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}
|
|
|
|
|
|
|
|
/* ->poll(): readable when messages are queued, writable when not full. */
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
|
|
|
|
|
|
|
|
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
|
|
|
|
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	/* Keep the list ordered by task static priority so that
	 * wq_get_first_waiter(), which takes from the tail, services
	 * the appropriate waiter first.  Caller holds info->lock. */
	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Puts current task to sleep. Caller must hold queue lock. After return
|
|
|
|
* lock isn't held.
|
|
|
|
* sr: SEND or RECV
|
|
|
|
*/
|
|
|
|
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		/* A pipelined peer may wake us while still writing our
		 * entry; busy-wait until it flips state to STATE_READY
		 * (see pipelined_send()/pipelined_receive()). */
		while (ewp->state == STATE_PENDING)
			cpu_relax();

		/* Lockless fast path: handoff already completed. */
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		/* Re-check under the lock: the handoff may have finished
		 * between the lockless test and acquiring the lock. */
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	/* Failure paths only: remove ourselves from the wait list.
	 * On success the peer already did list_del() for us. */
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns waiting task that should be serviced first or NULL if none exists
|
|
|
|
*/
|
|
|
|
static struct ext_wait_queue *wq_get_first_waiter(
|
|
|
|
struct mqueue_inode_info *info, int sr)
|
|
|
|
{
|
|
|
|
struct list_head *ptr;
|
|
|
|
|
|
|
|
ptr = info->e_wait_q[sr].list.prev;
|
|
|
|
if (ptr == &info->e_wait_q[sr].list)
|
|
|
|
return NULL;
|
|
|
|
return list_entry(ptr, struct ext_wait_queue, list);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Auxiliary functions to manipulate messages' list */
|
|
|
|
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	/* Insertion sort into the pointer array, kept ascending by
	 * m_type (priority); msg_get() removes from the top.  Entries
	 * with equal priority are shifted past the new one, so older
	 * messages end up above it and are received first. */
	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}
|
|
|
|
|
|
|
|
/* Remove and return the top (highest-priority) message, updating qsize. */
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
|
|
|
|
|
|
|
|
/* Stamp a SIGEV_THREAD notification cookie skb with its result code
 * (NOTIFY_WOKENUP or NOTIFY_REMOVED) in the last byte of the data. */
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The next function is only to split too long sys_mq_timedsend
|
|
|
|
*/
|
|
|
|
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			/* registration consumed below, nothing delivered */
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_vnr(current);
			sig_i.si_uid = current->uid;

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			/* wake the library helper thread via netlink */
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}
|
|
|
|
|
|
|
|
/*
 * Convert a user-supplied absolute timeout into a relative jiffies
 * value.  NULL means block indefinitely (MAX_SCHEDULE_TIMEOUT); an
 * already-expired deadline yields 0.  On bad user data the negative
 * errno (-EFAULT/-EINVAL) is returned in the same long — callers must
 * check for it before using the value as a timeout.
 */
static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			/* borrow one second for the nanosecond subtraction */
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;

		/* +1 rounds up so we never return early */
		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
|
|
|
|
|
|
|
|
/*
 * Drop the queue's notification registration.  For SIGEV_THREAD the
 * cookie is sent back marked NOTIFY_REMOVED so the library helper
 * thread can exit.  Caller holds info->lock.
 */
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);	/* put_pid(NULL) is a no-op */
	info->notify_owner = NULL;
}
|
|
|
|
|
|
|
|
static int mq_attr_ok(struct mq_attr *attr)
|
|
|
|
{
|
|
|
|
if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
|
|
|
|
return 0;
|
|
|
|
if (capable(CAP_SYS_RESOURCE)) {
|
|
|
|
if (attr->mq_maxmsg > HARD_MSGMAX)
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
if (attr->mq_maxmsg > msg_max ||
|
|
|
|
attr->mq_msgsize > msgsize_max)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
/* check for overflow */
|
|
|
|
if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
|
|
|
|
return 0;
|
|
|
|
if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
|
|
|
|
(attr->mq_maxmsg * sizeof (struct msg_msg *)) <
|
|
|
|
(unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
|
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Invoked when creating a new queue via sys_mq_open
|
|
|
|
*/
|
|
|
|
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	struct file *result;
	int ret;

	if (u_attr) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		/* (mqueue_create() reads this stack copy; cleared below
		 * before we can return) */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = mnt_want_write(mqueue_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, mqueue_mnt, oflag);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(mqueue_mnt);
	return result;

out_drop_write:
	mnt_drop_write(mqueue_mnt);
out:
	/* On error we consume the caller's dentry and mount references. */
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}
|
|
|
|
|
|
|
|
/* Opens existing queue */
|
|
|
|
/* Opens existing queue */
/* On failure the caller's dentry and mount references are consumed;
 * on success dentry_open() takes ownership of them. */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	/* Map O_RDONLY/O_WRONLY/O_RDWR to permission mask bits. */
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}
|
|
|
|
|
|
|
|
/*
 * mq_open(2): look the name up in the mqueue fs root, then either open
 * the existing queue or create a new one, and install the resulting
 * file into a close-on-exec fd.  Returns the fd or a negative errno.
 */
asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	error = audit_mq_open(oflag, mode, u_attr);
	if (error != 0)
		return error;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	/* The root i_mutex serializes lookup/create/unlink on the fs. */
	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		audit_inode(name, dentry);
		filp = do_open(dentry, oflag);
	}

	/* do_open()/do_create() already dropped dentry/mnt on failure,
	 * so the error path skips to out_putfd, not out. */
	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
|
|
|
|
|
|
|
|
/*
 * mq_unlink(2): remove a queue by name.  An extra inode reference is
 * taken so the final iput() — and thus queue destruction — happens
 * after the root i_mutex is released.
 */
asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(mqueue_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(mqueue_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}
|
|
|
|
|
|
|
|
/* Pipelined send and receive functions.
|
|
|
|
*
|
|
|
|
* If a receiver finds no waiting message, then it registers itself in the
|
|
|
|
* list of waiting receivers. A sender checks that list before adding the new
|
|
|
|
* message into the message array. If there is a waiting receiver, then it
|
|
|
|
* bypasses the message array and directly hands the message over to the
|
|
|
|
* receiver.
|
|
|
|
* The receiver accepts the message and returns without grabbing the queue
|
|
|
|
* spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
|
|
|
|
* are necessary. The same algorithm is used for sysv semaphores, see
|
2006-03-28 11:56:23 +02:00
|
|
|
* ipc/sem.c for more details.
|
2005-04-17 00:20:36 +02:00
|
|
|
*
|
|
|
|
* The same algorithm is used for senders.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 *
 * Called with info->lock held (see sys_mq_timedsend()).  The store order
 * below is load-bearing: the receiver returns without retaking the queue
 * spinlock, so it must not observe STATE_READY until ->msg is visible.
 * The STATE_PENDING intermediate state and the smp_wmb() implement that
 * handshake; the same scheme is used in ipc/sem.c (see the block comment
 * above).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	/* Hand over ownership of the message; it never enters the array. */
	receiver->msg = message;
	list_del(&receiver->list);
	/* "wakeup in progress, message not yet stable" */
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	/* make ->msg (and the wakeup) visible before flagging STATE_READY */
	smp_wmb();
	receiver->state = STATE_READY;
}
|
|
|
|
|
|
|
|
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
 * gets its message and put to the queue (we have one free place for sure). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	/* Called with info->lock held, right after a message was removed
	 * from a full queue (see sys_mq_timedreceive()), so there is
	 * guaranteed room for one more message. */
	if (!sender) {
		/* no blocked sender - just notify pollers of free space */
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	/* move the blocked sender's message into the now-free slot */
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	/* same STATE_PENDING/smp_wmb()/STATE_READY handshake as in
	 * pipelined_send(): the sender may return without taking the lock,
	 * so STATE_READY must not be visible before the list removal is. */
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}
|
|
|
|
|
|
|
|
/*
 * sys_mq_timedsend - send a message to a POSIX message queue, optionally
 * waiting until the given absolute deadline if the queue is full.
 *
 * Returns 0 on success, or a negative errno: -EINVAL (bad priority or,
 * via prepare_timeout(), presumably a bad timespec - confirm against
 * prepare_timeout()'s contract), -EBADF (bad descriptor, not an mqueue
 * file, or not opened for writing), -EMSGSIZE (message larger than the
 * queue's mq_msgsize), -EAGAIN (queue full and O_NONBLOCK set), or the
 * error propagated from audit/load_msg/wq_sleep.
 */
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;	/* on-stack wait entry, lives on info->e_wait_q while sleeping */
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	long timeout;
	int ret;

	ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	/* negative result is an error code, checked below under the lock */
	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	/* reject descriptors that are not mqueue files */
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	/* still -EBADF: queue not opened for writing */
	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		/* queue full: fail, report a bad timeout, or sleep */
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			/* wq_sleep() drops info->lock; on success the
			 * message was consumed by pipelined_receive() */
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		/* message was never handed off - free our allocation */
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			/* hand directly to a blocked receiver (takes
			 * ownership of msg_ptr) */
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			/* queue went non-empty: fire registered notification */
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * sys_mq_timedreceive - receive the highest-priority message from a POSIX
 * message queue, optionally waiting until the given absolute deadline if
 * the queue is empty.
 *
 * On success returns the received message's length and stores the message
 * body in u_msg_ptr (and its priority in u_msg_prio, if non-NULL).
 * Errors: -EBADF (bad descriptor / not an mqueue file / not opened for
 * reading), -EMSGSIZE (caller's buffer smaller than mq_msgsize - POSIX
 * requires the buffer to fit a maximum-size message), -EAGAIN (empty and
 * O_NONBLOCK), -EFAULT (copy-out failed), or an audit/timeout/wq_sleep
 * error.
 */
asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;	/* on-stack wait entry for the empty-queue case */

	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	/* negative result is an error code, checked below under the lock */
	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	/* reject descriptors that are not mqueue files */
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		/* queue empty: fail, report a bad timeout, or sleep */
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			/* wq_sleep() drops info->lock; on success a sender
			 * deposited its message in wait.msg via
			 * pipelined_send() */
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		/* we own msg_ptr now; copy it out and free it regardless */
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * sys_mq_notify - register (or, with a NULL pointer, deregister) the
 * calling process for asynchronous notification when a message arrives
 * on an empty queue.
 *
 * Notes: the case when user wants us to deregister (with NULL as pointer)
 * and he isn't currently owner of notification, will be silently discarded.
 * It isn't explicitly defined in the POSIX.
 *
 * SIGEV_THREAD is implemented via a netlink socket: in that case
 * notification.sigev_signo carries a netlink socket fd supplied by the
 * C library, and sigev_value.sival_ptr points at a NOTIFY_COOKIE_LEN
 * cookie which is queued on that socket when the notification fires
 * (see the fget(notification.sigev_signo) below).
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;	/* pre-built notification cookie skb (SIGEV_THREAD only) */

	ret = audit_mq_notify(mqdes, u_notification);
	if (ret != 0)
		return ret;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			/* 1 means "look the socket up again and retry" */
			if (ret == 1)
				goto retry;
			if (ret) {
				/* netlink_attachskb() disposed of both the
				 * socket ref and the skb on failure; clear
				 * our pointers so the cleanup at 'out'
				 * doesn't free them again */
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	/* reject descriptors that are not mqueue files */
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		/* deregistration: only honoured for the current owner;
		 * silently ignored otherwise (see comment above) */
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		/* POSIX: only one process may be registered at a time */
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			/* ownership of sock and nc passes to info; clear
			 * the locals so 'out' doesn't release them */
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	/* release the netlink socket/skb if they were not handed to info */
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
|
|
|
|
|
|
|
|
asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
|
|
|
|
const struct mq_attr __user *u_mqstat,
|
|
|
|
struct mq_attr __user *u_omqstat)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct mq_attr mqstat, omqstat;
|
|
|
|
struct file *filp;
|
|
|
|
struct inode *inode;
|
|
|
|
struct mqueue_inode_info *info;
|
|
|
|
|
|
|
|
if (u_mqstat != NULL) {
|
|
|
|
if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
|
|
|
|
return -EFAULT;
|
|
|
|
if (mqstat.mq_flags & (~O_NONBLOCK))
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = -EBADF;
|
|
|
|
filp = fget(mqdes);
|
|
|
|
if (!filp)
|
|
|
|
goto out;
|
|
|
|
|
2006-12-08 11:37:11 +01:00
|
|
|
inode = filp->f_path.dentry->d_inode;
|
2005-04-17 00:20:36 +02:00
|
|
|
if (unlikely(filp->f_op != &mqueue_file_operations))
|
|
|
|
goto out_fput;
|
|
|
|
info = MQUEUE_I(inode);
|
|
|
|
|
|
|
|
spin_lock(&info->lock);
|
|
|
|
|
|
|
|
omqstat = info->attr;
|
|
|
|
omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
|
|
|
|
if (u_mqstat) {
|
2006-05-24 23:09:55 +02:00
|
|
|
ret = audit_mq_getsetattr(mqdes, &mqstat);
|
2007-11-29 01:21:31 +01:00
|
|
|
if (ret != 0) {
|
|
|
|
spin_unlock(&info->lock);
|
|
|
|
goto out_fput;
|
|
|
|
}
|
2005-04-17 00:20:36 +02:00
|
|
|
if (mqstat.mq_flags & O_NONBLOCK)
|
|
|
|
filp->f_flags |= O_NONBLOCK;
|
|
|
|
else
|
|
|
|
filp->f_flags &= ~O_NONBLOCK;
|
|
|
|
|
|
|
|
inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&info->lock);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
|
|
|
|
sizeof(struct mq_attr)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
out_fput:
|
|
|
|
fput(filp);
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-02-12 09:55:39 +01:00
|
|
|
/*
 * Directory operations for the mqueue filesystem root: queue files are
 * created by mqueue_create() and removed by mqueue_unlink(); lookup is
 * the generic libfs simple_lookup().
 */
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};
|
|
|
|
|
2007-02-12 09:55:35 +01:00
|
|
|
/*
 * File operations for an open message queue descriptor.  These back the
 * descriptor-validity checks in the syscalls above (filp->f_op is
 * compared against this table).  read() provides the human-readable
 * status text, poll() reports send/receive readiness, and flush() hooks
 * descriptor close.
 */
static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};
|
|
|
|
|
|
|
|
/*
 * Superblock operations: mqueue inodes carry a struct mqueue_inode_info,
 * so allocation/destruction are custom; generic_delete_inode ensures
 * unlinked queues are deleted (not cached) as soon as the last reference
 * is dropped.
 */
static struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};
|
|
|
|
|
|
|
|
/* The "mqueue" filesystem type; kern_mount()ed in init_mqueue_fs(). */
static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};
|
|
|
|
|
2008-10-19 05:28:32 +02:00
|
|
|
/*
 * Clamp values for the msg_max and msgsize_max sysctls below; wired into
 * proc_dointvec_minmax via .extra1/.extra2 in mq_sysctls[].
 */
static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = MAX_MSGMAX;

static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
/*
 * Tunables exposed under fs.mqueue (see mq_sysctl_dir/mq_sysctl_root):
 * queues_max   - max number of message queues (unclamped)
 * msg_max      - max messages per queue, clamped to [MIN_MSGMAX, MAX_MSGMAX]
 * msgsize_max  - max message size, clamped to [MIN_MSGSIZEMAX, MAX_MSGSIZEMAX]
 */
static ctl_table mq_sysctls[] = {
	{
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }	/* table terminator */
};
|
|
|
|
|
|
|
|
/* the "mqueue" directory under fs/, holding the entries above */
static ctl_table mq_sysctl_dir[] = {
	{
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }	/* table terminator */
};
|
|
|
|
|
|
|
|
/* root of the registration: /proc/sys/fs -> fs/mqueue -> tunables */
static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }	/* table terminator */
};
|
|
|
|
|
|
|
|
/*
 * init_mqueue_fs - boot-time initialization of the mqueue filesystem:
 * inode slab cache, fs.mqueue sysctls, filesystem registration and the
 * internal kernel mount.  Unwinds in reverse order on failure.
 */
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* sysctl registration is best-effort; a NULL table is tolerated
	 * here and in the unwind path below */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	mqueue_mnt = kern_mount(&mqueue_fs_type);
	if (IS_ERR(mqueue_mnt)) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal bookkeeping, not part of the generic VFS setup */
	spin_lock_init(&mq_lock);
	queues_count = 0;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}
|
|
|
|
|
|
|
|
__initcall(init_mqueue_fs);
|