af_packet: style cleanups

Some style cleanups to match current code practices.
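
The kinds of changes involved look like this before/after sketch (illustrative
only, not code from this patch; struct foo_list and foo_lookup() are made up):

/* Before: brace on its own line, assignment inside the conditional,
 * printk(KERN_ERR ...) and parenthesized return values.
 */
struct foo_list
{
	struct foo_list *next;
};

static int foo_check(int ifindex)
{
	struct net_device *dev;

	if ((dev = foo_lookup(ifindex)) == NULL) {
		printk(KERN_ERR "no device for index %d\n", ifindex);
		return(-ENODEV);
	}
	dev_put(dev);
	return(0);
}

/* After: brace on the struct line, assignment pulled out of the
 * conditional, the pr_err() shorthand, and plain return statements.
 */
struct foo_list {
	struct foo_list *next;
};

static int foo_check(int ifindex)
{
	struct net_device *dev;

	dev = foo_lookup(ifindex);
	if (dev == NULL) {
		pr_err("no device for index %d\n", ifindex);
		return -ENODEV;
	}
	dev_put(dev);
	return 0;
}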

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Eric Dumazet on 2009-07-21 21:57:59 +0000; committed by David S. Miller
parent c9a73cdb77
commit 40d4e3dfc2
1 changed file with 121 additions and 123 deletions

net/packet/af_packet.c

@@ -137,8 +137,7 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
 /* Private packet socket structures. */
-struct packet_mclist
-{
+struct packet_mclist {
 	struct packet_mclist	*next;
 	int			ifindex;
 	int			count;
@@ -149,8 +148,7 @@ struct packet_mclist
 /* identical to struct packet_mreq except it has
  * a longer address field.
  */
-struct packet_mreq_max
-{
+struct packet_mreq_max {
 	int		mr_ifindex;
 	unsigned short	mr_type;
 	unsigned short	mr_alen;
@@ -239,7 +237,7 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 		flush_dcache_page(virt_to_page(&h.h2->tp_status));
 		break;
 	default:
-		printk(KERN_ERR "TPACKET version not supported\n");
+		pr_err("TPACKET version not supported\n");
 		BUG();
 	}
@@ -265,7 +263,7 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
 		flush_dcache_page(virt_to_page(&h.h2->tp_status));
 		return h.h2->tp_status;
 	default:
-		printk(KERN_ERR "TPACKET version not supported\n");
+		pr_err("TPACKET version not supported\n");
 		BUG();
 		return 0;
 	}
@@ -327,7 +325,7 @@ static void packet_sock_destruct(struct sock *sk)
 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 	if (!sock_flag(sk, SOCK_DEAD)) {
-		printk("Attempt to release alive packet socket: %p\n", sk);
+		pr_err("Attempt to release alive packet socket: %p\n", sk);
 		return;
 	}
@@ -339,7 +337,8 @@ static const struct proto_ops packet_ops;
 static const struct proto_ops packet_ops_spkt;
-static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
+			   struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct sock *sk;
 	struct sockaddr_pkt *spkt;
@@ -368,7 +367,8 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
 	if (dev_net(dev) != sock_net(sk))
 		goto out;
-	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (skb == NULL)
 		goto oom;
 	/* drop any routing info */
@@ -423,15 +423,13 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 	 * Get and verify the address.
 	 */
-	if (saddr)
-	{
+	if (saddr) {
 		if (msg->msg_namelen < sizeof(struct sockaddr))
-			return(-EINVAL);
+			return -EINVAL;
 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
 			proto = saddr->spkt_protocol;
-	}
-	else
-		return(-ENOTCONN);	/* SOCK_PACKET must be sent giving an address */
+	} else
+		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
 	/*
 	 *	Find the device first to size check it
@@ -460,9 +458,9 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
 	/*
-	 * If the write buffer is full, then tough. At this level the user gets to
-	 * deal with the problem - do your own algorithmic backoffs. That's far
-	 * more flexible.
+	 * If the write buffer is full, then tough. At this level the user
+	 * gets to deal with the problem - do your own algorithmic backoffs.
+	 * That's far more flexible.
 	 */
 	if (skb == NULL)
@@ -501,7 +499,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 	dev_queue_xmit(skb);
 	dev_put(dev);
-	return(len);
+	return len;
 out_free:
 	kfree_skb(skb);
@@ -537,7 +535,8 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
    we will not harm anyone.
  */
-static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct sock *sk;
 	struct sockaddr_ll *sll;
@@ -648,7 +647,8 @@ drop:
 }
 #ifdef CONFIG_PACKET_MMAP
-static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+		       struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct sock *sk;
 	struct packet_sock *po;
@@ -867,8 +867,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
 		break;
 	}
 	if (unlikely(tp_len > size_max)) {
-		printk(KERN_ERR "packet size is too long (%d > %d)\n",
-		       tp_len, size_max);
+		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
 		return -EMSGSIZE;
 	}
@@ -886,9 +885,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
 	} else if (dev->hard_header_len) {
 		/* net device doesn't like empty head */
 		if (unlikely(tp_len <= dev->hard_header_len)) {
-			printk(KERN_ERR "packet size is too short "
-			       "(%d < %d)\n", tp_len,
-			       dev->hard_header_len);
+			pr_err("packet size is too short (%d < %d)\n",
+			       tp_len, dev->hard_header_len);
 			return -EINVAL;
 		}
@@ -917,8 +915,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
 		nr_frags = skb_shinfo(skb)->nr_frags;
 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
-			printk(KERN_ERR "Packet exceed the number "
-			       "of skb frags(%lu)\n",
+			pr_err("Packet exceed the number of skb frags(%lu)\n",
 			       MAX_SKB_FRAGS);
 			return -EFAULT;
 		}
@@ -1038,8 +1035,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 			goto out_xmit;
 		packet_increment_head(&po->tx_ring);
 		len_sum += tp_len;
-	}
-	while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
+	} while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
 				&& (atomic_read(&po->tx_ring.pending))))
 		);
@@ -1140,7 +1136,7 @@ static int packet_snd(struct socket *sock,
 	dev_put(dev);
-	return(len);
+	return len;
 out_free:
 	kfree_skb(skb);
@@ -1283,7 +1279,8 @@ out_unlock:
  *	Bind a packet socket to a device
  */
-static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+			    int addr_len)
 {
 	struct sock *sk = sock->sk;
 	char name[15];
@@ -1404,7 +1401,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
 	sk_add_node(sk, &net->packet.sklist);
 	sock_prot_inuse_add(net, &packet_proto, 1);
 	write_unlock_bh(&net->packet.sklist_lock);
-	return(0);
+	return 0;
 out:
 	return err;
 }
@@ -1469,8 +1466,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 */
 	copied = skb->len;
-	if (copied > len)
-	{
+	if (copied > len) {
 		copied = len;
 		msg->msg_flags |= MSG_TRUNC;
 	}
@@ -1584,7 +1580,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
 		else
 			return dev_unicast_delete(dev, i->addr);
 		break;
-	default:;
+	default:
+		break;
 	}
 	return 0;
 }
@@ -1693,7 +1690,8 @@ static void packet_flush_mclist(struct sock *sk)
 		struct net_device *dev;
 		po->mclist = ml->next;
-		if ((dev = dev_get_by_index(sock_net(sk), ml->ifindex)) != NULL) {
+		dev = dev_get_by_index(sock_net(sk), ml->ifindex);
+		if (dev != NULL) {
 			packet_dev_mc(dev, ml, -1);
 			dev_put(dev);
 		}
@@ -2239,7 +2237,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 		skb_queue_purge(rb_queue);
 #undef XC
 		if (atomic_read(&po->mapped))
-			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
+			pr_err("packet_mmap: vma is busy: %d\n",
 			       atomic_read(&po->mapped));
 	}
 	mutex_unlock(&po->pg_vec_lock);