Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (25 commits)
  [XFRM]: Fix oops in xfrm4_dst_destroy()
  [XFRM_TUNNEL]: Reload header pointer after pskb_may_pull/pskb_expand_head
  [IPV4]: Use random32() in net/ipv4/multipath
  [BRIDGE]: eliminate workqueue for carrier check
  [BRIDGE]: get rid of miscdevice include
  [IPV6]: Fix __ipv6_addr_type() export in correct place.
  [IPV4] devinet: Register inetdev earlier.
  [IPV6] ADDRCONF: Register inet6_dev earlier.
  [IPV6] ADDRCONF: Manage prefix route corresponding to address manually added.
  [IPV6] IP6TUNNEL: Use update_pmtu() of dst on xmit.
  [IPV6] ADDRCONF: Statically link __ipv6_addr_type() for sunrpc subsystem.
  [IPV4]: Correct links in net/ipv4/Kconfig
  [SCTP]: Strike the transport before updating rto.
  [SCTP]: Fix connection hang/slowdown with PR-SCTP
  [TCP]: Fix MD5 signature pool locking.
  [TG3]: TSO workaround fixes.
  [AF_PACKET]: Remove unnecessary casts.
  [IPV6]: Adjust inet6_exit() cleanup sequence against inet6_init()
  [IPSEC]: More fix is needed for __xfrm6_bundle_create().
  [IRDA] net/irda/: proper prototypes
  ...
Linus Torvalds, 2007-02-26 12:23:08 -08:00
commit 5d5dde2ed9
32 changed files with 408 additions and 237 deletions


@ -307,3 +307,5 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("BCM2033-MD.hex");
MODULE_FIRMWARE("BCM2033-FW.bin");


@ -801,3 +801,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("bfubase.frm");


@ -63,6 +63,7 @@
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>, Jose Orlando Pereira <jop@di.uminho.pt>");
MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("BT3CPCC.bin");


@ -64,8 +64,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.73"
#define DRV_MODULE_RELDATE "February 12, 2007"
#define DRV_MODULE_VERSION "3.74"
#define DRV_MODULE_RELDATE "February 20, 2007"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@ -3993,7 +3993,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
/* Estimate the number of fragments in the worst case */
if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
netif_stop_queue(tp->dev);
return NETDEV_TX_BUSY;
if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
return NETDEV_TX_BUSY;
netif_wake_queue(tp->dev);
}
segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
@ -4061,7 +4064,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
return (tg3_tso_bug(tp, skb));
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
@ -8137,7 +8140,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
(ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
(ering->tx_pending <= MAX_SKB_FRAGS) ||
((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
(ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
return -EINVAL;
@ -10557,12 +10560,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
} else {
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
TG3_FLG2_HW_TSO_1_BUG;
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5750 &&
tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
}
}
@ -11867,7 +11869,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
} else {
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
}
/* TSO is on by default on chips that support hardware TSO.
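
The tg3_tso_bug() hunk above replaces an unconditional NETDEV_TX_BUSY return with a stop-recheck-wake sequence. A minimal sketch of that pattern, assuming hypothetical helpers: tx_slots_free() and slots_needed stand in for tg3_tx_avail() and the worst-case gso_segs estimate and are not real tg3 symbols.

#include <linux/netdevice.h>

/* Hypothetical: number of free TX descriptors for this device. */
extern unsigned int tx_slots_free(struct net_device *dev);

static int example_reserve_tx(struct net_device *dev, unsigned int slots_needed)
{
	if (unlikely(tx_slots_free(dev) <= slots_needed)) {
		netif_stop_queue(dev);
		/* A TX-completion interrupt may have freed descriptors between
		 * the first check and netif_stop_queue(); re-check before
		 * giving up, otherwise the queue could stay stopped even
		 * though space is available again. */
		if (tx_slots_free(dev) <= slots_needed)
			return NETDEV_TX_BUSY;
		netif_wake_queue(dev);
	}
	return NETDEV_TX_OK;
}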


@ -2227,7 +2227,7 @@ struct tg3 {
#define TG3_FLAG_INIT_COMPLETE 0x80000000
u32 tg3_flags2;
#define TG3_FLG2_RESTART_TIMER 0x00000001
#define TG3_FLG2_HW_TSO_1_BUG 0x00000002
#define TG3_FLG2_TSO_BUG 0x00000002
#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
#define TG3_FLG2_IS_5788 0x00000008
#define TG3_FLG2_MAX_RXPEND_64 0x00000010


@ -113,4 +113,20 @@ do { if(!(expr)) { \
#define IAS_IRCOMM_ID 0x2343
#define IAS_IRLPT_ID 0x9876
struct net_device;
struct packet_type;
extern void irda_proc_register(void);
extern void irda_proc_unregister(void);
extern int irda_sysctl_register(void);
extern void irda_sysctl_unregister(void);
extern int irsock_init(void);
extern void irsock_cleanup(void);
extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev);
#endif /* NET_IRDA_H */


@ -1,6 +1,7 @@
config BT_HIDP
tristate "HIDP protocol support"
depends on BT && BT_L2CAP && INPUT
select HID
help
HIDP (Human Interface Device Protocol) is a transport layer
for HID reports. HIDP is required for the Bluetooth Human


@ -38,6 +38,7 @@
#include <net/sock.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -50,7 +51,7 @@
#define BT_DBG(D...)
#endif
#define VERSION "1.1"
#define VERSION "1.2"
static DECLARE_RWSEM(hidp_session_sem);
static LIST_HEAD(hidp_session_list);
@ -124,15 +125,22 @@ static void __hidp_copy_session(struct hidp_session *session, struct hidp_connin
else
strncpy(ci->name, "HID Boot Device", 128);
}
if (session->hid) {
ci->vendor = session->hid->vendor;
ci->product = session->hid->product;
ci->version = session->hid->version;
strncpy(ci->name, session->hid->name, 128);
}
}
static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
static inline int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
struct hidp_session *session = dev->private;
struct sk_buff *skb;
unsigned char newleds;
struct sk_buff *skb;
BT_DBG("input %p type %d code %d value %d", dev, type, code, value);
BT_DBG("session %p type %d code %d value %d", session, type, code, value);
if (type != EV_LED)
return -1;
@ -164,6 +172,21 @@ static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned i
return 0;
}
static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
struct hid_device *hid = dev->private;
struct hidp_session *session = hid->driver_data;
return hidp_queue_event(session, dev, type, code, value);
}
static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
struct hidp_session *session = dev->private;
return hidp_queue_event(session, dev, type, code, value);
}
static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
{
struct input_dev *dev = session->input;
@ -219,6 +242,42 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
input_sync(dev);
}
static inline int hidp_queue_report(struct hidp_session *session, unsigned char *data, int size)
{
struct sk_buff *skb;
BT_DBG("session %p hid %p data %p size %d", session, device, data, size);
if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
*skb_put(skb, 1) = 0xa2;
if (size > 0)
memcpy(skb_put(skb, size), data, size);
skb_queue_tail(&session->intr_transmit, skb);
hidp_schedule(session);
return 0;
}
static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
{
unsigned char buf[32];
int rsize;
rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
if (rsize > sizeof(buf))
return -EIO;
hid_output_report(report, buf);
return hidp_queue_report(session, buf, rsize);
}
static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;
@ -346,6 +405,10 @@ static inline void hidp_process_data(struct hidp_session *session, struct sk_buf
if (session->input)
hidp_input_report(session, skb);
if (session->hid)
hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
break;
case HIDP_DATA_RTYPE_OTHER:
@ -404,8 +467,14 @@ static inline void hidp_recv_intr_frame(struct hidp_session *session, struct sk_
if (hdr == (HIDP_TRANS_DATA | HIDP_DATA_RTYPE_INPUT)) {
hidp_set_timer(session);
if (session->input)
hidp_input_report(session, skb);
if (session->hid) {
hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
BT_DBG("report len %d", skb->len);
}
} else {
BT_DBG("Unsupported protocol header 0x%02x", hdr);
}
@ -471,6 +540,11 @@ static int hidp_session(void *arg)
product = session->input->id.product;
}
if (session->hid) {
vendor = session->hid->vendor;
product = session->hid->product;
}
daemonize("khidpd_%04x%04x", vendor, product);
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
@ -521,6 +595,12 @@ static int hidp_session(void *arg)
session->input = NULL;
}
if (session->hid) {
if (session->hid->claimed & HID_CLAIMED_INPUT)
hidinput_disconnect(session->hid);
hid_free_device(session->hid);
}
up_write(&hidp_session_sem);
kfree(session);
@ -590,6 +670,56 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co
input_register_device(input);
}
static int hidp_open(struct hid_device *hid)
{
return 0;
}
static void hidp_close(struct hid_device *hid)
{
}
static inline void hidp_setup_hid(struct hidp_session *session, struct hidp_connadd_req *req)
{
struct hid_device *hid = session->hid;
struct hid_report *report;
bdaddr_t src, dst;
baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
baswap(&dst, &bt_sk(session->ctrl_sock->sk)->dst);
hid->driver_data = session;
hid->country = req->country;
hid->bus = BUS_BLUETOOTH;
hid->vendor = req->vendor;
hid->product = req->product;
hid->version = req->version;
strncpy(hid->name, req->name, 128);
strncpy(hid->phys, batostr(&src), 64);
strncpy(hid->uniq, batostr(&dst), 64);
hid->dev = hidp_get_device(session);
hid->hid_open = hidp_open;
hid->hid_close = hidp_close;
hid->hidinput_input_event = hidp_hidinput_event;
list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].report_list, list)
hidp_send_report(session, report);
list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
hidp_send_report(session, report);
if (hidinput_connect(hid) == 0) {
hid->claimed |= HID_CLAIMED_INPUT;
hid_ff_init(hid);
}
}
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
{
struct hidp_session *session, *s;
@ -605,10 +735,38 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
if (!session)
return -ENOMEM;
session->input = input_allocate_device();
if (!session->input) {
kfree(session);
return -ENOMEM;
BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
if (req->rd_size > 0) {
unsigned char *buf = kmalloc(req->rd_size, GFP_KERNEL);
if (!buf) {
kfree(session);
return -ENOMEM;
}
if (copy_from_user(buf, req->rd_data, req->rd_size)) {
kfree(buf);
kfree(session);
return -EFAULT;
}
session->hid = hid_parse_report(buf, req->rd_size);
kfree(buf);
if (!session->hid) {
kfree(session);
return -EINVAL;
}
}
if (!session->hid) {
session->input = input_allocate_device();
if (!session->input) {
kfree(session);
return -ENOMEM;
}
}
down_write(&hidp_session_sem);
@ -644,6 +802,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
if (session->input)
hidp_setup_input(session, req);
if (session->hid)
hidp_setup_hid(session, req);
__hidp_link_session(session);
hidp_set_timer(session);
@ -677,6 +838,9 @@ unlink:
failed:
up_write(&hidp_session_sem);
if (session->hid)
hid_free_device(session->hid);
kfree(session->input);
kfree(session);
return err;
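
In hidp_send_report() above, report->size is a length in bits, so the rsize expression rounds up to whole bytes and reserves one extra byte when a non-zero report ID has to be prefixed. A worked example with assumed (hypothetical) values:

/* Assume a 20-bit HID report with report ID 3 (hypothetical values). */
int report_bits = 20;
int report_id = 3;

/* Round the bit count up to bytes: ((20 - 1) >> 3) + 1 = 2 + 1 = 3. */
int payload_bytes = ((report_bits - 1) >> 3) + 1;

/* Add one byte for the report ID prefix when the ID is non-zero. */
int rsize = payload_bytes + (report_id > 0);	/* 3 + 1 = 4 bytes */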


@ -145,6 +145,8 @@ struct hidp_session {
struct input_dev *input;
struct hid_device *hid;
struct timer_list timer;
struct sk_buff_head ctrl_transmit;


@ -194,7 +194,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
if (put_user(ca.ctrl_sock, &uca->ctrl_sock) ||
put_user(ca.intr_sock, &uca->intr_sock) ||
put_user(ca.parser, &uca->parser) ||
put_user(ca.rd_size, &uca->parser) ||
put_user(ca.rd_size, &uca->rd_size) ||
put_user(compat_ptr(ca.rd_data), &uca->rd_data) ||
put_user(ca.country, &uca->country) ||
put_user(ca.subclass, &uca->subclass) ||


@ -74,6 +74,8 @@ struct rfcomm_dev {
wait_queue_head_t wait;
struct tasklet_struct wakeup_task;
struct device *tty_dev;
atomic_t wmem_alloc;
};
@ -261,7 +263,7 @@ out:
return err;
}
tty_register_device(rfcomm_tty_driver, dev->id, rfcomm_get_device(dev));
dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL);
return dev->id;
}
@ -630,6 +632,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
if (err == 0)
device_move(dev->tty_dev, rfcomm_get_device(dev));
return err;
}
@ -642,6 +647,8 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->opened);
if (--dev->opened == 0) {
device_move(dev->tty_dev, NULL);
/* Close DLC and dettach TTY */
rfcomm_dlc_close(dev->dlc, 0);


@ -77,26 +77,15 @@ static int port_cost(struct net_device *dev)
* Called from work queue to allow for calling functions that
* might sleep (such as speed check), and to debounce.
*/
static void port_carrier_check(struct work_struct *work)
void br_port_carrier_check(struct net_bridge_port *p)
{
struct net_bridge_port *p;
struct net_device *dev;
struct net_bridge *br;
dev = container_of(work, struct net_bridge_port,
carrier_check.work)->dev;
work_release(work);
rtnl_lock();
p = dev->br_port;
if (!p)
goto done;
br = p->br;
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
if (netif_carrier_ok(dev))
p->path_cost = port_cost(dev);
if (br->dev->flags & IFF_UP) {
if (netif_running(br->dev)) {
spin_lock_bh(&br->lock);
if (netif_carrier_ok(dev)) {
if (p->state == BR_STATE_DISABLED)
@ -107,9 +96,6 @@ static void port_carrier_check(struct work_struct *work)
}
spin_unlock_bh(&br->lock);
}
done:
dev_put(dev);
rtnl_unlock();
}
static void release_nbp(struct kobject *kobj)
@ -162,9 +148,6 @@ static void del_nbp(struct net_bridge_port *p)
dev_set_promiscuity(dev, -1);
if (cancel_delayed_work(&p->carrier_check))
dev_put(dev);
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
@ -282,7 +265,6 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
br_stp_port_timer_init(p);
kobject_init(&p->kobj);
@ -446,12 +428,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
if (schedule_delayed_work(&p->carrier_check, BR_PORT_DEBOUNCE))
dev_hold(dev);
spin_unlock_bh(&br->lock);
dev_set_mtu(br->dev, br_min_mtu(br));
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;


@ -42,51 +42,48 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
br = p->br;
spin_lock_bh(&br->lock);
switch (event) {
case NETDEV_CHANGEMTU:
dev_set_mtu(br->dev, br_min_mtu(br));
break;
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
br_ifinfo_notify(RTM_NEWLINK, p);
br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
break;
case NETDEV_CHANGE:
if (br->dev->flags & IFF_UP)
if (schedule_delayed_work(&p->carrier_check,
BR_PORT_DEBOUNCE))
dev_hold(dev);
br_port_carrier_check(p);
break;
case NETDEV_FEAT_CHANGE:
if (br->dev->flags & IFF_UP)
spin_lock_bh(&br->lock);
if (netif_running(br->dev))
br_features_recompute(br);
/* could do recursive feature change notification
* but who would care??
*/
spin_unlock_bh(&br->lock);
break;
case NETDEV_DOWN:
spin_lock_bh(&br->lock);
if (br->dev->flags & IFF_UP)
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
break;
case NETDEV_UP:
spin_lock_bh(&br->lock);
if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
break;
case NETDEV_UNREGISTER:
spin_unlock_bh(&br->lock);
br_del_if(br, dev);
goto done;
break;
}
spin_unlock_bh(&br->lock);
done:
return NOTIFY_DONE;
}


@ -16,7 +16,6 @@
#define _BR_PRIVATE_H
#include <linux/netdevice.h>
#include <linux/miscdevice.h>
#include <linux/if_bridge.h>
#define BR_HASH_BITS 8
@ -27,8 +26,6 @@
#define BR_PORT_BITS 10
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
#define BR_PORT_DEBOUNCE (HZ/10)
#define BR_VERSION "2.2"
typedef struct bridge_id bridge_id;
@ -82,7 +79,6 @@ struct net_bridge_port
struct timer_list hold_timer;
struct timer_list message_age_timer;
struct kobject kobj;
struct delayed_work carrier_check;
struct rcu_head rcu;
};
@ -173,6 +169,7 @@ extern void br_flood_forward(struct net_bridge *br,
int clone);
/* br_if.c */
extern void br_port_carrier_check(struct net_bridge_port *p);
extern int br_add_bridge(const char *name);
extern int br_del_bridge(const char *name);
extern void br_cleanup_bridges(void);


@ -442,7 +442,7 @@ config INET_DIAG
---help---
Support for INET (TCP, DCCP, etc) socket monitoring interface used by
native Linux tools such as ss. ss is included in iproute2, currently
downloadable at <http://developer.osdl.org/dev/iproute2>.
downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
If unsure, say Y.
@ -550,7 +550,7 @@ config TCP_CONG_SCALABLE
Scalable TCP is a sender-side only change to TCP which uses a
MIMD congestion control algorithm which has some nice scaling
properties, though is known to have fairness issues.
See http://www-lce.eng.cam.ac.uk/~ctk21/scalable/
See http://www.deneholme.net/tom/scalable/
config TCP_CONG_LP
tristate "TCP Low Priority"


@ -1054,12 +1054,14 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
ASSERT_RTNL();
if (!in_dev) {
if (event == NETDEV_REGISTER && dev == &loopback_dev) {
if (event == NETDEV_REGISTER) {
in_dev = inetdev_init(dev);
if (!in_dev)
panic("devinet: Failed to create loopback\n");
in_dev->cnf.no_xfrm = 1;
in_dev->cnf.no_policy = 1;
if (dev == &loopback_dev) {
in_dev->cnf.no_xfrm = 1;
in_dev->cnf.no_policy = 1;
}
}
goto out;
}


@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@ -48,21 +49,6 @@
#define MULTIPATH_MAX_CANDIDATES 40
/* interface to random number generation */
static unsigned int RANDOM_SEED = 93186752;
static inline unsigned int random(unsigned int ubound)
{
static unsigned int a = 1588635695,
q = 2,
r = 1117695901;
RANDOM_SEED = a*(RANDOM_SEED % q) - r*(RANDOM_SEED / q);
return RANDOM_SEED % ubound;
}
static void random_select_route(const struct flowi *flp,
struct rtable *first,
struct rtable **rp)
@ -84,7 +70,7 @@ static void random_select_route(const struct flowi *flp,
if (candidate_count > 1) {
unsigned char i = 0;
unsigned char candidate_no = (unsigned char)
random(candidate_count);
(random32() % candidate_count);
/* find chosen candidate and adjust GC data for all candidates
* to ensure they stay in cache


@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@ -84,18 +85,6 @@ struct multipath_route {
/* state: primarily weight per route information */
static struct multipath_bucket state[MULTIPATH_STATE_SIZE];
/* interface to random number generation */
static unsigned int RANDOM_SEED = 93186752;
static inline unsigned int random(unsigned int ubound)
{
static unsigned int a = 1588635695,
q = 2,
r = 1117695901;
RANDOM_SEED = a*(RANDOM_SEED % q) - r*(RANDOM_SEED / q);
return RANDOM_SEED % ubound;
}
static unsigned char __multipath_lookup_weight(const struct flowi *fl,
const struct rtable *rt)
{
@ -193,7 +182,7 @@ static void wrandom_select_route(const struct flowi *flp,
/* choose a weighted random candidate */
decision = first;
selector = random(power);
selector = random32() % power;
last_power = 0;
/* select candidate, adjust GC data and cleanup local state */
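
Both multipath hunks above (random_select_route() and wrandom_select_route()) drop the module-local linear congruential generator and use the shared kernel PRNG instead, reducing its output modulo the candidate count. A minimal sketch of that call pattern; pick_candidate() is a hypothetical helper, not part of the patch.

#include <linux/random.h>

/* Select one of 'count' equally weighted candidates. random32() returns a
 * pseudo-random u32 from the properly seeded kernel-wide generator,
 * replacing the per-file LCG removed above. */
static unsigned int pick_candidate(unsigned int count)
{
	return random32() % count;
}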


@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void)
{
struct tcp_md5sig_pool **pool = NULL;
spin_lock(&tcp_md5sig_pool_lock);
spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) {
pool = tcp_md5sig_pool;
tcp_md5sig_pool = NULL;
}
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
if (pool)
__tcp_free_md5sig_pool(pool);
}
@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
int alloc = 0;
retry:
spin_lock(&tcp_md5sig_pool_lock);
spin_lock_bh(&tcp_md5sig_pool_lock);
pool = tcp_md5sig_pool;
if (tcp_md5sig_users++ == 0) {
alloc = 1;
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
} else if (!pool) {
tcp_md5sig_users--;
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
cpu_relax();
goto retry;
} else
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
spin_lock(&tcp_md5sig_pool_lock);
spin_lock_bh(&tcp_md5sig_pool_lock);
if (!p) {
tcp_md5sig_users--;
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
return NULL;
}
pool = tcp_md5sig_pool;
if (pool) {
/* oops, it has already been assigned. */
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
__tcp_free_md5sig_pool(p);
} else {
tcp_md5sig_pool = pool = p;
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
}
}
return pool;
@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
struct tcp_md5sig_pool **p;
spin_lock(&tcp_md5sig_pool_lock);
spin_lock_bh(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)
tcp_md5sig_users++;
spin_unlock(&tcp_md5sig_pool_lock);
spin_unlock_bh(&tcp_md5sig_pool_lock);
return (p ? *per_cpu_ptr(p, cpu) : NULL);
}
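
The TCP hunks above convert every acquisition of tcp_md5sig_pool_lock from spin_lock()/spin_unlock() to the _bh variants. A minimal sketch of the reason, assuming a hypothetical lock that is taken from both process context and softirq context:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_users;		/* hypothetical shared counter */

/* Process-context path. Because the same lock can also be taken from
 * softirq context (for TCP MD5, the packet processing paths), bottom
 * halves must be disabled while it is held: if a softirq preempted this
 * critical section on the same CPU and tried to take example_lock again,
 * a plain spin_lock() here would deadlock. */
static void example_get(void)
{
	spin_lock_bh(&example_lock);
	example_users++;
	spin_unlock_bh(&example_lock);
}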


@ -111,6 +111,7 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
goto out;
iph = skb->nh.iph;
if (iph->protocol == IPPROTO_IPIP) {
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
ipv4_copy_dscp(iph, skb->h.ipiph);


@ -291,7 +291,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt.idev))
in_dev_put(xdst->u.rt.idev);
if (dst->xfrm->props.family == AF_INET && likely(xdst->u.rt.peer))
if (dst->xfrm && dst->xfrm->props.family == AF_INET && likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
xfrm_dst_destroy(xdst);
}


@ -32,6 +32,6 @@ obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_IPV6_SIT) += sit.o
obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
obj-y += exthdrs_core.o
obj-y += addrconf_core.o exthdrs_core.o
obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o


@ -211,74 +211,6 @@ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
#endif
const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
static inline unsigned ipv6_addr_scope2type(unsigned scope)
{
switch(scope) {
case IPV6_ADDR_SCOPE_NODELOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
IPV6_ADDR_LOOPBACK);
case IPV6_ADDR_SCOPE_LINKLOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) |
IPV6_ADDR_LINKLOCAL);
case IPV6_ADDR_SCOPE_SITELOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) |
IPV6_ADDR_SITELOCAL);
}
return IPV6_ADDR_SCOPE_TYPE(scope);
}
int __ipv6_addr_type(const struct in6_addr *addr)
{
__be32 st;
st = addr->s6_addr32[0];
/* Consider all addresses with the first three bits different of
000 and 111 as unicasts.
*/
if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
(st & htonl(0xE0000000)) != htonl(0xE0000000))
return (IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));
if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
/* multicast */
/* addr-select 3.1 */
return (IPV6_ADDR_MULTICAST |
ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr)));
}
if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */
if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */
if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
if (addr->s6_addr32[2] == 0) {
if (addr->s6_addr32[3] == 0)
return IPV6_ADDR_ANY;
if (addr->s6_addr32[3] == htonl(0x00000001))
return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */
return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
}
if (addr->s6_addr32[2] == htonl(0x0000ffff))
return (IPV6_ADDR_MAPPED |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
}
return (IPV6_ADDR_RESERVED |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
}
static void addrconf_del_timer(struct inet6_ifaddr *ifp)
{
if (del_timer(&ifp->timer))
@ -1910,6 +1842,7 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
struct inet6_dev *idev;
struct net_device *dev;
int scope;
u32 flags = RTF_EXPIRES;
ASSERT_RTNL();
@ -1925,9 +1858,10 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
scope = ipv6_addr_scope(pfx);
if (valid_lft == INFINITY_LIFE_TIME)
if (valid_lft == INFINITY_LIFE_TIME) {
ifa_flags |= IFA_F_PERMANENT;
else if (valid_lft >= 0x7FFFFFFF/HZ)
flags = 0;
} else if (valid_lft >= 0x7FFFFFFF/HZ)
valid_lft = 0x7FFFFFFF/HZ;
if (prefered_lft == 0)
@ -1945,6 +1879,8 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
ifp->tstamp = jiffies;
spin_unlock_bh(&ifp->lock);
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
jiffies_to_clock_t(valid_lft * HZ), flags);
addrconf_dad_start(ifp, 0);
in6_ifa_put(ifp);
addrconf_verify(0);
@ -2124,6 +2060,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr
ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, IFA_F_PERMANENT);
if (!IS_ERR(ifp)) {
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
addrconf_dad_start(ifp, 0);
in6_ifa_put(ifp);
}
@ -2240,6 +2177,14 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
int run_pending = 0;
switch(event) {
case NETDEV_REGISTER:
if (!idev) {
idev = ipv6_add_dev(dev);
if (!idev)
printk(KERN_WARNING "IPv6: add_dev failed for %s\n",
dev->name);
}
break;
case NETDEV_UP:
case NETDEV_CHANGE:
if (event == NETDEV_UP) {
@ -2538,10 +2483,6 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
addrconf_join_solict(dev, &ifp->addr);
if (ifp->prefix_len != 128 && (ifp->flags&IFA_F_PERMANENT))
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev, 0,
flags);
net_srandom(ifp->addr.s6_addr32[3]);
read_lock_bh(&idev->lock);
@ -2972,12 +2913,15 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
u32 prefered_lft, u32 valid_lft)
{
u32 flags = RTF_EXPIRES;
if (!valid_lft || (prefered_lft > valid_lft))
return -EINVAL;
if (valid_lft == INFINITY_LIFE_TIME)
if (valid_lft == INFINITY_LIFE_TIME) {
ifa_flags |= IFA_F_PERMANENT;
else if (valid_lft >= 0x7FFFFFFF/HZ)
flags = 0;
} else if (valid_lft >= 0x7FFFFFFF/HZ)
valid_lft = 0x7FFFFFFF/HZ;
if (prefered_lft == 0)
@ -2996,6 +2940,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
if (!(ifp->flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ifp);
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
jiffies_to_clock_t(valid_lft * HZ), flags);
addrconf_verify(0);
return 0;

net/ipv6/addrconf_core.c (new file)

@ -0,0 +1,76 @@
/*
* IPv6 library code, needed by static components when full IPv6 support is
* not configured or static.
*/
#include <net/ipv6.h>
#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
static inline unsigned ipv6_addr_scope2type(unsigned scope)
{
switch(scope) {
case IPV6_ADDR_SCOPE_NODELOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
IPV6_ADDR_LOOPBACK);
case IPV6_ADDR_SCOPE_LINKLOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) |
IPV6_ADDR_LINKLOCAL);
case IPV6_ADDR_SCOPE_SITELOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) |
IPV6_ADDR_SITELOCAL);
}
return IPV6_ADDR_SCOPE_TYPE(scope);
}
int __ipv6_addr_type(const struct in6_addr *addr)
{
__be32 st;
st = addr->s6_addr32[0];
/* Consider all addresses with the first three bits different of
000 and 111 as unicasts.
*/
if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
(st & htonl(0xE0000000)) != htonl(0xE0000000))
return (IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));
if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
/* multicast */
/* addr-select 3.1 */
return (IPV6_ADDR_MULTICAST |
ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr)));
}
if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */
if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */
if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
if (addr->s6_addr32[2] == 0) {
if (addr->s6_addr32[3] == 0)
return IPV6_ADDR_ANY;
if (addr->s6_addr32[3] == htonl(0x00000001))
return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */
return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
}
if (addr->s6_addr32[2] == htonl(0x0000ffff))
return (IPV6_ADDR_MAPPED |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
}
return (IPV6_ADDR_RESERVED |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
}
EXPORT_SYMBOL(__ipv6_addr_type);


@ -929,25 +929,28 @@ static void __exit inet6_exit(void)
{
/* First of all disallow new sockets creation. */
sock_unregister(PF_INET6);
#ifdef CONFIG_PROC_FS
if6_proc_exit();
ac6_proc_exit();
ipv6_misc_proc_exit();
udp6_proc_exit();
udplite6_proc_exit();
tcp6_proc_exit();
raw6_proc_exit();
#endif
/* Cleanup code parts. */
ipv6_packet_cleanup();
#ifdef CONFIG_IPV6_MIP6
mip6_fini();
#endif
/* Cleanup code parts. */
ip6_flowlabel_cleanup();
addrconf_cleanup();
ip6_flowlabel_cleanup();
ip6_route_cleanup();
ipv6_packet_cleanup();
igmp6_cleanup();
#ifdef CONFIG_PROC_FS
/* Cleanup code parts. */
if6_proc_exit();
ac6_proc_exit();
ipv6_misc_proc_exit();
udplite6_proc_exit();
udp6_proc_exit();
tcp6_proc_exit();
raw6_proc_exit();
#endif
ipv6_netfilter_fini();
igmp6_cleanup();
ndisc_cleanup();
icmpv6_cleanup();
#ifdef CONFIG_SYSCTL
@ -955,6 +958,7 @@ static void __exit inet6_exit(void)
#endif
cleanup_ipv6_mibs();
proto_unregister(&rawv6_prot);
proto_unregister(&udplitev6_prot);
proto_unregister(&udpv6_prot);
proto_unregister(&tcpv6_prot);
}
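
The inet6_exit() hunks above reorder the teardown so that it runs roughly in the reverse order of inet6_init(). A minimal sketch of that reverse-order convention, with hypothetical subsystem names:

#include <linux/init.h>

/* Hypothetical subsystems; none of these symbols exist in the kernel. */
extern int subsys_a_init(void);
extern void subsys_a_cleanup(void);
extern int subsys_b_init(void);		/* depends on A being up */
extern void subsys_b_cleanup(void);

static int __init example_init(void)
{
	int err = subsys_a_init();
	if (err)
		return err;
	err = subsys_b_init();
	if (err)
		subsys_a_cleanup();	/* undo in reverse on failure */
	return err;
}

static void __exit example_exit(void)
{
	subsys_b_cleanup();	/* tear down B before A, reverse of init */
	subsys_a_cleanup();
}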


@ -727,11 +727,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (skb->dst && mtu < dst_mtu(skb->dst)) {
struct rt6_info *rt = (struct rt6_info *) skb->dst;
rt->rt6i_flags |= RTF_MODIFIED;
rt->u.dst.metrics[RTAX_MTU-1] = mtu;
}
if (skb->dst)
skb->dst->ops->update_pmtu(skb->dst, mtu);
if (skb->len > mtu) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
goto tx_err_dst_release;


@ -6,7 +6,6 @@
#include <net/ip6_route.h>
#include <net/xfrm.h>
EXPORT_SYMBOL(__ipv6_addr_type);
EXPORT_SYMBOL(icmpv6_send);
EXPORT_SYMBOL(icmpv6_statistics);
EXPORT_SYMBOL(icmpv6_err_convert);


@ -189,7 +189,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
case AF_INET6:
ipv6_addr_copy(&fl_tunnel.fl6_dst, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_dst));
ipv6_addr_copy(&fl_tunnel.fl6_src, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_src));
ipv6_addr_copy(&fl_tunnel.fl6_src, __xfrm6_bundle_addr_local(xfrm[i], &fl->fl6_src));
break;
default:
BUG_ON(1);


@ -42,19 +42,6 @@
#include <net/irda/irttp.h> /* irttp_init */
#include <net/irda/irda_device.h> /* irda_device_init */
/* irproc.c */
extern void irda_proc_register(void);
extern void irda_proc_unregister(void);
/* irsysctl.c */
extern int irda_sysctl_register(void);
extern void irda_sysctl_unregister(void);
/* af_irda.c */
extern int irsock_init(void);
extern void irsock_cleanup(void);
/* irlap_frame.c */
extern int irlap_driver_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
/*
* Module parameters
*/


@ -227,17 +227,14 @@ struct packet_skb_cb {
#ifdef CONFIG_PACKET_MMAP
static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position)
{
unsigned int pg_vec_pos, frame_offset;
char *frame;
pg_vec_pos = position / po->frames_per_block;
frame_offset = position % po->frames_per_block;
frame = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
return frame;
return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size));
}
#endif
@ -639,7 +636,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
}
spin_lock(&sk->sk_receive_queue.lock);
h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
h = packet_lookup_frame(po, po->head);
if (h->tp_status)
goto ring_is_full;
@ -1473,7 +1470,7 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
{
struct sock *sk;
struct hlist_node *node;
struct net_device *dev = (struct net_device*)data;
struct net_device *dev = data;
read_lock(&packet_sklist_lock);
sk_for_each(sk, node, &packet_sklist) {
@ -1588,7 +1585,7 @@ static unsigned int packet_poll(struct file * file, struct socket *sock,
unsigned last = po->head ? po->head-1 : po->frame_max;
struct tpacket_hdr *h;
h = (struct tpacket_hdr *)packet_lookup_frame(po, last);
h = packet_lookup_frame(po, last);
if (h->tp_status)
mask |= POLLIN | POLLRDNORM;


@ -396,6 +396,19 @@ void sctp_retransmit_mark(struct sctp_outq *q,
if (sctp_chunk_abandoned(chunk)) {
list_del_init(lchunk);
sctp_insert_list(&q->abandoned, lchunk);
/* If this chunk has not been previousely acked,
* stop considering it 'outstanding'. Our peer
* will most likely never see it since it will
* not be retransmitted
*/
if (!chunk->tsn_gap_acked) {
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
q->asoc->peer.rwnd += (sctp_data_size(chunk) +
sizeof(struct sk_buff));
}
continue;
}
@ -1244,6 +1257,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
if (sctp_chunk_abandoned(tchunk)) {
/* Move the chunk to abandoned list. */
sctp_insert_list(&q->abandoned, lchunk);
/* If this chunk has not been acked, stop
* considering it as 'outstanding'.
*/
if (!tchunk->tsn_gap_acked) {
tchunk->transport->flight_size -=
sctp_data_size(tchunk);
q->outstanding_bytes -= sctp_data_size(tchunk);
}
continue;
}
@ -1695,11 +1717,6 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
*/
if (TSN_lte(tsn, ctsn)) {
list_del_init(lchunk);
if (!chunk->tsn_gap_acked) {
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
}
sctp_chunk_free(chunk);
} else {
if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {


@ -4605,12 +4605,12 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
* sent as soon as cwnd allows (normally when a SACK arrives).
*/
/* NB: Rules E4 and F1 are implicit in R1. */
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
/* Do some failure management (Section 8.2). */
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
/* NB: Rules E4 and F1 are implicit in R1. */
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}