|
@@ -113,7 +113,6 @@ struct lowpan_dev_record {
 
 struct lowpan_fragment {
 	struct sk_buff		*skb;		/* skb to be assembled */
-	spinlock_t		lock;		/* concurency lock */
 	u16			length;		/* length to be assemled */
 	u32			bytes_rcv;	/* bytes received */
 	u16			tag;		/* current fragment tag */
|
@@ -637,10 +636,7 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
 
 	pr_debug("timer expired for frame with tag %d\n", entry->tag);
 
-	spin_lock(&flist_lock);
 	list_del(&entry->list);
-	spin_unlock(&flist_lock);
-
 	dev_kfree_skb(entry->skb);
 	kfree(entry);
 }
|
|
@@ -727,7 +723,7 @@ lowpan_process_data(struct sk_buff *skb)
 	 * check if frame assembling with the same tag is
 	 * already in progress
 	 */
-	spin_lock(&flist_lock);
+	spin_lock_bh(&flist_lock);
 
 	list_for_each_entry(frame, &lowpan_fragments, list)
 		if (frame->tag == tag) {
|
|
@@ -761,9 +757,9 @@ lowpan_process_data(struct sk_buff *skb)
 		if ((frame->bytes_rcv == frame->length) &&
 		    frame->timer.expires > jiffies) {
 			/* if timer haven't expired - first of all delete it */
-			del_timer(&frame->timer);
+			del_timer_sync(&frame->timer);
 			list_del(&frame->list);
-			spin_unlock(&flist_lock);
+			spin_unlock_bh(&flist_lock);
 
 			dev_kfree_skb(skb);
 			skb = frame->skb;
|
@@ -774,7 +770,7 @@ lowpan_process_data(struct sk_buff *skb)
 
 			break;
 		}
-	spin_unlock(&flist_lock);
+	spin_unlock_bh(&flist_lock);
 
 	return kfree_skb(skb), 0;
 }
|
|
@@ -929,7 +925,7 @@ lowpan_process_data(struct sk_buff *skb)
 	return lowpan_skb_deliver(skb, &hdr);
 
 unlock_and_drop:
-	spin_unlock(&flist_lock);
+	spin_unlock_bh(&flist_lock);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;
|
|
@@ -1196,19 +1192,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
 	struct lowpan_dev_record *entry, *tmp;
-	struct lowpan_fragment *frame, *tframe;
 
 	ASSERT_RTNL();
 
-	spin_lock(&flist_lock);
-	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
-		del_timer(&frame->timer);
-		list_del(&frame->list);
-		dev_kfree_skb(frame->skb);
-		kfree(frame);
-	}
-	spin_unlock(&flist_lock);
-
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
 		if (entry->ldev == dev) {
|
@@ -1264,9 +1250,24 @@ out:
 
 static void __exit lowpan_cleanup_module(void)
 {
+	struct lowpan_fragment *frame, *tframe;
+
 	lowpan_netlink_fini();
 
 	dev_remove_pack(&lowpan_packet_type);
+
+	/* Now 6lowpan packet_type is removed, so no new fragments are
+	 * expected on RX, therefore that's the time to clean incomplete
+	 * fragments.
+	 */
+	spin_lock_bh(&flist_lock);
+	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+		del_timer_sync(&frame->timer);
+		list_del(&frame->list);
+		dev_kfree_skb(frame->skb);
+		kfree(frame);
+	}
+	spin_unlock_bh(&flist_lock);
 }
 
 module_init(lowpan_init_module);