@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -34,6 +35,8 @@ struct caif_device_entry {
 	struct list_head list;
 	struct net_device *netdev;
 	int __percpu *pcpu_refcnt;
+	spinlock_t flow_lock;
+	bool xoff;
 };
 
 struct caif_device_entry_list {
@@ -48,6 +51,7 @@ struct caif_net {
 };
 
 static int caif_net_id;
+static int q_high = 50; /* Percent */
 
 struct cfcnfg *get_cfcnfg(struct net *net)
 {
@@ -126,17 +130,94 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	return NULL;
 }
 
+void caif_flow_cb(struct sk_buff *skb)
+{
+	struct caif_device_entry *caifd;
+	bool send_xoff;
+
+	WARN_ON(skb->dev == NULL);
+
+	rcu_read_lock();
+	caifd = caif_get(skb->dev);
+	caifd_hold(caifd);
+	rcu_read_unlock();
+
+	spin_lock_bh(&caifd->flow_lock);
+	send_xoff = caifd->xoff;
+	caifd->xoff = 0;
+	spin_unlock_bh(&caifd->flow_lock);
+
+	if (send_xoff)
+		caifd->layer.up->
+			ctrlcmd(caifd->layer.up,
+				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+				caifd->layer.id);
+	caifd_put(caifd);
+}
+
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
-	int err;
+	int err, high = 0, qlen = 0;
+	struct caif_dev_common *caifdev;
 	struct caif_device_entry *caifd =
 	    container_of(layer, struct caif_device_entry, layer);
 	struct sk_buff *skb;
+	struct netdev_queue *txq;
+
+	rcu_read_lock_bh();
 
 	skb = cfpkt_tonative(pkt);
 	skb->dev = caifd->netdev;
 	skb_reset_network_header(skb);
 	skb->protocol = htons(ETH_P_CAIF);
+	caifdev = netdev_priv(caifd->netdev);
+
+	/* Check if we need to handle xoff */
+	if (likely(caifd->netdev->tx_queue_len == 0))
+		goto noxoff;
+
+	if (unlikely(caifd->xoff))
+		goto noxoff;
+
+	if (likely(!netif_queue_stopped(caifd->netdev))) {
+		/* If we run with a TX queue, check if the queue is too long */
+		txq = netdev_get_tx_queue(skb->dev, 0);
+		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+		if (likely(qlen == 0))
+			goto noxoff;
+
+		high = (caifd->netdev->tx_queue_len * q_high) / 100;
+		if (likely(qlen < high))
+			goto noxoff;
+	}
+
+	/* Hold lock while accessing xoff */
+	spin_lock_bh(&caifd->flow_lock);
+	if (caifd->xoff) {
+		spin_unlock_bh(&caifd->flow_lock);
+		goto noxoff;
+	}
+
+	/*
+	 * Handle flow off: we temporarily hi-jack this skb's
+	 * destructor function, replacing it with our own flow-on
+	 * callback. skb_orphan() below releases the original
+	 * destructor; the callback signals flow-on when the skb is freed.
+	 */
+
+	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
+			netif_queue_stopped(caifd->netdev),
+			qlen, high);
+	caifd->xoff = 1;
+	spin_unlock_bh(&caifd->flow_lock);
+	skb_orphan(skb);
+	skb->destructor = caif_flow_cb;
+
+	caifd->layer.up->ctrlcmd(caifd->layer.up,
+				_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+				caifd->layer.id);
+noxoff:
+	rcu_read_unlock_bh();
 
 	err = dev_queue_xmit(skb);
 	if (err > 0)
@@ -232,6 +313,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
 	if (!caifd)
 		return;
 	*layer = &caifd->layer;
+	spin_lock_init(&caifd->flow_lock);
 
 	switch (caifdev->link_select) {
 	case CAIF_LINK_HIGH_BANDW:
@@ -316,6 +398,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 		break;
 	}
 
+	caifd->xoff = 0;
 	cfcnfg_set_phy_state(cfg, &caifd->layer, true);
 	rcu_read_unlock();
 
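
Editor's note on the mechanism, for readers of this patch. transmit() compares the
qdisc backlog against q_high percent of the device's tx_queue_len: with the default
q_high of 50, a device with tx_queue_len 1000 goes flow-off once 500 packets sit in
the qdisc. Flow-on relies on hijacking skb->destructor: skb_orphan() drops the
socket's accounting (and any existing destructor), then caif_flow_cb is installed so
that freeing the skb signals flow-on. The fragment below is a minimal sketch of that
pattern in isolation, not CAIF code; the example_* names are hypothetical, and an
atomic flag stands in for the patch's flow_lock/xoff pair.

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static atomic_t example_xoff = ATOMIC_INIT(0);

/* Destructor callback: invoked from kfree_skb() once the driver has
 * consumed the skb, i.e. the device queue has drained this far.
 */
static void example_flow_on(struct sk_buff *skb)
{
	atomic_set(&example_xoff, 0);
	/* ...signal flow-on to the layer above here... */
}

static int example_xmit(struct sk_buff *skb, int qlen, int high)
{
	if (qlen >= high && !atomic_xchg(&example_xoff, 1)) {
		/* skb_orphan() calls and clears any current destructor
		 * (releasing socket accounting), so ours must be
		 * installed after it, never before.
		 */
		skb_orphan(skb);
		skb->destructor = example_flow_on;
		/* ...signal flow-off to the layer above here... */
	}
	return dev_queue_xmit(skb);
}

The real patch keeps the spinlock-protected xoff bool rather than an atomic because
the same flag is also cleared from the device-notify path when the interface comes up.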