wme.c

/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* Default mapping in classifier to work with default
 * queue setup.
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
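/* Editor's note (illustration, not part of the original file): the table
 * above is indexed by 802.1d priority and returns the hardware queue/AC,
 * with lower numbers meaning higher priority in this version of mac80211
 * (0 = voice, 1 = video, 2 = best effort, 3 = background). For example,
 * priorities 6 and 7 map to queue 0 (VO), while priorities 1 and 2 map to
 * queue 3 (BK).
 */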
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* Given a data frame determine the 802.1p/1d tag to use. */
static unsigned int classify_1d(struct sk_buff *skb)
{
        unsigned int dscp;

        /* skb->priority values from 256->263 are magic values to
         * directly indicate a specific 802.1d priority. This is used
         * to allow 802.1d priority to be passed directly in from VLAN
         * tags, etc.
         */
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                dscp = ip_hdr(skb)->tos & 0xfc;
                break;

        default:
                return 0;
        }

        return dscp >> 5;
}
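/* Editor's note (worked example, not part of the original file): for an
 * IPv4 packet with TOS 0xb8 (DSCP 46, expedited forwarding), classify_1d()
 * computes 0xb8 & 0xfc = 0xb8 and 0xb8 >> 5 = 5, i.e. 802.1d priority 5
 * (video); the default TOS 0x00 yields priority 0 (best effort). An
 * skb->priority of 262 bypasses the DSCP lookup entirely and returns
 * 262 - 256 = 6 (voice).
 */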
static int wme_downgrade_ac(struct sk_buff *skb)
{
        switch (skb->priority) {
        case 6:
        case 7:
                skb->priority = 5; /* VO -> VI */
                return 0;
        case 4:
        case 5:
                skb->priority = 3; /* VI -> BE */
                return 0;
        case 0:
        case 3:
                skb->priority = 2; /* BE -> BK */
                return 0;
        default:
                return -1;
        }
}
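/* Editor's note (illustration, not part of the original file): repeated
 * calls walk a frame down the access categories one step at a time,
 * VO (6/7) -> VI (5) -> BE (3) -> BK (2). Priorities 1 and 2 are already
 * background, so the default case returns -1 and the caller stops
 * downgrading.
 */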
/* Indicate which queue to use. */
static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_data(hdr->frame_control)) {
                /* management frames go on the AC_VO queue, but are sent
                 * without QoS control fields */
                return 0;
        }

        if (0 /* injected */) {
                /* use AC from radiotap */
        }

        if (!ieee80211_is_data_qos(hdr->frame_control)) {
                skb->priority = 0; /* required for correct WPA/11i MIC */
                return ieee802_1d_to_ac[skb->priority];
        }

        /* use the data classifier to determine what 802.1d tag the
         * data frame has */
        skb->priority = classify_1d(skb);

        /* if we are a client, verify that ACM is not set for this AC */
        while (unlikely(local->wmm_acm & BIT(skb->priority))) {
                if (wme_downgrade_ac(skb)) {
                        /* The old code would drop the packet in this
                         * case.
                         */
                        return 0;
                }
        }

        /* look up which queue to use for frames with this 1d tag */
        return ieee802_1d_to_ac[skb->priority];
}
u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        struct ieee80211_master_priv *mpriv = netdev_priv(dev);
        struct ieee80211_local *local = mpriv->local;
        struct ieee80211_hw *hw = &local->hw;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct sta_info *sta;
        u16 queue;
        u8 tid;

        queue = classify80211(local, skb);
        if (unlikely(queue >= local->hw.queues))
                queue = local->hw.queues - 1;

        if (skb->requeue) {
                if (!hw->ampdu_queues)
                        return queue;

                rcu_read_lock();
                sta = sta_info_get(local, hdr->addr1);
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                if (sta) {
                        int ampdu_queue = sta->tid_to_tx_q[tid];

                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
                            test_bit(ampdu_queue, local->queue_pool))
                                queue = ampdu_queue;
                }
                rcu_read_unlock();

                return queue;
        }

        /* Now we know the 1d priority, fill in the QoS header if
         * there is one.
         */
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *p = ieee80211_get_qos_ctl(hdr);
                u8 ack_policy = 0;

                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                if (local->wifi_wme_noack_test)
                        ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
                                        QOS_CONTROL_ACK_POLICY_SHIFT;

                /* qos header is 2 bytes, second reserved */
                *p++ = ack_policy | tid;
                *p = 0;

                if (!hw->ampdu_queues)
                        return queue;

                rcu_read_lock();

                sta = sta_info_get(local, hdr->addr1);
                if (sta) {
                        int ampdu_queue = sta->tid_to_tx_q[tid];

                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
                            test_bit(ampdu_queue, local->queue_pool))
                                queue = ampdu_queue;
                }

                rcu_read_unlock();
        }

        return queue;
}
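/* Editor's note (illustration, not part of the original file): the two bytes
 * written in the data-QoS branch of ieee80211_select_queue() above form the
 * 802.11 QoS control field as used here: the low bits of the first byte
 * carry the TID (the masked 802.1d priority), the ack-policy bits are OR'ed
 * in at QOS_CONTROL_ACK_POLICY_SHIFT (defined in wme.h) when the no-ack test
 * mode is enabled, and the second byte is left zeroed.
 */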
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                               struct sta_info *sta, u16 tid)
{
        int i;

        /* XXX: currently broken due to cb/requeue use */
        return -EPERM;

        /* prepare the filter and save it for the SW queue
         * matching the received HW queue */

        if (!local->hw.ampdu_queues)
                return -EPERM;

        /* try to get a Qdisc from the pool */
        for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
                if (!test_and_set_bit(i, local->queue_pool)) {
                        ieee80211_stop_queue(local_to_hw(local), i);
                        sta->tid_to_tx_q[tid] = i;

                        /* If there are already pending packets on this tid,
                         * we first need to drain them on the previous queue,
                         * since HT requires strict ordering */
#ifdef CONFIG_MAC80211_HT_DEBUG
                        if (net_ratelimit())
                                printk(KERN_DEBUG "allocated aggregation queue"
                                        " %d tid %d addr %pM pool=0x%lX\n",
                                        i, tid, sta->sta.addr,
                                        local->queue_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
                        return 0;
                }

        return -EAGAIN;
}
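/* Editor's note (illustration, not part of the original file): queue numbers
 * below local->hw.queues are the normal AC queues; the range from hw.queues
 * up to ieee80211_num_queues() is the pool of aggregation (A-MPDU) queues
 * tracked by the local->queue_pool bitmap. ieee80211_ht_agg_queue_add()
 * claims a free bit from that pool and records it in sta->tid_to_tx_q[tid];
 * ieee80211_ht_agg_queue_remove() below clears the bit again and resets the
 * mapping to ieee80211_num_queues(), which ieee80211_select_queue() treats
 * as "no aggregation queue".
 */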
/**
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
                                   struct sta_info *sta, u16 tid,
                                   u8 requeue)
{
        int agg_queue = sta->tid_to_tx_q[tid];
        struct ieee80211_hw *hw = &local->hw;

        /* return the qdisc to the pool */
        clear_bit(agg_queue, local->queue_pool);
        sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

        if (requeue) {
                ieee80211_requeue(local, agg_queue);
        } else {
                struct netdev_queue *txq;
                spinlock_t *root_lock;
                struct Qdisc *q;

                txq = netdev_get_tx_queue(local->mdev, agg_queue);
                q = rcu_dereference(txq->qdisc);
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);
                qdisc_reset(q);
                spin_unlock_bh(root_lock);
        }
}
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
        struct sk_buff_head list;
        spinlock_t *root_lock;
        struct Qdisc *qdisc;
        u32 len;

        rcu_read_lock_bh();

        qdisc = rcu_dereference(txq->qdisc);
        if (!qdisc || !qdisc->dequeue)
                goto out_unlock;

        skb_queue_head_init(&list);

        root_lock = qdisc_root_lock(qdisc);
        spin_lock(root_lock);
        for (len = qdisc->q.qlen; len > 0; len--) {
                struct sk_buff *skb = qdisc->dequeue(qdisc);

                if (skb)
                        __skb_queue_tail(&list, skb);
        }
        spin_unlock(root_lock);

        for (len = list.qlen; len > 0; len--) {
                struct sk_buff *skb = __skb_dequeue(&list);
                u16 new_queue;

                BUG_ON(!skb);
                new_queue = ieee80211_select_queue(local->mdev, skb);
                skb_set_queue_mapping(skb, new_queue);

                txq = netdev_get_tx_queue(local->mdev, new_queue);

                qdisc = rcu_dereference(txq->qdisc);
                root_lock = qdisc_root_lock(qdisc);

                spin_lock(root_lock);
                qdisc_enqueue_root(skb, qdisc);
                spin_unlock(root_lock);
        }

out_unlock:
        rcu_read_unlock_bh();
}
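/* Editor's note (illustration, not part of the original file):
 * ieee80211_requeue() drains every skb from the old queue's qdisc into a
 * private list, runs each one through ieee80211_select_queue() again so it
 * lands on whatever queue the current classification picks, and re-enqueues
 * it there. ieee80211_ht_agg_queue_remove() uses it when an aggregation
 * queue is torn down and its pending frames have to flow back to the normal
 * AC queues.
 */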