/* net/mac80211/wme.c — 802.11e/WME QoS queue classification and
 * HT aggregation queue management. */
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  8. #include <linux/netdevice.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/module.h>
  11. #include <linux/if_arp.h>
  12. #include <linux/types.h>
  13. #include <net/ip.h>
  14. #include <net/pkt_sched.h>
  15. #include <net/mac80211.h>
  16. #include "ieee80211_i.h"
  17. #include "wme.h"
/* Default mapping in classifier to work with default
 * queue setup: index is the 802.1d priority (0-7), value is the
 * access-category queue (0 = highest-priority AC).
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* LLC/SNAP header for an IPv4 payload: DSAP/SSAP 0xAA, control 0x03 (UI),
 * OUI 00:00:00, ethertype 0x0800 (ETH_P_IP).
 * NOTE(review): not referenced by any code visible in this file —
 * confirm against the rest of the file before removing. */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
  23. /* Given a data frame determine the 802.1p/1d tag to use. */
  24. static unsigned int classify_1d(struct sk_buff *skb)
  25. {
  26. unsigned int dscp;
  27. /* skb->priority values from 256->263 are magic values to
  28. * directly indicate a specific 802.1d priority. This is used
  29. * to allow 802.1d priority to be passed directly in from VLAN
  30. * tags, etc.
  31. */
  32. if (skb->priority >= 256 && skb->priority <= 263)
  33. return skb->priority - 256;
  34. switch (skb->protocol) {
  35. case __constant_htons(ETH_P_IP):
  36. dscp = ip_hdr(skb)->tos & 0xfc;
  37. break;
  38. default:
  39. return 0;
  40. }
  41. if (dscp & 0x1c)
  42. return 0;
  43. return dscp >> 5;
  44. }
  45. static int wme_downgrade_ac(struct sk_buff *skb)
  46. {
  47. switch (skb->priority) {
  48. case 6:
  49. case 7:
  50. skb->priority = 5; /* VO -> VI */
  51. return 0;
  52. case 4:
  53. case 5:
  54. skb->priority = 3; /* VI -> BE */
  55. return 0;
  56. case 0:
  57. case 3:
  58. skb->priority = 2; /* BE -> BK */
  59. return 0;
  60. default:
  61. return -1;
  62. }
  63. }
/* Indicate which queue to use for this frame.
 *
 * Returns the access-category queue index. Non-data frames always go to
 * queue 0; QoS data frames get an 802.1d tag from classify_1d(), which
 * is then downgraded while admission control (ACM) is mandated for the
 * chosen AC, and finally mapped through ieee802_1d_to_ac[].
 * Side effect: sets skb->priority for data frames.
 */
static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
		/* NOTE(review): deliberately dead branch — placeholder
		 * for injected-frame handling, kept for a future patch. */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* The old code would drop the packet in this
			 * case. Instead, fall back to queue 0 (AC_VO
			 * slot of the default mapping). */
			return 0;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
/* Select the TX queue for an outgoing frame and, for QoS data frames,
 * fill in the 2-byte QoS control field in the 802.11 header.
 *
 * Starts from classify80211()'s AC queue (clamped to the hardware queue
 * count), then — under rcu_read_lock — checks whether the destination
 * station has an active aggregation queue for the frame's TID and, if
 * so, redirects to it and sets IEEE80211_TX_CTL_AMPDU (cleared
 * otherwise). Frames flagged IEEE80211_TX_CTL_REQUEUE skip the QoS
 * header rewrite since it was already done on the first pass.
 */
u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sta_info *sta;
	u16 queue;
	u8 tid;

	queue = classify80211(skb, dev);
	/* Clamp to the number of queues the hardware actually has. */
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		/* Requeued frame: only redo the aggregation-queue
		 * lookup; the QoS header was filled in previously. */
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (sta) {
			struct ieee80211_hw *hw = &local->hw;
			int ampdu_queue = sta->tid_to_tx_q[tid];

			/* Redirect only to a queue that is both in range
			 * and currently allocated from the pool. */
			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
			    test_bit(ampdu_queue, local->queue_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();

		return queue;
	}

	/* Now we know the 1d priority, fill in the QoS header if
	 * there is one.
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;

		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		/* Debug/test hook: send QoS frames with no-ack policy. */
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			struct ieee80211_hw *hw = &local->hw;

			/* Same aggregation-queue redirect as the
			 * requeue path above. */
			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
			    test_bit(ampdu_queue, local->queue_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();
	}

	return queue;
}
/* Allocate a software aggregation queue from the pool for (sta, tid).
 *
 * Returns 0 on success, -EPERM when aggregation queues are unavailable,
 * -EAGAIN when the pool is exhausted.
 *
 * NOTE: the function is currently disabled — it unconditionally returns
 * -EPERM before doing any work (see the XXX below); the remainder of the
 * body is intentionally unreachable until the cb/requeue issue is fixed.
 */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;

	/* XXX: currently broken due to cb/requeue use */
	return -EPERM;

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
		if (!test_and_set_bit(i, local->queue_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit()) {
				DECLARE_MAC_BUF(mac);
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					local->queue_pool[0]);
			}
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
/**
 * Release the aggregation queue held by (sta, tid) back to the pool.
 *
 * If @requeue is set, the packets still sitting on the aggregation
 * queue are reclassified and re-enqueued via ieee80211_requeue();
 * otherwise the qdisc is simply reset, dropping them.
 *
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	int agg_queue = sta->tid_to_tx_q[tid];
	struct ieee80211_hw *hw = &local->hw;

	/* return the qdisc to the pool */
	clear_bit(agg_queue, local->queue_pool);
	/* ieee80211_num_queues(hw) is an out-of-range sentinel meaning
	 * "no aggregation queue assigned" for this tid. */
	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

	if (requeue) {
		ieee80211_requeue(local, agg_queue);
	} else {
		struct netdev_queue *txq;
		spinlock_t *root_lock;
		struct Qdisc *q;

		txq = netdev_get_tx_queue(local->mdev, agg_queue);
		q = rcu_dereference(txq->qdisc);
		root_lock = qdisc_lock(q);

		/* Drop anything still queued on the released queue. */
		spin_lock_bh(root_lock);
		qdisc_reset(q);
		spin_unlock_bh(root_lock);
	}
}
/* Drain every packet from @queue's qdisc and re-enqueue each one on the
 * queue ieee80211_select_queue() now picks for it.
 *
 * Two phases, so no two qdisc root locks are ever held at once:
 * first dequeue everything into a private list under the source qdisc's
 * root lock, then reclassify and enqueue each skb under its new
 * destination qdisc's root lock. All under rcu_read_lock_bh() because
 * txq->qdisc is RCU-protected.
 */
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
	struct sk_buff_head list;
	spinlock_t *root_lock;
	struct Qdisc *qdisc;
	u32 len;

	rcu_read_lock_bh();
	qdisc = rcu_dereference(txq->qdisc);
	if (!qdisc || !qdisc->dequeue)
		goto out_unlock;

	skb_queue_head_init(&list);

	/* Phase 1: drain the source qdisc into a local list. */
	root_lock = qdisc_root_lock(qdisc);
	spin_lock(root_lock);
	for (len = qdisc->q.qlen; len > 0; len--) {
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb)
			__skb_queue_tail(&list, skb);
	}
	spin_unlock(root_lock);

	/* Phase 2: reclassify each frame and hand it to its new queue. */
	for (len = list.qlen; len > 0; len--) {
		struct sk_buff *skb = __skb_dequeue(&list);
		u16 new_queue;

		BUG_ON(!skb);
		new_queue = ieee80211_select_queue(local->mdev, skb);
		skb_set_queue_mapping(skb, new_queue);

		txq = netdev_get_tx_queue(local->mdev, new_queue);

		qdisc = rcu_dereference(txq->qdisc);
		root_lock = qdisc_root_lock(qdisc);

		spin_lock(root_lock);
		qdisc_enqueue_root(skb, qdisc);
		spin_unlock(root_lock);
	}

out_unlock:
	rcu_read_unlock_bh();
}