wme.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701
  1. /*
  2. * Copyright 2004, Instant802 Networks, Inc.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #include <linux/netdevice.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/module.h>
  11. #include <linux/if_arp.h>
  12. #include <linux/types.h>
  13. #include <net/ip.h>
  14. #include <net/pkt_sched.h>
  15. #include <net/mac80211.h>
  16. #include "ieee80211_i.h"
  17. #include "wme.h"
/* maximum number of hardware queues we support. */
#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
/* current number of hardware queues we support. */
#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)

/*
 * Default mapping in classifier to work with default
 * queue setup: 802.1d priority (index 0-7) -> access category queue.
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* private state of the mac80211 root qdisc */
struct ieee80211_sched_data
{
	/* bitmap of hardware queues currently reserved (set bit = in use) */
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	/* user-configured tc classifier chain, NULL if none */
	struct tcf_proto *filter_list;
	/* one child qdisc per hardware queue */
	struct Qdisc *queues[QD_MAX_QUEUES];
	/* frames the driver pushed back, served before the child qdiscs */
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};

/* LLC/SNAP header announcing an IPv4 payload (EtherType 0x0800) */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	/* use the top three bits of the DSCP field as the 802.1d tag,
	 * but only when the lower DSCP bits are clear (class selector
	 * codepoints); anything else falls back to best effort (0) */
	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}
  66. static inline int wme_downgrade_ac(struct sk_buff *skb)
  67. {
  68. switch (skb->priority) {
  69. case 6:
  70. case 7:
  71. skb->priority = 5; /* VO -> VI */
  72. return 0;
  73. case 4:
  74. case 5:
  75. skb->priority = 3; /* VI -> BE */
  76. return 0;
  77. case 0:
  78. case 3:
  79. skb->priority = 2; /* BE -> BK */
  80. return 0;
  81. default:
  82. return -1;
  83. }
  84. }
  85. /* positive return value indicates which queue to use
  86. * negative return value indicates to drop the frame */
  87. static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
  88. {
  89. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  90. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  91. unsigned short fc = le16_to_cpu(hdr->frame_control);
  92. int qos;
  93. /* see if frame is data or non data frame */
  94. if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
  95. /* management frames go on AC_VO queue, but are sent
  96. * without QoS control fields */
  97. return 0;
  98. }
  99. if (0 /* injected */) {
  100. /* use AC from radiotap */
  101. }
  102. /* is this a QoS frame? */
  103. qos = fc & IEEE80211_STYPE_QOS_DATA;
  104. if (!qos) {
  105. skb->priority = 0; /* required for correct WPA/11i MIC */
  106. return ieee802_1d_to_ac[skb->priority];
  107. }
  108. /* use the data classifier to determine what 802.1d tag the
  109. * data frame has */
  110. skb->priority = classify_1d(skb, qd);
  111. /* in case we are a client verify acm is not set for this ac */
  112. while (unlikely(local->wmm_acm & BIT(skb->priority))) {
  113. if (wme_downgrade_ac(skb)) {
  114. /* No AC with lower priority has acm=0, drop packet. */
  115. return -1;
  116. }
  117. }
  118. /* look up which queue to use for frames with this 1d tag */
  119. return ieee802_1d_to_ac[skb->priority];
  120. }
/* Root-qdisc enqueue: pick the hardware queue for a frame, fill in the
 * QoS control field if the frame carries one, redirect to a reserved
 * aggregation queue when the destination STA/TID has one, and hand the
 * frame to the matching child qdisc.
 * Returns NET_XMIT_SUCCESS or the child's error code. */
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err;
	u16 queue;
	u8 tid;

	/* frames pushed back by the driver keep their recorded queue and
	 * go on the dedicated requeued list, which dequeue serves before
	 * the child qdiscs */
	if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
		queue = pkt_data->queue;
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			/* redirect to the aggregation queue if one is
			 * reserved for this STA/TID and still in the pool */
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* NOTE(review): classify80211() documents -1 as "drop the frame",
	 * but -1 wraps to 0xffff in the u16 and is clamped to the last
	 * queue here instead of being dropped — confirm this is intended */
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if there is one
	 */
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		/* QoS control is the last 2 bytes of the 802.11 header */
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 ack_policy = 0;
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = ack_policy | tid;
		p++;
		*p = 0;

		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			/* same aggregation-queue redirect as the requeue
			 * path above */
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}
		rcu_read_unlock();
	}

	tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
	/* remember the final choice so requeue can reuse it */
	pkt_data->queue = (unsigned int) queue;
	qdisc = q->queues[queue];
	err = qdisc->enqueue(skb, qdisc);
	if (err == NET_XMIT_SUCCESS) {
		qd->q.qlen++;
		qd->bstats.bytes += skb->len;
		qd->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}
	qd->qstats.drops++;
	return err;
}
  198. /* TODO: clean up the cases where master_hard_start_xmit
  199. * returns non 0 - it shouldn't ever do that. Once done we
  200. * can remove this function */
  201. static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
  202. {
  203. struct ieee80211_sched_data *q = qdisc_priv(qd);
  204. struct ieee80211_tx_packet_data *pkt_data =
  205. (struct ieee80211_tx_packet_data *) skb->cb;
  206. struct Qdisc *qdisc;
  207. int err;
  208. /* we recorded which queue to use earlier! */
  209. qdisc = q->queues[pkt_data->queue];
  210. if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
  211. qd->q.qlen++;
  212. return 0;
  213. }
  214. qd->qstats.drops++;
  215. return err;
  216. }
  217. static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
  218. {
  219. struct ieee80211_sched_data *q = qdisc_priv(qd);
  220. struct net_device *dev = qd->dev;
  221. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  222. struct ieee80211_hw *hw = &local->hw;
  223. struct sk_buff *skb;
  224. struct Qdisc *qdisc;
  225. int queue;
  226. /* check all the h/w queues in numeric/priority order */
  227. for (queue = 0; queue < QD_NUM(hw); queue++) {
  228. /* see if there is room in this hardware queue */
  229. if ((test_bit(IEEE80211_LINK_STATE_XOFF,
  230. &local->state[queue])) ||
  231. (test_bit(IEEE80211_LINK_STATE_PENDING,
  232. &local->state[queue])) ||
  233. (!test_bit(queue, q->qdisc_pool)))
  234. continue;
  235. /* there is space - try and get a frame */
  236. skb = skb_dequeue(&q->requeued[queue]);
  237. if (skb) {
  238. qd->q.qlen--;
  239. return skb;
  240. }
  241. qdisc = q->queues[queue];
  242. skb = qdisc->dequeue(qdisc);
  243. if (skb) {
  244. qd->q.qlen--;
  245. return skb;
  246. }
  247. }
  248. /* returning a NULL here when all the h/w queues are full means we
  249. * never need to call netif_stop_queue in the driver */
  250. return NULL;
  251. }
  252. static void wme_qdiscop_reset(struct Qdisc* qd)
  253. {
  254. struct ieee80211_sched_data *q = qdisc_priv(qd);
  255. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  256. struct ieee80211_hw *hw = &local->hw;
  257. int queue;
  258. /* QUESTION: should we have some hardware flush functionality here? */
  259. for (queue = 0; queue < QD_NUM(hw); queue++) {
  260. skb_queue_purge(&q->requeued[queue]);
  261. qdisc_reset(q->queues[queue]);
  262. }
  263. qd->q.qlen = 0;
  264. }
  265. static void wme_qdiscop_destroy(struct Qdisc* qd)
  266. {
  267. struct ieee80211_sched_data *q = qdisc_priv(qd);
  268. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  269. struct ieee80211_hw *hw = &local->hw;
  270. int queue;
  271. tcf_destroy_chain(q->filter_list);
  272. q->filter_list = NULL;
  273. for (queue = 0; queue < QD_NUM(hw); queue++) {
  274. skb_queue_purge(&q->requeued[queue]);
  275. qdisc_destroy(q->queues[queue]);
  276. q->queues[queue] = &noop_qdisc;
  277. }
  278. }
/* called whenever parameters are updated on existing qdisc;
 * currently a no-op - no netlink-tunable parameters are supported */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	return 0;
}
  284. /* called during initial creation of qdisc on device */
  285. static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
  286. {
  287. struct ieee80211_sched_data *q = qdisc_priv(qd);
  288. struct net_device *dev = qd->dev;
  289. struct ieee80211_local *local;
  290. struct ieee80211_hw *hw;
  291. int err = 0, i;
  292. /* check that device is a mac80211 device */
  293. if (!dev->ieee80211_ptr ||
  294. dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
  295. return -EINVAL;
  296. local = wdev_priv(dev->ieee80211_ptr);
  297. hw = &local->hw;
  298. /* only allow on master dev */
  299. if (dev != local->mdev)
  300. return -EINVAL;
  301. /* ensure that we are root qdisc */
  302. if (qd->parent != TC_H_ROOT)
  303. return -EINVAL;
  304. if (qd->flags & TCQ_F_INGRESS)
  305. return -EINVAL;
  306. /* if options were passed in, set them */
  307. if (opt)
  308. err = wme_qdiscop_tune(qd, opt);
  309. /* create child queues */
  310. for (i = 0; i < QD_NUM(hw); i++) {
  311. skb_queue_head_init(&q->requeued[i]);
  312. q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
  313. qd->handle);
  314. if (!q->queues[i]) {
  315. q->queues[i] = &noop_qdisc;
  316. printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
  317. }
  318. }
  319. /* non-aggregation queues: reserve/mark as used */
  320. for (i = 0; i < local->hw.queues; i++)
  321. set_bit(i, q->qdisc_pool);
  322. return err;
  323. }
/* dump qdisc configuration to netlink; not implemented, so report
 * failure to the caller */
static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	return -1;
}
  328. static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
  329. struct Qdisc *new, struct Qdisc **old)
  330. {
  331. struct ieee80211_sched_data *q = qdisc_priv(qd);
  332. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  333. struct ieee80211_hw *hw = &local->hw;
  334. unsigned long queue = arg - 1;
  335. if (queue >= QD_NUM(hw))
  336. return -EINVAL;
  337. if (!new)
  338. new = &noop_qdisc;
  339. sch_tree_lock(qd);
  340. *old = q->queues[queue];
  341. q->queues[queue] = new;
  342. qdisc_reset(*old);
  343. sch_tree_unlock(qd);
  344. return 0;
  345. }
  346. static struct Qdisc *
  347. wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
  348. {
  349. struct ieee80211_sched_data *q = qdisc_priv(qd);
  350. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  351. struct ieee80211_hw *hw = &local->hw;
  352. unsigned long queue = arg - 1;
  353. if (queue >= QD_NUM(hw))
  354. return NULL;
  355. return q->queues[queue];
  356. }
  357. static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
  358. {
  359. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  360. struct ieee80211_hw *hw = &local->hw;
  361. unsigned long queue = TC_H_MIN(classid);
  362. if (queue - 1 >= QD_NUM(hw))
  363. return 0;
  364. return queue;
  365. }
/* binding a tc filter to a class is identical to a plain lookup */
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}
/* classes are not reference counted, so there is nothing to release */
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}
  374. static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
  375. struct nlattr **tca, unsigned long *arg)
  376. {
  377. unsigned long cl = *arg;
  378. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  379. struct ieee80211_hw *hw = &local->hw;
  380. if (cl - 1 > QD_NUM(hw))
  381. return -ENOENT;
  382. /* TODO: put code to program hardware queue parameters here,
  383. * to allow programming from tc command line */
  384. return 0;
  385. }
  386. /* we don't support deleting hardware queues
  387. * when we add WMM-SA support - TSPECs may be deleted here */
  388. static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
  389. {
  390. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  391. struct ieee80211_hw *hw = &local->hw;
  392. if (cl - 1 > QD_NUM(hw))
  393. return -ENOENT;
  394. return 0;
  395. }
  396. static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
  397. struct sk_buff *skb, struct tcmsg *tcm)
  398. {
  399. struct ieee80211_sched_data *q = qdisc_priv(qd);
  400. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  401. struct ieee80211_hw *hw = &local->hw;
  402. if (cl - 1 > QD_NUM(hw))
  403. return -ENOENT;
  404. tcm->tcm_handle = TC_H_MIN(cl);
  405. tcm->tcm_parent = qd->handle;
  406. tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
  407. return 0;
  408. }
  409. static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
  410. {
  411. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  412. struct ieee80211_hw *hw = &local->hw;
  413. int queue;
  414. if (arg->stop)
  415. return;
  416. for (queue = 0; queue < QD_NUM(hw); queue++) {
  417. if (arg->count < arg->skip) {
  418. arg->count++;
  419. continue;
  420. }
  421. /* we should return classids for our internal queues here
  422. * as well as the external ones */
  423. if (arg->fn(qd, queue+1, arg) < 0) {
  424. arg->stop = 1;
  425. break;
  426. }
  427. arg->count++;
  428. }
  429. }
  430. static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
  431. unsigned long cl)
  432. {
  433. struct ieee80211_sched_data *q = qdisc_priv(qd);
  434. if (cl)
  435. return NULL;
  436. return &q->filter_list;
  437. }
/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
 * - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,
	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,
	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,	/* unbind == put: nothing to release */
	.dump = wme_classop_dump_class,
};
/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",	/* name visible to "tc qdisc" */
	.priv_size = sizeof(struct ieee80211_sched_data),
	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */
	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,
	.dump = wme_qdiscop_dump,
};
/* Install the WME qdisc as the device's sleeping root qdisc.
 * On allocation failure the error is logged and the device keeps
 * whatever qdisc it already had. */
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	/* publish the qdisc under the tree lock */
	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
  486. int ieee80211_qdisc_installed(struct net_device *dev)
  487. {
  488. return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
  489. }
/* register the "ieee80211" qdisc with the packet scheduler core */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
/* remove the "ieee80211" qdisc from the packet scheduler core */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
  498. int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
  499. struct sta_info *sta, u16 tid)
  500. {
  501. int i;
  502. struct ieee80211_sched_data *q =
  503. qdisc_priv(local->mdev->qdisc_sleeping);
  504. DECLARE_MAC_BUF(mac);
  505. /* prepare the filter and save it for the SW queue
  506. * matching the received HW queue */
  507. if (!local->hw.ampdu_queues)
  508. return -EPERM;
  509. /* try to get a Qdisc from the pool */
  510. for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
  511. if (!test_and_set_bit(i, q->qdisc_pool)) {
  512. ieee80211_stop_queue(local_to_hw(local), i);
  513. sta->tid_to_tx_q[tid] = i;
  514. /* IF there are already pending packets
  515. * on this tid first we need to drain them
  516. * on the previous queue
  517. * since HT is strict in order */
  518. #ifdef CONFIG_MAC80211_HT_DEBUG
  519. if (net_ratelimit())
  520. printk(KERN_DEBUG "allocated aggregation queue"
  521. " %d tid %d addr %s pool=0x%lX",
  522. i, tid, print_mac(mac, sta->addr),
  523. q->qdisc_pool[0]);
  524. #endif /* CONFIG_MAC80211_HT_DEBUG */
  525. return 0;
  526. }
  527. return -EAGAIN;
  528. }
/**
 * Release the aggregation queue reserved for @sta / @tid and either
 * recycle its pending frames through the root qdisc (@requeue != 0)
 * or drop them.
 *
 * the caller needs to hold local->mdev->queue_lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	/* QD_NUM(hw) is one past the last valid queue index and so
	 * marks "no aggregation queue assigned" */
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	if (requeue)
		/* push the pending frames back through the root qdisc */
		ieee80211_requeue(local, agg_queue);
	else
		/* discard anything still queued on the agg queue */
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
  548. void ieee80211_requeue(struct ieee80211_local *local, int queue)
  549. {
  550. struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
  551. struct ieee80211_sched_data *q = qdisc_priv(root_qd);
  552. struct Qdisc *qdisc = q->queues[queue];
  553. struct sk_buff *skb = NULL;
  554. u32 len;
  555. if (!qdisc || !qdisc->dequeue)
  556. return;
  557. printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
  558. for (len = qdisc->q.qlen; len > 0; len--) {
  559. skb = qdisc->dequeue(qdisc);
  560. root_qd->q.qlen--;
  561. /* packet will be classified again and */
  562. /* skb->packet_data->queue will be overridden if needed */
  563. if (skb)
  564. wme_qdiscop_enqueue(skb, root_qd);
  565. }
  566. }