wme.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722
  1. /*
  2. * Copyright 2004, Instant802 Networks, Inc.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #include <linux/netdevice.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/module.h>
  11. #include <linux/if_arp.h>
  12. #include <linux/types.h>
  13. #include <net/ip.h>
  14. #include <net/pkt_sched.h>
  15. #include <net/mac80211.h>
  16. #include "ieee80211_i.h"
  17. #include "wme.h"
  18. /* maximum number of hardware queues we support. */
  19. #define TC_80211_MAX_QUEUES 16
/* static mapping of 802.1d user priority (0-7) to hardware queue /
 * access category index; consulted by classify80211() below */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
/* per-qdisc private state for the mac80211 WME scheduler */
struct ieee80211_sched_data
{
	/* bitmap of hardware queues owned by this qdisc; bits are
	 * claimed/released for aggregation in
	 * ieee80211_ht_agg_queue_add()/_remove() */
	unsigned long qdisc_pool;
	/* user-attached tc classifiers, consulted first in classify_1d() */
	struct tcf_proto *filter_list;
	/* one child qdisc per hardware queue */
	struct Qdisc *queues[TC_80211_MAX_QUEUES];
	/* frames requeued by the driver; dequeued ahead of the child qdisc */
	struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
};
/* LLC/SNAP header for IPv4: DSAP/SSAP 0xAA, UI (0x03), OUI 00:00:00,
 * ethertype 0x0800 — compared against the payload in classify_1d() */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
  29. /* given a data frame determine the 802.1p/1d tag to use */
  30. static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
  31. {
  32. struct iphdr *ip;
  33. int dscp;
  34. int offset;
  35. struct ieee80211_sched_data *q = qdisc_priv(qd);
  36. struct tcf_result res = { -1, 0 };
  37. /* if there is a user set filter list, call out to that */
  38. if (q->filter_list) {
  39. tc_classify(skb, q->filter_list, &res);
  40. if (res.class != -1)
  41. return res.class;
  42. }
  43. /* skb->priority values from 256->263 are magic values to
  44. * directly indicate a specific 802.1d priority.
  45. * This is used to allow 802.1d priority to be passed directly in
  46. * from VLAN tags, etc. */
  47. if (skb->priority >= 256 && skb->priority <= 263)
  48. return skb->priority - 256;
  49. /* check there is a valid IP header present */
  50. offset = ieee80211_get_hdrlen_from_skb(skb);
  51. if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
  52. memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
  53. return 0;
  54. ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));
  55. dscp = ip->tos & 0xfc;
  56. if (dscp & 0x1c)
  57. return 0;
  58. return dscp >> 5;
  59. }
  60. static inline int wme_downgrade_ac(struct sk_buff *skb)
  61. {
  62. switch (skb->priority) {
  63. case 6:
  64. case 7:
  65. skb->priority = 5; /* VO -> VI */
  66. return 0;
  67. case 4:
  68. case 5:
  69. skb->priority = 3; /* VI -> BE */
  70. return 0;
  71. case 0:
  72. case 3:
  73. skb->priority = 2; /* BE -> BK */
  74. return 0;
  75. default:
  76. return -1;
  77. }
  78. }
/* Map a frame to a hardware queue (access category).
 * positive return value indicates which queue to use
 * negative return value indicates to drop the frame.
 * Side effect: sets skb->priority to the chosen 802.1d tag. */
static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;

	/* see if frame is data or non data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return IEEE80211_TX_QUEUE_DATA0;
	}

	/* placeholder: radiotap-injected frames would pick their AC here */
	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac;
	 * keep downgrading until an AC without admission control is found */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
  115. static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
  116. {
  117. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  118. struct ieee80211_sched_data *q = qdisc_priv(qd);
  119. struct ieee80211_tx_packet_data *pkt_data =
  120. (struct ieee80211_tx_packet_data *) skb->cb;
  121. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  122. unsigned short fc = le16_to_cpu(hdr->frame_control);
  123. struct Qdisc *qdisc;
  124. int err, queue;
  125. struct sta_info *sta;
  126. u8 tid;
  127. if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
  128. queue = pkt_data->queue;
  129. sta = sta_info_get(local, hdr->addr1);
  130. tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
  131. if (sta) {
  132. int ampdu_queue = sta->tid_to_tx_q[tid];
  133. if ((ampdu_queue < local->hw.queues) &&
  134. test_bit(ampdu_queue, &q->qdisc_pool)) {
  135. queue = ampdu_queue;
  136. pkt_data->flags |= IEEE80211_TXPD_AMPDU;
  137. } else {
  138. pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
  139. }
  140. sta_info_put(sta);
  141. }
  142. skb_queue_tail(&q->requeued[queue], skb);
  143. qd->q.qlen++;
  144. return 0;
  145. }
  146. queue = classify80211(skb, qd);
  147. /* now we know the 1d priority, fill in the QoS header if there is one
  148. */
  149. if (WLAN_FC_IS_QOS_DATA(fc)) {
  150. u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
  151. u8 ack_policy = 0;
  152. tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
  153. if (local->wifi_wme_noack_test)
  154. ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
  155. QOS_CONTROL_ACK_POLICY_SHIFT;
  156. /* qos header is 2 bytes, second reserved */
  157. *p = ack_policy | tid;
  158. p++;
  159. *p = 0;
  160. sta = sta_info_get(local, hdr->addr1);
  161. if (sta) {
  162. int ampdu_queue = sta->tid_to_tx_q[tid];
  163. if ((ampdu_queue < local->hw.queues) &&
  164. test_bit(ampdu_queue, &q->qdisc_pool)) {
  165. queue = ampdu_queue;
  166. pkt_data->flags |= IEEE80211_TXPD_AMPDU;
  167. } else {
  168. pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
  169. }
  170. sta_info_put(sta);
  171. }
  172. }
  173. if (unlikely(queue >= local->hw.queues)) {
  174. #if 0
  175. if (net_ratelimit()) {
  176. printk(KERN_DEBUG "%s - queue=%d (hw does not "
  177. "support) -> %d\n",
  178. __func__, queue, local->hw.queues - 1);
  179. }
  180. #endif
  181. queue = local->hw.queues - 1;
  182. }
  183. if (unlikely(queue < 0)) {
  184. kfree_skb(skb);
  185. err = NET_XMIT_DROP;
  186. } else {
  187. tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
  188. pkt_data->queue = (unsigned int) queue;
  189. qdisc = q->queues[queue];
  190. err = qdisc->enqueue(skb, qdisc);
  191. if (err == NET_XMIT_SUCCESS) {
  192. qd->q.qlen++;
  193. qd->bstats.bytes += skb->len;
  194. qd->bstats.packets++;
  195. return NET_XMIT_SUCCESS;
  196. }
  197. }
  198. qd->qstats.drops++;
  199. return err;
  200. }
  201. /* TODO: clean up the cases where master_hard_start_xmit
  202. * returns non 0 - it shouldn't ever do that. Once done we
  203. * can remove this function */
  204. static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
  205. {
  206. struct ieee80211_sched_data *q = qdisc_priv(qd);
  207. struct ieee80211_tx_packet_data *pkt_data =
  208. (struct ieee80211_tx_packet_data *) skb->cb;
  209. struct Qdisc *qdisc;
  210. int err;
  211. /* we recorded which queue to use earlier! */
  212. qdisc = q->queues[pkt_data->queue];
  213. if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
  214. qd->q.qlen++;
  215. return 0;
  216. }
  217. qd->qstats.drops++;
  218. return err;
  219. }
  220. static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
  221. {
  222. struct ieee80211_sched_data *q = qdisc_priv(qd);
  223. struct net_device *dev = qd->dev;
  224. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  225. struct ieee80211_hw *hw = &local->hw;
  226. struct sk_buff *skb;
  227. struct Qdisc *qdisc;
  228. int queue;
  229. /* check all the h/w queues in numeric/priority order */
  230. for (queue = 0; queue < hw->queues; queue++) {
  231. /* see if there is room in this hardware queue */
  232. if ((test_bit(IEEE80211_LINK_STATE_XOFF,
  233. &local->state[queue])) ||
  234. (test_bit(IEEE80211_LINK_STATE_PENDING,
  235. &local->state[queue])) ||
  236. (!test_bit(queue, &q->qdisc_pool)))
  237. continue;
  238. /* there is space - try and get a frame */
  239. skb = skb_dequeue(&q->requeued[queue]);
  240. if (skb) {
  241. qd->q.qlen--;
  242. return skb;
  243. }
  244. qdisc = q->queues[queue];
  245. skb = qdisc->dequeue(qdisc);
  246. if (skb) {
  247. qd->q.qlen--;
  248. return skb;
  249. }
  250. }
  251. /* returning a NULL here when all the h/w queues are full means we
  252. * never need to call netif_stop_queue in the driver */
  253. return NULL;
  254. }
  255. static void wme_qdiscop_reset(struct Qdisc* qd)
  256. {
  257. struct ieee80211_sched_data *q = qdisc_priv(qd);
  258. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  259. struct ieee80211_hw *hw = &local->hw;
  260. int queue;
  261. /* QUESTION: should we have some hardware flush functionality here? */
  262. for (queue = 0; queue < hw->queues; queue++) {
  263. skb_queue_purge(&q->requeued[queue]);
  264. qdisc_reset(q->queues[queue]);
  265. }
  266. qd->q.qlen = 0;
  267. }
  268. static void wme_qdiscop_destroy(struct Qdisc* qd)
  269. {
  270. struct ieee80211_sched_data *q = qdisc_priv(qd);
  271. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  272. struct ieee80211_hw *hw = &local->hw;
  273. int queue;
  274. tcf_destroy_chain(q->filter_list);
  275. q->filter_list = NULL;
  276. for (queue=0; queue < hw->queues; queue++) {
  277. skb_queue_purge(&q->requeued[queue]);
  278. qdisc_destroy(q->queues[queue]);
  279. q->queues[queue] = &noop_qdisc;
  280. }
  281. }
/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	/* The options block is currently ignored: the 802.1d -> queue
	 * mapping is static (ieee802_1d_to_ac).  If a configurable
	 * mapping is ever reinstated, validate and copy a
	 * struct tc_ieee80211_qopt here, e.g.:
	 *
	 *	struct ieee80211_sched_data *q = qdisc_priv(qd);
	 *	struct tc_ieee80211_qopt *qopt = nla_data(opt);
	 *	if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
	 *		return -EINVAL;
	 *	memcpy(q->tag2queue, qopt->tag2queue,
	 *	       sizeof(qopt->tag2queue));
	 */
	return 0;
}
  297. /* called during initial creation of qdisc on device */
  298. static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
  299. {
  300. struct ieee80211_sched_data *q = qdisc_priv(qd);
  301. struct net_device *dev = qd->dev;
  302. struct ieee80211_local *local;
  303. int queues;
  304. int err = 0, i;
  305. /* check that device is a mac80211 device */
  306. if (!dev->ieee80211_ptr ||
  307. dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
  308. return -EINVAL;
  309. /* check this device is an ieee80211 master type device */
  310. if (dev->type != ARPHRD_IEEE80211)
  311. return -EINVAL;
  312. /* check that there is no qdisc currently attached to device
  313. * this ensures that we will be the root qdisc. (I can't find a better
  314. * way to test this explicitly) */
  315. if (dev->qdisc_sleeping != &noop_qdisc)
  316. return -EINVAL;
  317. if (qd->flags & TCQ_F_INGRESS)
  318. return -EINVAL;
  319. local = wdev_priv(dev->ieee80211_ptr);
  320. queues = local->hw.queues;
  321. /* if options were passed in, set them */
  322. if (opt) {
  323. err = wme_qdiscop_tune(qd, opt);
  324. }
  325. /* create child queues */
  326. for (i = 0; i < queues; i++) {
  327. skb_queue_head_init(&q->requeued[i]);
  328. q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
  329. qd->handle);
  330. if (!q->queues[i]) {
  331. q->queues[i] = &noop_qdisc;
  332. printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
  333. }
  334. }
  335. /* reserve all legacy QoS queues */
  336. for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
  337. set_bit(i, &q->qdisc_pool);
  338. return err;
  339. }
  340. static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
  341. {
  342. /* struct ieee80211_sched_data *q = qdisc_priv(qd);
  343. unsigned char *p = skb->tail;
  344. struct tc_ieee80211_qopt opt;
  345. memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
  346. NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
  347. */ return skb->len;
  348. /*
  349. nla_put_failure:
  350. skb_trim(skb, p - skb->data);*/
  351. return -1;
  352. }
  353. static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
  354. struct Qdisc *new, struct Qdisc **old)
  355. {
  356. struct ieee80211_sched_data *q = qdisc_priv(qd);
  357. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  358. struct ieee80211_hw *hw = &local->hw;
  359. unsigned long queue = arg - 1;
  360. if (queue >= hw->queues)
  361. return -EINVAL;
  362. if (!new)
  363. new = &noop_qdisc;
  364. sch_tree_lock(qd);
  365. *old = q->queues[queue];
  366. q->queues[queue] = new;
  367. qdisc_reset(*old);
  368. sch_tree_unlock(qd);
  369. return 0;
  370. }
  371. static struct Qdisc *
  372. wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
  373. {
  374. struct ieee80211_sched_data *q = qdisc_priv(qd);
  375. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  376. struct ieee80211_hw *hw = &local->hw;
  377. unsigned long queue = arg - 1;
  378. if (queue >= hw->queues)
  379. return NULL;
  380. return q->queues[queue];
  381. }
  382. static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
  383. {
  384. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  385. struct ieee80211_hw *hw = &local->hw;
  386. unsigned long queue = TC_H_MIN(classid);
  387. if (queue - 1 >= hw->queues)
  388. return 0;
  389. return queue;
  390. }
/* Binding a filter to a class needs no extra state here; simply
 * validate the classid by delegating to wme_classop_get(). */
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}
/* Classes are plain queue numbers with no reference counting, so there
 * is nothing to release. */
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}
  399. static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
  400. struct nlattr **tca, unsigned long *arg)
  401. {
  402. unsigned long cl = *arg;
  403. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  404. struct ieee80211_hw *hw = &local->hw;
  405. if (cl - 1 > hw->queues)
  406. return -ENOENT;
  407. /* TODO: put code to program hardware queue parameters here,
  408. * to allow programming from tc command line */
  409. return 0;
  410. }
  411. /* we don't support deleting hardware queues
  412. * when we add WMM-SA support - TSPECs may be deleted here */
  413. static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
  414. {
  415. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  416. struct ieee80211_hw *hw = &local->hw;
  417. if (cl - 1 > hw->queues)
  418. return -ENOENT;
  419. return 0;
  420. }
  421. static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
  422. struct sk_buff *skb, struct tcmsg *tcm)
  423. {
  424. struct ieee80211_sched_data *q = qdisc_priv(qd);
  425. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  426. struct ieee80211_hw *hw = &local->hw;
  427. if (cl - 1 > hw->queues)
  428. return -ENOENT;
  429. tcm->tcm_handle = TC_H_MIN(cl);
  430. tcm->tcm_parent = qd->handle;
  431. tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
  432. return 0;
  433. }
  434. static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
  435. {
  436. struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  437. struct ieee80211_hw *hw = &local->hw;
  438. int queue;
  439. if (arg->stop)
  440. return;
  441. for (queue = 0; queue < hw->queues; queue++) {
  442. if (arg->count < arg->skip) {
  443. arg->count++;
  444. continue;
  445. }
  446. /* we should return classids for our internal queues here
  447. * as well as the external ones */
  448. if (arg->fn(qd, queue+1, arg) < 0) {
  449. arg->stop = 1;
  450. break;
  451. }
  452. arg->count++;
  453. }
  454. }
  455. static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
  456. unsigned long cl)
  457. {
  458. struct ieee80211_sched_data *q = qdisc_priv(qd);
  459. if (cl)
  460. return NULL;
  461. return &q->filter_list;
  462. }
/* this qdisc is classful (i.e. has classes, some of which may have leaf
 * qdiscs attached) - these are the operations on the classes; each
 * class corresponds to one hardware queue (classid = queue + 1) */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,
	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,
	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,
	.dump = wme_classop_dump_class,
};
/* queueing discipline operations — registered with the kernel's traffic
 * control layer under the id "ieee80211" */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),
	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */
	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,
	.dump = wme_qdiscop_dump,
};
  496. void ieee80211_install_qdisc(struct net_device *dev)
  497. {
  498. struct Qdisc *qdisc;
  499. qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
  500. if (!qdisc) {
  501. printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
  502. return;
  503. }
  504. /* same handle as would be allocated by qdisc_alloc_handle() */
  505. qdisc->handle = 0x80010000;
  506. qdisc_lock_tree(dev);
  507. list_add_tail(&qdisc->list, &dev->qdisc_list);
  508. dev->qdisc_sleeping = qdisc;
  509. qdisc_unlock_tree(dev);
  510. }
/* Return nonzero iff the device's sleeping root qdisc is ours. */
int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}
/* Register the "ieee80211" qdisc with the traffic-control core. */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
/* Unregister the "ieee80211" qdisc from the traffic-control core. */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
/* Claim a free hardware queue from the qdisc pool for A-MPDU
 * aggregation on (@sta, @tid).  The claimed queue is stopped and
 * recorded in sta->tid_to_tx_q[].  Returns 0 on success or -EAGAIN
 * when every queue is already in use. */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	/* try to get a Qdisc from the pool; test_and_set_bit makes the
	 * claim atomic */
	for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++)
		if (!test_and_set_bit(i, &q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
  552. /**
  553. * the caller needs to hold local->mdev->queue_lock
  554. */
  555. void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
  556. struct sta_info *sta, u16 tid,
  557. u8 requeue)
  558. {
  559. struct ieee80211_sched_data *q =
  560. qdisc_priv(local->mdev->qdisc_sleeping);
  561. int agg_queue = sta->tid_to_tx_q[tid];
  562. /* return the qdisc to the pool */
  563. clear_bit(agg_queue, &q->qdisc_pool);
  564. sta->tid_to_tx_q[tid] = local->hw.queues;
  565. if (requeue)
  566. ieee80211_requeue(local, agg_queue);
  567. else
  568. q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
  569. }
  570. void ieee80211_requeue(struct ieee80211_local *local, int queue)
  571. {
  572. struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
  573. struct ieee80211_sched_data *q = qdisc_priv(root_qd);
  574. struct Qdisc *qdisc = q->queues[queue];
  575. struct sk_buff *skb = NULL;
  576. u32 len = qdisc->q.qlen;
  577. if (!qdisc || !qdisc->dequeue)
  578. return;
  579. printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
  580. for (len = qdisc->q.qlen; len > 0; len--) {
  581. skb = qdisc->dequeue(qdisc);
  582. root_qd->q.qlen--;
  583. /* packet will be classified again and */
  584. /* skb->packet_data->queue will be overridden if needed */
  585. if (skb)
  586. wme_qdiscop_enqueue(skb, root_qd);
  587. }
  588. }