sch_hfsc.c

/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 * coordinates are given by 64 bit unsigned integers.
 * x-axis: unit is clock count.
 * y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * The inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
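
/*
 * Worked example (illustrative only, the numbers are hypothetical): a
 * userspace curve (m1 = 10Mbit/s, d = 50ms, m2 = 1Mbit/s) becomes two
 * linear segments in this representation:
 *
 *	sm1 = m2sm(10Mbit/s)	scaled bytes per psched tick
 *	dx  = d2dx(50000)	50ms expressed in psched ticks
 *	dy  = seg_x2y(dx, sm1)	bytes served by the 1st segment
 *	sm2 = m2sm(1Mbit/s)	scaled slope after dx ticks
 *
 * y(x) then follows slope sm1 up to dx and slope sm2 afterwards; the
 * conversion helpers are defined further below.
 */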

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	u32		classid;	/* class id */
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head hlist;		/* hash list member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};

#define HFSC_HSIZE	16

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct list_head clhash[HFSC_HSIZE];	/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packet */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}
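
/*
 * Note: eltree_get_mindl() implements the real-time criterion of the
 * HFSC paper: among the classes that are already eligible
 * (cl_e <= cur_time), the one with the smallest deadline is served
 * first, i.e. EDF restricted to the eligible set.
 */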

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}
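
/*
 * Note: vttree_get_minvt() descends from the root to a leaf; at each
 * inner level it takes, in virtual-time order, the first child whose
 * fit-time has passed (cl_f <= cur_time), raising the parent's
 * cl_cvtmin along the way so that it stays monotonic within a period.
 */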

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec       100Kbps    1Mbps     10Mbps    100Mbps    1Gbps
 *  -------------+------------------------------------------------------
 *  bytes/1.024us  12.8e-3    128e-3    1280e-3   12800e-3   128000e-3
 *  1.024us/byte   78.125     7.8125    0.78125   0.078125   0.0078125
 */
#define	SM_SHIFT	20
#define	ISM_SHIFT	18

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
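
/*
 * Worked example from the table above, assuming PSCHED_TICKS_PER_SEC
 * matches the 1.024us clock resolution: at m = 10Mbit/s a class sends
 * 1.28 bytes per tick and needs 0.78125 ticks per byte, so
 *
 *	sm  = 1.28 << SM_SHIFT      = 1.28 * 2^20     ~= 1342177
 *	ism = 0.78125 << ISM_SHIFT  = 0.78125 * 2^18   = 204800
 *
 * preserving the 4+ effective decimal digits the shifts were chosen for.
 */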

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value, i.e. the time at which y bytes of
 * service have been received.
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

/* calculate the y-projection (bytes of service) of the curve at time x */
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
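
/*
 * Example with hypothetical, unscaled slopes (SM_SHIFT/ISM_SHIFT
 * scaling omitted): for a runtime curve with x = 0, y = 0, dx = 100
 * ticks, dy = 500 bytes (1st segment slope 5 bytes/tick) and a 2nd
 * segment slope of 1 byte/tick, rtsc_x2y(rtsc, 150) yields
 * 500 + 50 = 550 bytes, and rtsc_y2x(rtsc, 550) maps back to tick 150.
 */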

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
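
/*
 * Derivation of the intersection offset used above: writing both
 * segments as straight lines, the crossing point dx past (x, y) must
 * satisfy dx * sm1 == dx * sm2 + ((y1 - y) << SM_SHIFT), hence
 *
 *	dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * which is exactly the do_div() computation in rtsc_min().
 */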

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}
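
/*
 * Note on init_ed(): zeroing dx and dy removes the first segment from
 * the copied deadline curve, leaving a single line of slope sm2 through
 * the same starting point. This is done only when the real-time curve
 * is convex (sm1 <= sm2); a concave curve keeps the full two-segment
 * deadline curve as its eligible curve.
 */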

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = skb->len;
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
		return 0;
	}
	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
	h ^= h >> 8;
	h ^= h >> 4;

	return h & (HFSC_HSIZE - 1);
}
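
/*
 * Example (hypothetical classid): hfsc_hash(0xABCD) folds the id as
 * 0xABCD ^ 0x00AB = 0xAB66, then 0xAB66 ^ 0x0AB6 = 0xA1D0, and masks
 * with HFSC_HSIZE - 1 to select bucket 0.
 */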

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)
			return cl;
	}
	return NULL;
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct rtattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
		return -EINVAL;

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
			return -EINVAL;
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
			return -EINVAL;
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
			return -EINVAL;
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent && cl->cl_parent->classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->refcnt    = 1;
	cl->classid   = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock, tca[TCA_RATE-1]);
	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	list_del(&cl->hlist);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
			/* fall through: the action consumed the packet */
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return NULL;
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					cl->classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

rtattr_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	unsigned int i;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.refcnt  = 1;
	q->root.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	unsigned int len;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = skb->len;
	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->qstats.drops++;
		sch->qstats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
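
/*
 * Usage sketch (iproute2 commands, assuming the standard tc syntax for
 * hfsc; not part of this file):
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		sc rate 1mbit ul rate 10mbit
 *
 * "sc" sets both the real-time and link-sharing curves, "ul" the
 * upperlimit curve; "rt" and "ls" may be given separately instead.
 */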