sch_hfsc.c
/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * When a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. The link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 * coordinates are given by 64 bit unsigned integers.
 * x-axis: unit is clock count.
 * y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * The inverse slope values, as well as the y-projection of the 1st
 * segment, are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
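
/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): externally a service curve is given by (m1, d, m2) and is
 * two-piece linear: slope m1 for the first d microseconds, slope m2
 * afterwards.  Schematically, for a concave (bursty) curve, m1 > m2:
 *
 *      bytes
 *        |            ____ slope m2
 *        |        ___/
 *        |       /  <- knee at x = d, stored internally as (dx, dy)
 *        |      /
 *        |     /  slope m1
 *        +----+---------------- time
 *        0    d
 *
 * Internally only the knee offsets (dx, dy) and the scaled slopes and
 * inverse slopes (sm1/ism1, sm2/ism2) are kept.
 */
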
struct internal_sc
{
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
};

struct hfsc_class
{
        u32             classid;        /* class id */
        unsigned int    refcnt;         /* usage count */

        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */

        struct hfsc_sched *sched;       /* scheduler data */
        struct hfsc_class *cl_parent;   /* parent class */
        struct list_head siblings;      /* sibling classes */
        struct list_head children;      /* child classes */
        struct Qdisc    *qdisc;         /* leaf qdisc */

        struct rb_node  el_node;        /* qdisc's eligible tree member */
        struct rb_root  vt_tree;        /* active children sorted by cl_vt */
        struct rb_node  vt_node;        /* parent's vt_tree member */
        struct rb_root  cf_tree;        /* active children sorted by cl_f */
        struct rb_node  cf_node;        /* parent's cf_heap member */
        struct list_head hlist;         /* hash list member */
        struct list_head dlist;         /* drop list member */

        u64     cl_total;               /* total work in bytes */
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */

        u64     cl_d;                   /* deadline */
        u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
        u64     cl_myf;                 /* my fit-time (calculated from this
                                           class's own upperlimit curve) */
        u64     cl_myfadj;              /* my fit-time adjustment (to cancel
                                           history dependence) */
        u64     cl_cfmin;               /* earliest children's fit-time (used
                                           with cl_myf to obtain cl_f) */
        u64     cl_cvtmin;              /* minimal virtual time among the
                                           children fit for link-sharing
                                           (monotonic within a period) */
        u64     cl_vtadj;               /* intra-period cumulative vt
                                           adjustment */
        u64     cl_vtoff;               /* inter-period cumulative vt offset */
        u64     cl_cvtmax;              /* max child's vt in the last period */
        u64     cl_cvtoff;              /* cumulative cvtmax of all periods */
        u64     cl_pcvtoff;             /* parent's cvtoff at initialization
                                           time */

        struct internal_sc cl_rsc;      /* internal real-time service curve */
        struct internal_sc cl_fsc;      /* internal fair service curve */
        struct internal_sc cl_usc;      /* internal upperlimit service curve */
        struct runtime_sc cl_deadline;  /* deadline curve */
        struct runtime_sc cl_eligible;  /* eligible curve */
        struct runtime_sc cl_virtual;   /* virtual curve */
        struct runtime_sc cl_ulimit;    /* upperlimit curve */

        unsigned long   cl_flags;       /* which curves are valid */
        unsigned long   cl_vtperiod;    /* vt period sequence number */
        unsigned long   cl_parentperiod;/* parent's vt period sequence number */
        unsigned long   cl_nactive;     /* number of active children */
};

#define HFSC_HSIZE      16

struct hfsc_sched
{
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct list_head clhash[HFSC_HSIZE];    /* class hash */
        struct rb_root eligible;                /* eligible tree */
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
        struct qdisc_watchdog watchdog;         /* watchdog timer */
};

#define HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */

/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
static void
eltree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->sched->eligible.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, el_node);
                if (cl->cl_e >= cl1->cl_e)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->el_node, parent, p);
        rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
        eltree_remove(cl);
        eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
        struct hfsc_class *p, *cl = NULL;
        struct rb_node *n;

        for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, el_node);
                if (p->cl_e > cur_time)
                        break;
                if (cl == NULL || p->cl_d < cl->cl_d)
                        cl = p;
        }
        return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
        struct rb_node *n;

        n = rb_first(&q->eligible);
        if (n == NULL)
                return NULL;
        return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, vt_node);
                if (cl->cl_vt >= cl1->cl_vt)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->vt_node, parent, p);
        rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
        vttree_remove(cl);
        vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
        struct hfsc_class *p;
        struct rb_node *n;

        for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, vt_node);
                if (p->cl_f <= cur_time)
                        return p;
        }
        return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
        /* if root-class's cfmin is bigger than cur_time nothing to do */
        if (cl->cl_cfmin > cur_time)
                return NULL;

        while (cl->level > 0) {
                cl = vttree_firstfit(cl, cur_time);
                if (cl == NULL)
                        return NULL;
                /*
                 * update parent's cl_cvtmin.
                 */
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
                        cl->cl_parent->cl_cvtmin = cl->cl_vt;
        }
        return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, cf_node);
                if (cl->cl_f >= cl1->cl_f)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->cf_node, parent, p);
        rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
        cftree_remove(cl);
        cftree_insert(cl);
}

/*
 * service curve support functions
 *
 * external service curve parameters
 *      m: bps
 *      d: us
 * internal service curve parameters
 *      sm: (bytes/psched_us) << SM_SHIFT
 *      ism: (psched_us/byte) << ISM_SHIFT
 *      dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec       100Kbps    1Mbps     10Mbps    100Mbps   1Gbps
 *  -------------+-----------------------------------------------------
 *  bytes/1.024us  12.8e-3    128e-3    1280e-3   12800e-3  128000e-3
 *
 *  1.024us/byte   78.125     7.8125    0.78125   0.078125  0.0078125
 */
#define SM_SHIFT        20
#define ISM_SHIFT       18

#define SM_MASK         ((1ULL << SM_SHIFT) - 1)
#define ISM_MASK        ((1ULL << ISM_SHIFT) - 1)
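
/*
 * Illustrative example (added for exposition; not part of the original
 * source), assuming one psched tick is 1.024us as in the table above:
 * at m = 1 Mbps the raw slope is 125000 bytes/sec, i.e. about 0.128
 * bytes per tick, which would truncate to zero in integer math.
 * Scaling preserves the effective digits:
 *
 *      sm  = (125000 << SM_SHIFT)  / PSCHED_TICKS_PER_SEC ~= 134218
 *      ism = (PSCHED_TICKS_PER_SEC << ISM_SHIFT) / 125000 ~= 2048000
 *
 * i.e. roughly 0.128 * 2^20 and 7.8125 * 2^18 respectively.
 */
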
static inline u64
seg_x2y(u64 x, u64 sm)
{
        u64 y;

        /*
         * compute
         *      y = x * sm >> SM_SHIFT
         * but divide it for the upper and lower bits to avoid overflow
         */
        y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
        return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
        u64 x;

        if (y == 0)
                x = 0;
        else if (ism == HT_INFINITY)
                x = HT_INFINITY;
        else {
                x = (y >> ISM_SHIFT) * ism
                    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
        }
        return x;
}
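
/*
 * Illustrative note (added for exposition; not part of the original
 * source): the split in seg_x2y() evaluates y = x * sm >> SM_SHIFT
 * without the intermediate product x * sm overflowing 64 bits.
 * E.g. with sm = 134218 (~1 Mbps, see above) and x = 2^20 ticks
 * (~1.07 s):
 *
 *      (x >> SM_SHIFT) * sm             = 1 * 134218 = 134218
 *      ((x & SM_MASK) * sm) >> SM_SHIFT = 0
 *      y                                = 134218 bytes
 *
 * which matches ~1.07 s of service at 125000 bytes/sec.
 */
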
/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
        u64 sm;

        sm = ((u64)m << SM_SHIFT);
        sm += PSCHED_TICKS_PER_SEC - 1;
        do_div(sm, PSCHED_TICKS_PER_SEC);
        return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
        u64 ism;

        if (m == 0)
                ism = HT_INFINITY;
        else {
                ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
                ism += m - 1;
                do_div(ism, m);
        }
        return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
        u64 dx;

        dx = ((u64)d * PSCHED_TICKS_PER_SEC);
        dx += USEC_PER_SEC - 1;
        do_div(dx, USEC_PER_SEC);
        return dx;
}
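
/*
 * Illustrative example (added for exposition; not part of the original
 * source), again assuming one psched tick is 1.024us: d2dx(10000)
 * converts a 10ms segment length into
 * 10000 * PSCHED_TICKS_PER_SEC / USEC_PER_SEC ~= 9766 ticks (rounded
 * up); sm2m() and dx2d() below perform the inverse conversions for
 * dumping the configuration back to userspace.
 */
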
/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
        u64 m;

        m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
        return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
        u64 d;

        d = dx * USEC_PER_SEC;
        do_div(d, PSCHED_TICKS_PER_SEC);
        return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
        isc->sm1  = m2sm(sc->m1);
        isc->ism1 = m2ism(sc->m1);
        isc->dx   = d2dx(sc->d);
        isc->dy   = seg_x2y(isc->dx, isc->sm1);
        isc->sm2  = m2sm(sc->m2);
        isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        rtsc->x    = x;
        rtsc->y    = y;
        rtsc->sm1  = isc->sm1;
        rtsc->ism1 = isc->ism1;
        rtsc->dx   = isc->dx;
        rtsc->dy   = isc->dy;
        rtsc->sm2  = isc->sm2;
        rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y value, i.e. the time at which the curve reaches y.
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
        u64 x;

        if (y < rtsc->y)
                x = rtsc->x;
        else if (y <= rtsc->y + rtsc->dy) {
                /* x belongs to the 1st segment */
                if (rtsc->dy == 0)
                        x = rtsc->x + rtsc->dx;
                else
                        x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
        } else {
                /* x belongs to the 2nd segment */
                x = rtsc->x + rtsc->dx
                    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
        }
        return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
        u64 y;

        if (x <= rtsc->x)
                y = rtsc->y;
        else if (x <= rtsc->x + rtsc->dx)
                /* y belongs to the 1st segment */
                y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
        else
                /* y belongs to the 2nd segment */
                y = rtsc->y + rtsc->dy
                    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
        return y;
}
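
/*
 * Illustrative example (added for exposition; not part of the original
 * source): for a runtime curve with x = 0, y = 0, dx = 100, dy = 50
 * (50 bytes over the first 100 ticks) and sm2 equivalent to
 * 1 byte/tick,
 *
 *      rtsc_x2y(rtsc, 150) = dy + seg_x2y(150 - dx, sm2) = 50 + 50 = 100
 *      rtsc_y2x(rtsc, 100) = dx + seg_y2x(100 - dy, ism2) = 100 + 50 = 150
 *
 * i.e. the two functions are inverses of each other away from flat or
 * vertical segments.
 */
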
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        u64 y1, y2, dx, dy;
        u32 dsm;

        if (isc->sm1 <= isc->sm2) {
                /* service curve is convex */
                y1 = rtsc_x2y(rtsc, x);
                if (y1 < y)
                        /* the current rtsc is smaller */
                        return;
                rtsc->x = x;
                rtsc->y = y;
                return;
        }

        /*
         * service curve is concave
         * compute the two y values of the current rtsc
         *      y1: at x
         *      y2: at (x + dx)
         */
        y1 = rtsc_x2y(rtsc, x);
        if (y1 <= y) {
                /* rtsc is below isc, no change to rtsc */
                return;
        }

        y2 = rtsc_x2y(rtsc, x + isc->dx);
        if (y2 >= y + isc->dy) {
                /* rtsc is above isc, replace rtsc by isc */
                rtsc->x = x;
                rtsc->y = y;
                rtsc->dx = isc->dx;
                rtsc->dy = isc->dy;
                return;
        }

        /*
         * the two curves intersect
         * compute the offsets (dx, dy) using the reverse
         * function of seg_x2y()
         *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
         */
        dx = (y1 - y) << SM_SHIFT;
        dsm = isc->sm1 - isc->sm2;
        do_div(dx, dsm);
        /*
         * check if (x, y1) belongs to the 1st segment of rtsc.
         * if so, add the offset.
         */
        if (rtsc->x + rtsc->dx > x)
                dx += rtsc->x + rtsc->dx - x;
        dy = seg_x2y(dx, isc->sm1);

        rtsc->x = x;
        rtsc->y = y;
        rtsc->dx = dx;
        rtsc->dy = dy;
        return;
}
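
/*
 * Illustrative derivation (added for exposition; not part of the
 * original source): expanding the intersection condition
 * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y) gives
 *
 *      dx * sm1 >> SM_SHIFT = (dx * sm2 >> SM_SHIFT) + (y1 - y)
 *      dx * (sm1 - sm2)     = (y1 - y) << SM_SHIFT
 *      dx                   = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * which is exactly the do_div() computation above; sm1 - sm2 is
 * positive here because the concave branch is only taken when
 * sm1 > sm2.
 */
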
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
        u64 cur_time = psched_get_time();

        /* update the deadline curve */
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

        /*
         * update the eligible curve.
         * for concave, it is equal to the deadline curve.
         * for convex, it is a linear curve with slope m2.
         */
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }

        /* compute e and d */
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
        struct rb_node *n = rb_first(&cl->cf_tree);
        struct hfsc_class *p;

        if (n == NULL) {
                cl->cl_cfmin = 0;
                return;
        }
        p = rb_entry(n, struct hfsc_class, cf_node);
        cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
        struct hfsc_class *max_cl;
        struct rb_node *n;
        u64 vt, f, cur_time;
        int go_active;

        cur_time = 0;
        go_active = 1;
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                if (go_active && cl->cl_nactive++ == 0)
                        go_active = 1;
                else
                        go_active = 0;

                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
                                max_cl = rb_entry(n, struct hfsc_class, vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes. if the parent's period didn't
                                 * change, don't decrease vt of the class.
                                 */
                                vt = max_cl->cl_vt;
                                if (cl->cl_parent->cl_cvtmin != 0)
                                        vt = (cl->cl_parent->cl_cvtmin + vt)/2;

                                if (cl->cl_parent->cl_vtperiod !=
                                    cl->cl_parentperiod || vt > cl->cl_vt)
                                        cl->cl_vt = vt;
                        } else {
                                /*
                                 * first child for a new parent backlog period.
                                 * add parent's cvtmax to cvtoff to make a new
                                 * vt (vtoff + vt) larger than the vt in the
                                 * last period for all children.
                                 */
                                vt = cl->cl_parent->cl_cvtmax;
                                cl->cl_parent->cl_cvtoff += vt;
                                cl->cl_parent->cl_cvtmax = 0;
                                cl->cl_parent->cl_cvtmin = 0;
                                cl->cl_vt = 0;
                        }

                        cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
                                       cl->cl_pcvtoff;

                        /* update the virtual curve */
                        vt = cl->cl_vt + cl->cl_vtoff;
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
                                 cl->cl_total);
                        if (cl->cl_virtual.x == vt) {
                                cl->cl_virtual.x -= cl->cl_vtoff;
                                cl->cl_vtoff = 0;
                        }
                        cl->cl_vtadj = 0;

                        cl->cl_vtperiod++;  /* increment vt period */
                        cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
                        if (cl->cl_parent->cl_nactive == 0)
                                cl->cl_parentperiod++;
                        cl->cl_f = 0;

                        vttree_insert(cl);
                        cftree_insert(cl);

                        if (cl->cl_flags & HFSC_USC) {
                                /* class has upper limit curve */
                                if (cur_time == 0)
                                        cur_time = psched_get_time();

                                /* update the ulimit curve */
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
                                         cl->cl_total);
                                /* compute myf */
                                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                                                      cl->cl_total);
                                cl->cl_myfadj = 0;
                        }
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
        u64 f; /* , myf_bound, delta; */
        int go_passive = 0;

        if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
                go_passive = 1;

        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                cl->cl_total += len;

                if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
                        continue;

                if (go_passive && --cl->cl_nactive == 0)
                        go_passive = 1;
                else
                        go_passive = 0;

                if (go_passive) {
                        /* no more active child, going passive */

                        /* update cvtmax of the parent class */
                        if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
                                cl->cl_parent->cl_cvtmax = cl->cl_vt;

                        /* remove this class from the vt tree */
                        vttree_remove(cl);

                        cftree_remove(cl);
                        update_cfmin(cl->cl_parent);

                        continue;
                }

                /*
                 * update vt and f
                 */
                cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
                            - cl->cl_vtoff + cl->cl_vtadj;

                /*
                 * if vt of the class is smaller than cvtmin,
                 * the class was skipped in the past due to non-fit.
                 * if so, we need to adjust vtadj.
                 */
                if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
                        cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
                        cl->cl_vt = cl->cl_parent->cl_cvtmin;
                }

                /* update the vt tree */
                vttree_update(cl);

                if (cl->cl_flags & HFSC_USC) {
                        cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
                                                              cl->cl_total);
#if 0
                        /*
                         * This code causes classes to stay way under their
                         * limit when multiple classes are used at gigabit
                         * speed. needs investigation. -kaber
                         */
                        /*
                         * if myf lags behind by more than one clock tick
                         * from the current time, adjust myfadj to prevent
                         * a rate-limited class from going greedy.
                         * in a steady state under rate-limiting, myf
                         * fluctuates within one clock tick.
                         */
                        myf_bound = cur_time - PSCHED_JIFFIE2US(1);
                        if (cl->cl_myf < myf_bound) {
                                delta = cur_time - cl->cl_myf;
                                cl->cl_myfadj += delta;
                                cl->cl_myf += delta;
                        }
#endif
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
        if (cl->cl_flags & HFSC_RSC)
                init_ed(cl, len);
        if (cl->cl_flags & HFSC_FSC)
                init_vf(cl, len);

        list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
        if (cl->cl_flags & HFSC_RSC)
                eltree_remove(cl);

        list_del(&cl->dlist);

        /*
         * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
         * needs to be called explicitly to remove a class from vttree.
         */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
        struct sk_buff *skb;
        unsigned int len;

        skb = sch->dequeue(sch);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
                return 0;
        }
        len = skb->len;
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: failed to requeue\n");
                qdisc_tree_decrease_qlen(sch, 1);
                return 0;
        }
        return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
        struct hfsc_class *p;
        unsigned int level;

        do {
                level = 0;
                list_for_each_entry(p, &cl->children, siblings) {
                        if (p->level >= level)
                                level = p->level + 1;
                }
                cl->level = level;
        } while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
        h ^= h >> 8;
        h ^= h >> 4;

        return h & (HFSC_HSIZE - 1);
}
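
/*
 * Illustrative example (added for exposition; not part of the original
 * source): for classid 1:1 (0x10001), h ^= h >> 8 gives 0x10101,
 * h ^= h >> 4 gives 0x11111, and masking with HFSC_HSIZE - 1 selects
 * bucket 1, so the major (qdisc handle) bits are folded in and
 * consecutive minor numbers still land in consecutive buckets.
 */
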
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;

        list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
                if (cl->classid == classid)
                        return cl;
        }
        return NULL;
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
        sc2isc(rsc, &cl->cl_rsc);
        rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }
        cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
        sc2isc(fsc, &cl->cl_fsc);
        rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
        cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
        sc2isc(usc, &cl->cl_usc);
        rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
        cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
        [TCA_HFSC_RSC]  = { .len = sizeof(struct tc_service_curve) },
        [TCA_HFSC_FSC]  = { .len = sizeof(struct tc_service_curve) },
        [TCA_HFSC_USC]  = { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct nlattr **tca, unsigned long *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
        struct hfsc_class *parent = NULL;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_HFSC_MAX + 1];
        struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
        u64 cur_time;
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
        if (err < 0)
                return err;

        if (tb[TCA_HFSC_RSC]) {
                rsc = nla_data(tb[TCA_HFSC_RSC]);
                if (rsc->m1 == 0 && rsc->m2 == 0)
                        rsc = NULL;
        }

        if (tb[TCA_HFSC_FSC]) {
                fsc = nla_data(tb[TCA_HFSC_FSC]);
                if (fsc->m1 == 0 && fsc->m2 == 0)
                        fsc = NULL;
        }

        if (tb[TCA_HFSC_USC]) {
                usc = nla_data(tb[TCA_HFSC_USC]);
                if (usc->m1 == 0 && usc->m2 == 0)
                        usc = NULL;
        }

        if (cl != NULL) {
                if (parentid) {
                        if (cl->cl_parent && cl->cl_parent->classid != parentid)
                                return -EINVAL;
                        if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
                                return -EINVAL;
                }
                cur_time = psched_get_time();

                sch_tree_lock(sch);
                if (rsc != NULL)
                        hfsc_change_rsc(cl, rsc, cur_time);
                if (fsc != NULL)
                        hfsc_change_fsc(cl, fsc);
                if (usc != NULL)
                        hfsc_change_usc(cl, usc, cur_time);

                if (cl->qdisc->q.qlen != 0) {
                        if (cl->cl_flags & HFSC_RSC)
                                update_ed(cl, qdisc_peek_len(cl->qdisc));
                        if (cl->cl_flags & HFSC_FSC)
                                update_vf(cl, 0, cur_time);
                }
                sch_tree_unlock(sch);

                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                              &sch->dev->queue_lock,
                                              tca[TCA_RATE]);
                return 0;
        }

        if (parentid == TC_H_ROOT)
                return -EEXIST;

        parent = &q->root;
        if (parentid) {
                parent = hfsc_find_class(parentid, sch);
                if (parent == NULL)
                        return -ENOENT;
        }

        if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
                return -EINVAL;
        if (hfsc_find_class(classid, sch))
                return -EEXIST;

        if (rsc == NULL && fsc == NULL)
                return -EINVAL;

        cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        if (rsc != NULL)
                hfsc_change_rsc(cl, rsc, 0);
        if (fsc != NULL)
                hfsc_change_fsc(cl, fsc);
        if (usc != NULL)
                hfsc_change_usc(cl, usc, 0);

        cl->refcnt    = 1;
        cl->classid   = classid;
        cl->sched     = q;
        cl->cl_parent = parent;
        cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        INIT_LIST_HEAD(&cl->children);
        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;

        sch_tree_lock(sch);
        list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
                hfsc_purge_queue(sch, parent);
        hfsc_adjust_levels(parent);
        cl->cl_pcvtoff = parent->cl_cvtoff;
        sch_tree_unlock(sch);

        if (tca[TCA_RATE])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
                                  &sch->dev->queue_lock, tca[TCA_RATE]);
        *arg = (unsigned long)cl;
        return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        tcf_destroy_chain(&cl->filter_list);
        qdisc_destroy(cl->qdisc);
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        if (cl != &q->root)
                kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
                return -EBUSY;

        sch_tree_lock(sch);

        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);

        hfsc_purge_queue(sch, cl);
        list_del(&cl->hlist);

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
            (cl = hfsc_find_class(skb->priority, sch)) != NULL)
                if (cl->level == 0)
                        return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->root.filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                if ((cl = (struct hfsc_class *)res.class) == NULL) {
                        if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
                                break; /* filter selected invalid classid */
                }

                if (cl->level == 0)
                        return cl; /* hit leaf class */

                /* apply inner filter chain */
                tcf = cl->filter_list;
        }

        /* classification failed, try default class */
        cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (cl == NULL || cl->level > 0)
                return NULL;

        return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                return -ENOENT;
        if (cl->level > 0)
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                        cl->classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        hfsc_purge_queue(sch, cl);
        *old = xchg(&cl->qdisc, new);
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl != NULL && cl->level == 0)
                return cl->qdisc;

        return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->qdisc->q.qlen == 0) {
                update_vf(cl, 0, 0);
                set_passive(cl);
        }
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
        struct hfsc_class *p = (struct hfsc_class *)parent;
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL) {
                if (p != NULL && p->level <= cl->level)
                        return 0;
                cl->filter_cnt++;
        }

        return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                cl = &q->root;

        return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
        struct tc_service_curve tsc;

        tsc.m1 = sm2m(sc->sm1);
        tsc.d  = dx2d(sc->dx);
        tsc.m2 = sm2m(sc->sm2);
        NLA_PUT(skb, attr, sizeof(tsc), &tsc);

        return skb->len;

 nla_put_failure:
        return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
        if ((cl->cl_flags & HFSC_RSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
                goto nla_put_failure;

        if ((cl->cl_flags & HFSC_FSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
                goto nla_put_failure;

        if ((cl->cl_flags & HFSC_USC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
                goto nla_put_failure;

        return skb->len;

 nla_put_failure:
        return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (cl->level == 0)
                tcm->tcm_info = cl->qdisc->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (hfsc_dump_curves(skb, cl) < 0)
                goto nla_put_failure;
        nla_nest_end(skb, nest);
        return skb->len;

 nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                      struct gnet_dump *d)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;

        cl->qstats.qlen = cl->qdisc->q.qlen;
        xstats.level    = cl->level;
        xstats.period   = cl->cl_vtperiod;
        xstats.work     = cl->cl_total;
        xstats.rtwork   = cl->cl_cumul;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        u64 next_time = 0;

        if ((cl = eltree_get_minel(q)) != NULL)
                next_time = cl->cl_e;
        if (q->root.cl_cfmin != 0) {
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
        WARN_ON(next_time == 0);
        qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
        unsigned int i;

        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);

        q->defcls = qopt->defcls;
        for (i = 0; i < HFSC_HSIZE; i++)
                INIT_LIST_HEAD(&q->clhash[i]);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        skb_queue_head_init(&q->requeue);

        q->root.refcnt  = 1;
        q->root.classid = sch->handle;
        q->root.sched   = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                          sch->handle);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
        INIT_LIST_HEAD(&q->root.children);
        q->root.vt_tree = RB_ROOT;
        q->root.cf_tree = RB_ROOT;

        list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

        qdisc_watchdog_init(&q->watchdog, sch);

        return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;

        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);

        sch_tree_lock(sch);
        q->defcls = qopt->defcls;
        sch_tree_unlock(sch);

        return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
        cl->cl_total        = 0;
        cl->cl_cumul        = 0;
        cl->cl_d            = 0;
        cl->cl_e            = 0;
        cl->cl_vt           = 0;
        cl->cl_vtadj        = 0;
        cl->cl_vtoff        = 0;
        cl->cl_cvtmin       = 0;
        cl->cl_cvtmax       = 0;
        cl->cl_cvtoff       = 0;
        cl->cl_pcvtoff      = 0;
        cl->cl_vtperiod     = 0;
        cl->cl_parentperiod = 0;
        cl->cl_f            = 0;
        cl->cl_myf          = 0;
        cl->cl_myfadj       = 0;
        cl->cl_cfmin        = 0;
        cl->cl_nactive      = 0;

        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;
        qdisc_reset(cl->qdisc);

        if (cl->cl_flags & HFSC_RSC)
                rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
        if (cl->cl_flags & HFSC_FSC)
                rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
        if (cl->cl_flags & HFSC_USC)
                rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist)
                        hfsc_reset_class(cl);
        }
        __skb_queue_purge(&q->requeue);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        qdisc_watchdog_cancel(&q->watchdog);
        sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl, *next;
        unsigned int i;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist)
                        tcf_destroy_chain(&cl->filter_list);
        }
        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
                        hfsc_destroy_class(sch, cl);
        }
        __skb_queue_purge(&q->requeue);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;

        qopt.defcls = q->defcls;
        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
        return skb->len;

 nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_class *cl;
        unsigned int len;
        int err;

        cl = hfsc_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return err;
        }

        len = skb->len;
        err = cl->qdisc->enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                cl->qstats.drops++;
                sch->qstats.drops++;
                return err;
        }

        if (cl->qdisc->q.qlen == 1)
                set_active(cl, len);

        cl->bstats.packets++;
        cl->bstats.bytes += len;
        sch->bstats.packets++;
        sch->bstats.bytes += len;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct sk_buff *skb;
        u64 cur_time;
        unsigned int next_len;
        int realtime = 0;

        if (sch->q.qlen == 0)
                return NULL;
        if ((skb = __skb_dequeue(&q->requeue)))
                goto out;

        cur_time = psched_get_time();

        /*
         * if there are eligible classes, use real-time criteria.
         * find the class with the minimum deadline among
         * the eligible classes.
         */
        if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
                realtime = 1;
        } else {
                /*
                 * use link-sharing criteria
                 * get the class with the minimum vt in the hierarchy
                 */
                cl = vttree_get_minvt(&q->root, cur_time);
                if (cl == NULL) {
                        sch->qstats.overlimits++;
                        hfsc_schedule_watchdog(sch);
                        return NULL;
                }
        }

        skb = cl->qdisc->dequeue(cl->qdisc);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("HFSC: Non-work-conserving qdisc ?\n");
                return NULL;
        }

        update_vf(cl, skb->len, cur_time);
        if (realtime)
                cl->cl_cumul += skb->len;

        if (cl->qdisc->q.qlen != 0) {
                if (cl->cl_flags & HFSC_RSC) {
                        /* update ed */
                        next_len = qdisc_peek_len(cl->qdisc);
                        if (realtime)
                                update_ed(cl, next_len);
                        else
                                update_d(cl, next_len);
                }
        } else {
                /* the class becomes passive */
                set_passive(cl);
        }

 out:
        sch->flags &= ~TCQ_F_THROTTLED;
        sch->q.qlen--;

        return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        __skb_queue_head(&q->requeue, skb);
        sch->q.qlen++;
        sch->qstats.requeues++;
        return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int len;

        list_for_each_entry(cl, &q->droplist, dlist) {
                if (cl->qdisc->ops->drop != NULL &&
                    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
                        if (cl->qdisc->q.qlen == 0) {
                                update_vf(cl, 0, 0);
                                set_passive(cl);
                        } else {
                                list_move_tail(&cl->dlist, &q->droplist);
                        }
                        cl->qstats.drops++;
                        sch->qstats.drops++;
                        sch->q.qlen--;
                        return len;
                }
        }
        return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
        .change         = hfsc_change_class,
        .delete         = hfsc_delete_class,
        .graft          = hfsc_graft_class,
        .leaf           = hfsc_class_leaf,
        .qlen_notify    = hfsc_qlen_notify,
        .get            = hfsc_get_class,
        .put            = hfsc_put_class,
        .bind_tcf       = hfsc_bind_tcf,
        .unbind_tcf     = hfsc_unbind_tcf,
        .tcf_chain      = hfsc_tcf_chain,
        .dump           = hfsc_dump_class,
        .dump_stats     = hfsc_dump_class_stats,
        .walk           = hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
        .id             = "hfsc",
        .init           = hfsc_init_qdisc,
        .change         = hfsc_change_qdisc,
        .reset          = hfsc_reset_qdisc,
        .destroy        = hfsc_destroy_qdisc,
        .dump           = hfsc_dump_qdisc,
        .enqueue        = hfsc_enqueue,
        .dequeue        = hfsc_dequeue,
        .requeue        = hfsc_requeue,
        .drop           = hfsc_drop,
        .cl_ops         = &hfsc_class_ops,
        .priv_size      = sizeof(struct hfsc_sched),
        .owner          = THIS_MODULE
};

static int __init
hfsc_init(void)
{
        return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
        unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
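
/*
 * Illustrative userspace usage (added for exposition; not part of this
 * file), via the tc(8) utility; see tc-hfsc(8) for the exact grammar:
 *
 *      tc qdisc add dev eth0 root handle 1: hfsc default 10
 *      tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *              sc m1 2mbit d 10ms m2 1mbit ul m2 2mbit
 *
 * "sc" installs both the real-time and link-sharing curves (HFSC_RSC
 * and HFSC_FSC above), while "ul" caps the class with an upperlimit
 * curve (HFSC_USC); "default 10" selects the class minor id used by
 * hfsc_classify() when no filter matches.
 */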