/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 * coordinates are given by 64 bit unsigned integers.
 * x-axis: unit is clock count.
 * y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * The inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
struct internal_sc
{
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
};

struct hfsc_class
{
        u32             classid;        /* class id */
        unsigned int    refcnt;         /* usage count */

        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        spinlock_t      *stats_lock;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */

        struct hfsc_sched *sched;       /* scheduler data */
        struct hfsc_class *cl_parent;   /* parent class */
        struct list_head siblings;      /* sibling classes */
        struct list_head children;      /* child classes */
        struct Qdisc    *qdisc;         /* leaf qdisc */

        struct rb_node  el_node;        /* qdisc's eligible tree member */
        struct rb_root  vt_tree;        /* active children sorted by cl_vt */
        struct rb_node  vt_node;        /* parent's vt_tree member */
        struct rb_root  cf_tree;        /* active children sorted by cl_f */
        struct rb_node  cf_node;        /* parent's cf_heap member */
        struct list_head hlist;         /* hash list member */
        struct list_head dlist;         /* drop list member */

        u64     cl_total;               /* total work in bytes */
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */

        u64     cl_d;                   /* deadline */
        u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
        u64     cl_myf;                 /* my fit-time (calculated from this
                                           class's own upperlimit curve) */
        u64     cl_myfadj;              /* my fit-time adjustment (to cancel
                                           history dependence) */
        u64     cl_cfmin;               /* earliest children's fit-time (used
                                           with cl_myf to obtain cl_f) */
        u64     cl_cvtmin;              /* minimal virtual time among the
                                           children fit for link-sharing
                                           (monotonic within a period) */
        u64     cl_vtadj;               /* intra-period cumulative vt
                                           adjustment */
        u64     cl_vtoff;               /* inter-period cumulative vt offset */
        u64     cl_cvtmax;              /* max child's vt in the last period */
        u64     cl_cvtoff;              /* cumulative cvtmax of all periods */
        u64     cl_pcvtoff;             /* parent's cvtoff at initialization
                                           time */

        struct internal_sc cl_rsc;      /* internal real-time service curve */
        struct internal_sc cl_fsc;      /* internal fair service curve */
        struct internal_sc cl_usc;      /* internal upperlimit service curve */
        struct runtime_sc cl_deadline;  /* deadline curve */
        struct runtime_sc cl_eligible;  /* eligible curve */
        struct runtime_sc cl_virtual;   /* virtual curve */
        struct runtime_sc cl_ulimit;    /* upperlimit curve */

        unsigned long   cl_flags;       /* which curves are valid */
        unsigned long   cl_vtperiod;    /* vt period sequence number */
        unsigned long   cl_parentperiod;/* parent's vt period sequence number*/
        unsigned long   cl_nactive;     /* number of active children */
};

#define HFSC_HSIZE      16

struct hfsc_sched
{
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct list_head clhash[HFSC_HSIZE];    /* class hash */
        struct rb_root eligible;                /* eligible tree */
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
        struct qdisc_watchdog watchdog;         /* watchdog timer */
};

#define HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */

/*
 * the eligible tree holds backlogged classes, sorted by their eligible
 * times. there is one eligible tree per hfsc instance.
 */
static void
eltree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->sched->eligible.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, el_node);
                if (cl->cl_e >= cl1->cl_e)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->el_node, parent, p);
        rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
        eltree_remove(cl);
        eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
        struct hfsc_class *p, *cl = NULL;
        struct rb_node *n;

        for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, el_node);
                if (p->cl_e > cur_time)
                        break;
                if (cl == NULL || p->cl_d < cl->cl_d)
                        cl = p;
        }
        return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
        struct rb_node *n;

        n = rb_first(&q->eligible);
        if (n == NULL)
                return NULL;
        return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes, sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, vt_node);
                if (cl->cl_vt >= cl1->cl_vt)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->vt_node, parent, p);
        rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
        vttree_remove(cl);
        vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
        struct hfsc_class *p;
        struct rb_node *n;

        for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, vt_node);
                if (p->cl_f <= cur_time)
                        return p;
        }
        return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
        /* if root-class's cfmin is bigger than cur_time nothing to do */
        if (cl->cl_cfmin > cur_time)
                return NULL;

        while (cl->level > 0) {
                cl = vttree_firstfit(cl, cur_time);
                if (cl == NULL)
                        return NULL;

                /*
                 * update parent's cl_cvtmin.
                 */
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
                        cl->cl_parent->cl_cvtmin = cl->cl_vt;
        }
        return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, cf_node);
                if (cl->cl_f >= cl1->cl_f)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->cf_node, parent, p);
        rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
        cftree_remove(cl);
        cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *      m: bps
 *      d: us
 *  internal service curve parameters
 *      sm: (bytes/psched_us) << SM_SHIFT
 *      ism: (psched_us/byte) << ISM_SHIFT
 *      dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec       100Kbps   1Mbps     10Mbps    100Mbps    1Gbps
 *  -------------+-----------------------------------------------------
 *  bytes/1.024us  12.8e-3   128e-3    1280e-3   12800e-3   128000e-3
 *  1.024us/byte   78.125    7.8125    0.78125   0.078125   0.0078125
 */
#define SM_SHIFT        20
#define ISM_SHIFT       18

#define SM_MASK         ((1ULL << SM_SHIFT) - 1)
#define ISM_MASK        ((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
        u64 y;

        /*
         * compute
         *      y = x * sm >> SM_SHIFT
         * but divide it for the upper and lower bits to avoid overflow
         */
        y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
        return y;
}
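
/*
 * A worked example of the split above (illustrative only): with
 * SM_SHIFT = 20, take x = 0x100001 and sm = 3 << SM_SHIFT.  Then
 *
 *      (x >> SM_SHIFT) * sm             = 1 * 3145728 = 3145728
 *      ((x & SM_MASK) * sm) >> SM_SHIFT = 3145728 >> 20 = 3
 *
 * so y = 3145731, identical to the unsplit (x * sm) >> SM_SHIFT, while
 * every intermediate product stays well within 64 bits.  The split is
 * exact because the high part contributes a multiple of 2^SM_SHIFT to
 * the unshifted product.
 */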

static inline u64
seg_y2x(u64 y, u64 ism)
{
        u64 x;

        if (y == 0)
                x = 0;
        else if (ism == HT_INFINITY)
                x = HT_INFINITY;
        else {
                x = (y >> ISM_SHIFT) * ism
                    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
        }
        return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
        u64 sm;

        sm = ((u64)m << SM_SHIFT);
        sm += PSCHED_TICKS_PER_SEC - 1;
        do_div(sm, PSCHED_TICKS_PER_SEC);
        return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
        u64 ism;

        if (m == 0)
                ism = HT_INFINITY;
        else {
                ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
                ism += m - 1;
                do_div(ism, m);
        }
        return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
        u64 dx;

        dx = ((u64)d * PSCHED_TICKS_PER_SEC);
        dx += USEC_PER_SEC - 1;
        do_div(dx, USEC_PER_SEC);
        return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
        u64 m;

        m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
        return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
        u64 d;

        d = dx * USEC_PER_SEC;
        do_div(d, PSCHED_TICKS_PER_SEC);
        return (u32)d;
}
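
/*
 * A rough numeric sketch of the conversions above (illustrative only,
 * assuming one psched tick is the 1.024us mentioned earlier, i.e.
 * PSCHED_TICKS_PER_SEC of about 976562): a curve slope of m = 125000
 * bytes/sec (1Mbit/s) moves about 0.128 bytes per tick, so m2sm() yields
 * roughly 0.128 << 20 ~= 134218, matching the 128e-3 column in the table
 * above, while m2ism() yields roughly 7.8125 << 18 ~= 2048000.  sm2m()
 * and dx2d() invert the conversions (up to rounding) when the curves are
 * dumped back to userspace.
 */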

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
        isc->sm1 = m2sm(sc->m1);
        isc->ism1 = m2ism(sc->m1);
        isc->dx = d2dx(sc->d);
        isc->dy = seg_x2y(isc->dx, isc->sm1);
        isc->sm2 = m2sm(sc->m2);
        isc->ism2 = m2ism(sc->m2);
}
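
/*
 * In other words: a two-parameter tc service curve (m1, d, m2) becomes a
 * two-segment internal curve whose first segment lasts dx = d2dx(d)
 * psched ticks at slope sm1, rising by dy = seg_x2y(dx, sm1) bytes,
 * after which the second segment continues indefinitely at slope sm2.
 */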

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        rtsc->x = x;
        rtsc->y = y;
        rtsc->sm1 = isc->sm1;
        rtsc->ism1 = isc->ism1;
        rtsc->dx = isc->dx;
        rtsc->dy = isc->dy;
        rtsc->sm2 = isc->sm2;
        rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve for
 * the given y-projection (amount of work) value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
        u64 x;

        if (y < rtsc->y)
                x = rtsc->x;
        else if (y <= rtsc->y + rtsc->dy) {
                /* x belongs to the 1st segment */
                if (rtsc->dy == 0)
                        x = rtsc->x + rtsc->dx;
                else
                        x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
        } else {
                /* x belongs to the 2nd segment */
                x = rtsc->x + rtsc->dx
                    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
        }
        return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
        u64 y;

        if (x <= rtsc->x)
                y = rtsc->y;
        else if (x <= rtsc->x + rtsc->dx)
                /* y belongs to the 1st segment */
                y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
        else
                /* y belongs to the 2nd segment */
                y = rtsc->y + rtsc->dy
                    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
        return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        u64 y1, y2, dx, dy;
        u32 dsm;

        if (isc->sm1 <= isc->sm2) {
                /* service curve is convex */
                y1 = rtsc_x2y(rtsc, x);
                if (y1 < y)
                        /* the current rtsc is smaller */
                        return;
                rtsc->x = x;
                rtsc->y = y;
                return;
        }

        /*
         * service curve is concave
         * compute the two y values of the current rtsc
         *      y1: at x
         *      y2: at (x + dx)
         */
        y1 = rtsc_x2y(rtsc, x);
        if (y1 <= y) {
                /* rtsc is below isc, no change to rtsc */
                return;
        }

        y2 = rtsc_x2y(rtsc, x + isc->dx);
        if (y2 >= y + isc->dy) {
                /* rtsc is above isc, replace rtsc by isc */
                rtsc->x = x;
                rtsc->y = y;
                rtsc->dx = isc->dx;
                rtsc->dy = isc->dy;
                return;
        }

        /*
         * the two curves intersect
         * compute the offsets (dx, dy) using the reverse
         * function of seg_x2y()
         *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
         * i.e. dx * sm1 >> SM_SHIFT == dx * sm2 >> SM_SHIFT + (y1 - y),
         * which solves to dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2).
         */
        dx = (y1 - y) << SM_SHIFT;
        dsm = isc->sm1 - isc->sm2;
        do_div(dx, dsm);
        /*
         * check if (x, y1) belongs to the 1st segment of rtsc.
         * if so, add the offset.
         */
        if (rtsc->x + rtsc->dx > x)
                dx += rtsc->x + rtsc->dx - x;
        dy = seg_x2y(dx, isc->sm1);

        rtsc->x = x;
        rtsc->y = y;
        rtsc->dx = dx;
        rtsc->dy = dy;
        return;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
        u64 cur_time = psched_get_time();

        /* update the deadline curve */
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

        /*
         * update the eligible curve.
         * for concave, it is equal to the deadline curve.
         * for convex, it is a linear curve with slope m2.
         */
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }

        /* compute e and d */
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
        struct rb_node *n = rb_first(&cl->cf_tree);
        struct hfsc_class *p;

        if (n == NULL) {
                cl->cl_cfmin = 0;
                return;
        }
        p = rb_entry(n, struct hfsc_class, cf_node);
        cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
        struct hfsc_class *max_cl;
        struct rb_node *n;
        u64 vt, f, cur_time;
        int go_active;

        cur_time = 0;
        go_active = 1;
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                if (go_active && cl->cl_nactive++ == 0)
                        go_active = 1;
                else
                        go_active = 0;

                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
                                max_cl = rb_entry(n, struct hfsc_class, vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes. if the parent's period didn't
                                 * change, don't decrease vt of the class.
                                 */
                                vt = max_cl->cl_vt;
                                if (cl->cl_parent->cl_cvtmin != 0)
                                        vt = (cl->cl_parent->cl_cvtmin + vt)/2;

                                if (cl->cl_parent->cl_vtperiod !=
                                    cl->cl_parentperiod || vt > cl->cl_vt)
                                        cl->cl_vt = vt;
                        } else {
                                /*
                                 * first child for a new parent backlog period.
                                 * add parent's cvtmax to cvtoff to make a new
                                 * vt (vtoff + vt) larger than the vt in the
                                 * last period for all children.
                                 */
                                vt = cl->cl_parent->cl_cvtmax;
                                cl->cl_parent->cl_cvtoff += vt;
                                cl->cl_parent->cl_cvtmax = 0;
                                cl->cl_parent->cl_cvtmin = 0;
                                cl->cl_vt = 0;
                        }

                        cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
                                       cl->cl_pcvtoff;

                        /* update the virtual curve */
                        vt = cl->cl_vt + cl->cl_vtoff;
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
                                 cl->cl_total);
                        if (cl->cl_virtual.x == vt) {
                                cl->cl_virtual.x -= cl->cl_vtoff;
                                cl->cl_vtoff = 0;
                        }
                        cl->cl_vtadj = 0;

                        cl->cl_vtperiod++;  /* increment vt period */
                        cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
                        if (cl->cl_parent->cl_nactive == 0)
                                cl->cl_parentperiod++;
                        cl->cl_f = 0;

                        vttree_insert(cl);
                        cftree_insert(cl);

                        if (cl->cl_flags & HFSC_USC) {
                                /* class has upper limit curve */
                                if (cur_time == 0)
                                        cur_time = psched_get_time();

                                /* update the ulimit curve */
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
                                         cl->cl_total);
                                /* compute myf */
                                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                                                      cl->cl_total);
                                cl->cl_myfadj = 0;
                        }
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
        u64 f; /* , myf_bound, delta; */
        int go_passive = 0;

        if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
                go_passive = 1;

        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                cl->cl_total += len;

                if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
                        continue;

                if (go_passive && --cl->cl_nactive == 0)
                        go_passive = 1;
                else
                        go_passive = 0;

                if (go_passive) {
                        /* no more active child, going passive */

                        /* update cvtmax of the parent class */
                        if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
                                cl->cl_parent->cl_cvtmax = cl->cl_vt;

                        /* remove this class from the vt tree */
                        vttree_remove(cl);

                        cftree_remove(cl);
                        update_cfmin(cl->cl_parent);

                        continue;
                }

                /*
                 * update vt and f
                 */
                cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
                            - cl->cl_vtoff + cl->cl_vtadj;

                /*
                 * if vt of the class is smaller than cvtmin,
                 * the class was skipped in the past due to non-fit.
                 * if so, we need to adjust vtadj.
                 */
                if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
                        cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
                        cl->cl_vt = cl->cl_parent->cl_cvtmin;
                }

                /* update the vt tree */
                vttree_update(cl);

                if (cl->cl_flags & HFSC_USC) {
                        cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
                                                              cl->cl_total);
#if 0
                        /*
                         * This code causes classes to stay way under their
                         * limit when multiple classes are used at gigabit
                         * speed. needs investigation. -kaber
                         */
                        /*
                         * if myf lags behind by more than one clock tick
                         * from the current time, adjust myfadj to prevent
                         * a rate-limited class from going greedy.
                         * in a steady state under rate-limiting, myf
                         * fluctuates within one clock tick.
                         */
                        myf_bound = cur_time - PSCHED_JIFFIE2US(1);
                        if (cl->cl_myf < myf_bound) {
                                delta = cur_time - cl->cl_myf;
                                cl->cl_myfadj += delta;
                                cl->cl_myf += delta;
                        }
#endif
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
        if (cl->cl_flags & HFSC_RSC)
                init_ed(cl, len);
        if (cl->cl_flags & HFSC_FSC)
                init_vf(cl, len);

        list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
        if (cl->cl_flags & HFSC_RSC)
                eltree_remove(cl);

        list_del(&cl->dlist);

        /*
         * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
         * needs to be called explicitly to remove a class from vttree.
         */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
        struct sk_buff *skb;
        unsigned int len;

        skb = sch->dequeue(sch);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
                return 0;
        }
        len = skb->len;
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: failed to requeue\n");
                qdisc_tree_decrease_qlen(sch, 1);
                return 0;
        }
        return len;
}
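
/*
 * Note: the Qdisc interface of this kernel generation provides no
 * dedicated peek operation, which is why qdisc_peek_len() above has to
 * dequeue the head packet and immediately requeue it just to learn its
 * length; the printks fire when the inner qdisc misbehaves.
 */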

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
        struct hfsc_class *p;
        unsigned int level;

        do {
                level = 0;
                list_for_each_entry(p, &cl->children, siblings) {
                        if (p->level >= level)
                                level = p->level + 1;
                }
                cl->level = level;
        } while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
        h ^= h >> 8;
        h ^= h >> 4;

        return h & (HFSC_HSIZE - 1);
}
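
/*
 * For example (illustrative): h = 0x123 gives
 *      h ^= h >> 8;    ->  0x123 ^ 0x001 = 0x122
 *      h ^= h >> 4;    ->  0x122 ^ 0x012 = 0x130
 * and 0x130 & (HFSC_HSIZE - 1) selects bucket 0; the xor-folding mixes
 * higher classid bits into the four bits used as the bucket index.
 */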

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;

        list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
                if (cl->classid == classid)
                        return cl;
        }
        return NULL;
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
        sc2isc(rsc, &cl->cl_rsc);
        rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }
        cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
        sc2isc(fsc, &cl->cl_fsc);
        rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
        cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
        sc2isc(usc, &cl->cl_usc);
        rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
        cl->cl_flags |= HFSC_USC;
}

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct rtattr **tca, unsigned long *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
        struct hfsc_class *parent = NULL;
        struct rtattr *opt = tca[TCA_OPTIONS-1];
        struct rtattr *tb[TCA_HFSC_MAX];
        struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
        u64 cur_time;

        if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
                return -EINVAL;

        if (tb[TCA_HFSC_RSC-1]) {
                if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
                        return -EINVAL;
                rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
                if (rsc->m1 == 0 && rsc->m2 == 0)
                        rsc = NULL;
        }

        if (tb[TCA_HFSC_FSC-1]) {
                if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
                        return -EINVAL;
                fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
                if (fsc->m1 == 0 && fsc->m2 == 0)
                        fsc = NULL;
        }

        if (tb[TCA_HFSC_USC-1]) {
                if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
                        return -EINVAL;
                usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
                if (usc->m1 == 0 && usc->m2 == 0)
                        usc = NULL;
        }

        if (cl != NULL) {
                if (parentid) {
                        if (cl->cl_parent && cl->cl_parent->classid != parentid)
                                return -EINVAL;
                        if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
                                return -EINVAL;
                }
                cur_time = psched_get_time();

                sch_tree_lock(sch);
                if (rsc != NULL)
                        hfsc_change_rsc(cl, rsc, cur_time);
                if (fsc != NULL)
                        hfsc_change_fsc(cl, fsc);
                if (usc != NULL)
                        hfsc_change_usc(cl, usc, cur_time);

                if (cl->qdisc->q.qlen != 0) {
                        if (cl->cl_flags & HFSC_RSC)
                                update_ed(cl, qdisc_peek_len(cl->qdisc));
                        if (cl->cl_flags & HFSC_FSC)
                                update_vf(cl, 0, cur_time);
                }
                sch_tree_unlock(sch);

                if (tca[TCA_RATE-1])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                              cl->stats_lock, tca[TCA_RATE-1]);
                return 0;
        }

        if (parentid == TC_H_ROOT)
                return -EEXIST;

        parent = &q->root;
        if (parentid) {
                parent = hfsc_find_class(parentid, sch);
                if (parent == NULL)
                        return -ENOENT;
        }

        if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
                return -EINVAL;
        if (hfsc_find_class(classid, sch))
                return -EEXIST;

        if (rsc == NULL && fsc == NULL)
                return -EINVAL;

        cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        if (rsc != NULL)
                hfsc_change_rsc(cl, rsc, 0);
        if (fsc != NULL)
                hfsc_change_fsc(cl, fsc);
        if (usc != NULL)
                hfsc_change_usc(cl, usc, 0);

        cl->refcnt = 1;
        cl->classid = classid;
        cl->sched = q;
        cl->cl_parent = parent;
        cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        cl->stats_lock = &sch->dev->queue_lock;
        INIT_LIST_HEAD(&cl->children);
        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;

        sch_tree_lock(sch);
        list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
                hfsc_purge_queue(sch, parent);
        hfsc_adjust_levels(parent);
        cl->cl_pcvtoff = parent->cl_cvtoff;
        sch_tree_unlock(sch);

        if (tca[TCA_RATE-1])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
                                  cl->stats_lock, tca[TCA_RATE-1]);
        *arg = (unsigned long)cl;
        return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        tcf_destroy_chain(cl->filter_list);
        qdisc_destroy(cl->qdisc);
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        if (cl != &q->root)
                kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
                return -EBUSY;

        sch_tree_lock(sch);

        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);

        hfsc_purge_queue(sch, cl);
        list_del(&cl->hlist);

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
            (cl = hfsc_find_class(skb->priority, sch)) != NULL)
                if (cl->level == 0)
                        return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->root.filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return NULL;
#endif
                if ((cl = (struct hfsc_class *)res.class) == NULL) {
                        if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
                                break; /* filter selected invalid classid */
                }

                if (cl->level == 0)
                        return cl; /* hit leaf class */

                /* apply inner filter chain */
                tcf = cl->filter_list;
        }

        /* classification failed, try default class */
        cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (cl == NULL || cl->level > 0)
                return NULL;

        return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                return -ENOENT;
        if (cl->level > 0)
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                        cl->classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        hfsc_purge_queue(sch, cl);
        *old = xchg(&cl->qdisc, new);
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl != NULL && cl->level == 0)
                return cl->qdisc;

        return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->qdisc->q.qlen == 0) {
                update_vf(cl, 0, 0);
                set_passive(cl);
        }
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
        struct hfsc_class *p = (struct hfsc_class *)parent;
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL) {
                if (p != NULL && p->level <= cl->level)
                        return 0;
                cl->filter_cnt++;
        }

        return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                cl = &q->root;

        return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
        struct tc_service_curve tsc;

        tsc.m1 = sm2m(sc->sm1);
        tsc.d  = dx2d(sc->dx);
        tsc.m2 = sm2m(sc->sm2);
        RTA_PUT(skb, attr, sizeof(tsc), &tsc);

        return skb->len;

 rtattr_failure:
        return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
        if ((cl->cl_flags & HFSC_RSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
                goto rtattr_failure;

        if ((cl->cl_flags & HFSC_FSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
                goto rtattr_failure;

        if ((cl->cl_flags & HFSC_USC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
                goto rtattr_failure;

        return skb->len;

 rtattr_failure:
        return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta = (struct rtattr *)b;

        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (cl->level == 0)
                tcm->tcm_info = cl->qdisc->handle;

        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (hfsc_dump_curves(skb, cl) < 0)
                goto rtattr_failure;
        rta->rta_len = skb_tail_pointer(skb) - b;

        return skb->len;

 rtattr_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                      struct gnet_dump *d)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;

        cl->qstats.qlen = cl->qdisc->q.qlen;
        xstats.level  = cl->level;
        xstats.period = cl->cl_vtperiod;
        xstats.work   = cl->cl_total;
        xstats.rtwork = cl->cl_cumul;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        u64 next_time = 0;

        if ((cl = eltree_get_minel(q)) != NULL)
                next_time = cl->cl_e;
        if (q->root.cl_cfmin != 0) {
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
        WARN_ON(next_time == 0);
        qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
        unsigned int i;

        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = RTA_DATA(opt);

        sch->stats_lock = &sch->dev->queue_lock;

        q->defcls = qopt->defcls;
        for (i = 0; i < HFSC_HSIZE; i++)
                INIT_LIST_HEAD(&q->clhash[i]);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        skb_queue_head_init(&q->requeue);

        q->root.refcnt = 1;
        q->root.classid = sch->handle;
        q->root.sched = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                          sch->handle);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
        q->root.stats_lock = &sch->dev->queue_lock;
        INIT_LIST_HEAD(&q->root.children);
        q->root.vt_tree = RB_ROOT;
        q->root.cf_tree = RB_ROOT;

        list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

        qdisc_watchdog_init(&q->watchdog, sch);

        return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;

        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = RTA_DATA(opt);

        sch_tree_lock(sch);
        q->defcls = qopt->defcls;
        sch_tree_unlock(sch);

        return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
        cl->cl_total        = 0;
        cl->cl_cumul        = 0;
        cl->cl_d            = 0;
        cl->cl_e            = 0;
        cl->cl_vt           = 0;
        cl->cl_vtadj        = 0;
        cl->cl_vtoff        = 0;
        cl->cl_cvtmin       = 0;
        cl->cl_cvtmax       = 0;
        cl->cl_cvtoff       = 0;
        cl->cl_pcvtoff      = 0;
        cl->cl_vtperiod     = 0;
        cl->cl_parentperiod = 0;
        cl->cl_f            = 0;
        cl->cl_myf          = 0;
        cl->cl_myfadj       = 0;
        cl->cl_cfmin        = 0;
        cl->cl_nactive      = 0;

        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;
        qdisc_reset(cl->qdisc);

        if (cl->cl_flags & HFSC_RSC)
                rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
        if (cl->cl_flags & HFSC_FSC)
                rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
        if (cl->cl_flags & HFSC_USC)
                rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist)
                        hfsc_reset_class(cl);
        }
        __skb_queue_purge(&q->requeue);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        qdisc_watchdog_cancel(&q->watchdog);
        sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl, *next;
        unsigned int i;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
                        hfsc_destroy_class(sch, cl);
        }
        __skb_queue_purge(&q->requeue);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;

        qopt.defcls = q->defcls;
        RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

        return skb->len;

 rtattr_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_class *cl;
        unsigned int len;
        int err;

        cl = hfsc_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return err;
        }

        len = skb->len;
        err = cl->qdisc->enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                cl->qstats.drops++;
                sch->qstats.drops++;
                return err;
        }

        if (cl->qdisc->q.qlen == 1)
                set_active(cl, len);

        cl->bstats.packets++;
        cl->bstats.bytes += len;
        sch->bstats.packets++;
        sch->bstats.bytes += len;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct sk_buff *skb;
        u64 cur_time;
        unsigned int next_len;
        int realtime = 0;

        if (sch->q.qlen == 0)
                return NULL;
        if ((skb = __skb_dequeue(&q->requeue)))
                goto out;

        cur_time = psched_get_time();

        /*
         * if there are eligible classes, use real-time criteria.
         * find the class with the minimum deadline among
         * the eligible classes.
         */
        if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
                realtime = 1;
        } else {
                /*
                 * use link-sharing criteria
                 * get the class with the minimum vt in the hierarchy
                 */
                cl = vttree_get_minvt(&q->root, cur_time);
                if (cl == NULL) {
                        sch->qstats.overlimits++;
                        hfsc_schedule_watchdog(sch);
                        return NULL;
                }
        }

        skb = cl->qdisc->dequeue(cl->qdisc);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("HFSC: Non-work-conserving qdisc ?\n");
                return NULL;
        }

        update_vf(cl, skb->len, cur_time);
        if (realtime)
                cl->cl_cumul += skb->len;

        if (cl->qdisc->q.qlen != 0) {
                if (cl->cl_flags & HFSC_RSC) {
                        /* update ed */
                        next_len = qdisc_peek_len(cl->qdisc);
                        if (realtime)
                                update_ed(cl, next_len);
                        else
                                update_d(cl, next_len);
                }
        } else {
                /* the class becomes passive */
                set_passive(cl);
        }

 out:
        sch->flags &= ~TCQ_F_THROTTLED;
        sch->q.qlen--;

        return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        __skb_queue_head(&q->requeue, skb);
        sch->q.qlen++;
        sch->qstats.requeues++;
        return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int len;

        list_for_each_entry(cl, &q->droplist, dlist) {
                if (cl->qdisc->ops->drop != NULL &&
                    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
                        if (cl->qdisc->q.qlen == 0) {
                                update_vf(cl, 0, 0);
                                set_passive(cl);
                        } else {
                                list_move_tail(&cl->dlist, &q->droplist);
                        }
                        cl->qstats.drops++;
                        sch->qstats.drops++;
                        sch->q.qlen--;
                        return len;
                }
        }
        return 0;
}

static struct Qdisc_class_ops hfsc_class_ops = {
        .change         = hfsc_change_class,
        .delete         = hfsc_delete_class,
        .graft          = hfsc_graft_class,
        .leaf           = hfsc_class_leaf,
        .qlen_notify    = hfsc_qlen_notify,
        .get            = hfsc_get_class,
        .put            = hfsc_put_class,
        .bind_tcf       = hfsc_bind_tcf,
        .unbind_tcf     = hfsc_unbind_tcf,
        .tcf_chain      = hfsc_tcf_chain,
        .dump           = hfsc_dump_class,
        .dump_stats     = hfsc_dump_class_stats,
        .walk           = hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
        .id             = "hfsc",
        .init           = hfsc_init_qdisc,
        .change         = hfsc_change_qdisc,
        .reset          = hfsc_reset_qdisc,
        .destroy        = hfsc_destroy_qdisc,
        .dump           = hfsc_dump_qdisc,
        .enqueue        = hfsc_enqueue,
        .dequeue        = hfsc_dequeue,
        .requeue        = hfsc_requeue,
        .drop           = hfsc_drop,
        .cl_ops         = &hfsc_class_ops,
        .priv_size      = sizeof(struct hfsc_sched),
        .owner          = THIS_MODULE
};

static int __init
hfsc_init(void)
{
        return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
        unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);