
/*
 *  net/dccp/ccids/ccid3.c
 *
 *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
 *  Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com>
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "../ccid.h"
#include "../dccp.h"
#include "lib/packet_history.h"
#include "lib/loss_interval.h"
#include "lib/tfrc.h"
#include "ccid3.h"

/*
 * The maths here avoids 32-bit overflow when a is big: the divisor is
 * scaled so that the intermediate product stays close to, but below,
 * the 32-bit limit.
 */
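/*
 * Illustrative example (added for clarity, not part of the original comment):
 * for usecs_div(3000, 125000) the table below selects div = 10, so
 * tmp = 3000 * 100000 = 300,000,000 (still within u32) and the result is
 * tmp / (125000 / 10) = 24000 -- i.e. approximately a * USEC_PER_SEC / b,
 * computed without overflowing the intermediate product.
 */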
static u32 usecs_div(const u32 a, const u32 b)
{
	const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10))    ? 10 :
			a < (UINT_MAX / (USEC_PER_SEC / 50))    ? 50 :
			a < (UINT_MAX / (USEC_PER_SEC / 100))   ? 100 :
			a < (UINT_MAX / (USEC_PER_SEC / 500))   ? 500 :
			a < (UINT_MAX / (USEC_PER_SEC / 1000))  ? 1000 :
			a < (UINT_MAX / (USEC_PER_SEC / 5000))  ? 5000 :
			a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
			a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
			100000;
	const u32 tmp = a * (USEC_PER_SEC / div);

	return (b >= 2 * div) ? tmp / (b / div) : tmp;
}

static int ccid3_debug;

#ifdef CCID3_DEBUG
#define ccid3_pr_debug(format, a...) \
	do { if (ccid3_debug) \
		printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
	} while (0)
#else
#define ccid3_pr_debug(format, a...)
#endif

static struct dccp_tx_hist *ccid3_tx_hist;
static struct dccp_rx_hist *ccid3_rx_hist;
static struct dccp_li_hist *ccid3_li_hist;

/* TFRC sender states */
enum ccid3_hc_tx_states {
	TFRC_SSTATE_NO_SENT = 1,
	TFRC_SSTATE_NO_FBACK,
	TFRC_SSTATE_FBACK,
	TFRC_SSTATE_TERM,
};

#ifdef CCID3_DEBUG
static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
{
	static char *ccid3_state_names[] = {
		[TFRC_SSTATE_NO_SENT]  = "NO_SENT",
		[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
		[TFRC_SSTATE_FBACK]    = "FBACK",
		[TFRC_SSTATE_TERM]     = "TERM",
	};

	return ccid3_state_names[state];
}
#endif

static void ccid3_hc_tx_set_state(struct sock *sk,
				  enum ccid3_hc_tx_states state)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;

	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
		       dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
		       ccid3_tx_state_name(state));
	WARN_ON(state == oldstate);
	hctx->ccid3hctx_state = state;
}

/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
static inline void ccid3_calc_new_t_ipi(struct ccid3_hc_tx_sock *hctx)
{
	/*
	 * If no feedback has been received yet, the spec says t_ipi is
	 * 1 second (set elsewhere) and then doubles after every nofeedback
	 * timer expiry (handled in a separate function).
	 */
	if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK)
		hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s,
						  hctx->ccid3hctx_x);
}

/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx)
{
	hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
					   TFRC_OPSYS_HALF_TIME_GRAN);
}

/*
 * Update X by
 *    If (p > 0)
 *       x_calc = calcX(s, R, p);
 *       X = max(min(X_calc, 2 * X_recv), s / t_mbi);
 *    Else
 *       If (now - tld >= R)
 *          X = max(min(2 * X, 2 * X_recv), s / R);
 *          tld = now;
 */
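/*
 * Note (added for clarity): tfrc_calc_x() below evaluates the TFRC
 * throughput equation of RFC 3448, 3.1, roughly
 *
 *	X_calc = s / (R*sqrt(2*p/3) + t_RTO*3*sqrt(3*p/8)*p*(1 + 32*p^2))
 *
 * using the fixed-point lookup helpers from lib/tfrc rather than
 * floating point.
 */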
static void ccid3_hc_tx_update_x(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	/* To avoid large error in calcX */
	if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
		hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
						     hctx->ccid3hctx_rtt,
						     hctx->ccid3hctx_p);
		hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc,
							  2 * hctx->ccid3hctx_x_recv),
					       (hctx->ccid3hctx_s /
						TFRC_MAX_BACK_OFF_TIME));
	} else {
		struct timeval now;

		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >=
		    hctx->ccid3hctx_rtt) {
			hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv,
								  hctx->ccid3hctx_x) * 2,
						       usecs_div(hctx->ccid3hctx_s,
								 hctx->ccid3hctx_rtt));
			hctx->ccid3hctx_t_ld = now;
		}
	}
}

static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	unsigned long next_tmout = 0;
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		/* XXX: set some sensible MIB */
		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + HZ / 5);
		goto out;
	}

	ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
		       ccid3_tx_state_name(hctx->ccid3hctx_state));

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_TERM:
		goto out;
	case TFRC_SSTATE_NO_FBACK:
		/* Halve send rate */
		hctx->ccid3hctx_x /= 2;
		if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s /
					 TFRC_MAX_BACK_OFF_TIME))
			hctx->ccid3hctx_x = (hctx->ccid3hctx_s /
					     TFRC_MAX_BACK_OFF_TIME);

		ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
			       "bytes/s\n",
			       dccp_role(sk), sk,
			       ccid3_tx_state_name(hctx->ccid3hctx_state),
			       hctx->ccid3hctx_x);
		next_tmout = max_t(u32, 2 * usecs_div(hctx->ccid3hctx_s,
						      hctx->ccid3hctx_x),
					TFRC_INITIAL_TIMEOUT);
		/*
		 * FIXME - not sure the above calculation is correct. See
		 * section 5 of the CCID3 draft (revision 11): we should
		 * really be adjusting tx_t_ipi and doubling that to
		 * achieve it.
		 */
		break;
	case TFRC_SSTATE_FBACK:
		/*
		 * Check if IDLE since last timeout and recv rate is less than
		 * 4 packets per RTT
		 */
		if (!hctx->ccid3hctx_idle ||
		    (hctx->ccid3hctx_x_recv >=
		     4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
			ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
				       dccp_role(sk), sk,
				       ccid3_tx_state_name(hctx->ccid3hctx_state));
			/* Halve sending rate */

			/*  If (X_calc > 2 * X_recv)
			 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
			 *  Else
			 *    X_recv = X_calc / 4;
			 */
			BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
			       hctx->ccid3hctx_x_calc == 0);

			/* check also if p is zero -> x_calc is infinity? */
			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
			    hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
				hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
							       hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME));
			else
				hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;

			/* Update sending rate */
			ccid3_hc_tx_update_x(sk);
		}
		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * R, 2 * s / X)
		 */
		next_tmout = max_t(u32, hctx->ccid3hctx_t_rto,
					2 * usecs_div(hctx->ccid3hctx_s,
						      hctx->ccid3hctx_x));
		break;
	default:
		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
		dump_stack();
		goto out;
	}

	sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
		       jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
	hctx->ccid3hctx_idle = 1;
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int ccid3_hc_tx_send_packet(struct sock *sk,
				   struct sk_buff *skb, int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct dccp_tx_hist_entry *new_packet;
	struct timeval now;
	long delay;
	int rc = -ENOTCONN;

	BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);

	/* Check if pure ACK or Terminating */
	/*
	 * XXX: We only call this function for DATA and DATAACK; these
	 * packets can have zero length, but why the comment about "pure ACK"?
	 */
	if (unlikely(len == 0))
		goto out;

	/* See if the last packet allocated was not sent */
	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
	if (new_packet == NULL || new_packet->dccphtx_sent) {
		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
						    SLAB_ATOMIC);
		rc = -ENOBUFS;
		if (unlikely(new_packet == NULL)) {
			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, not enough "
				       "mem to add to history, send refused\n",
				       __FUNCTION__, dccp_role(sk), sk);
			goto out;
		}

		dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
	}

	dccp_timestamp(sk, &now);

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_SENT:
		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
		hctx->ccid3hctx_last_win_count = 0;
		hctx->ccid3hctx_t_last_win_count = now;
		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
		hctx->ccid3hctx_t_ipi = TFRC_INITIAL_IPI;

		/* Set nominal send time for initial packet */
		hctx->ccid3hctx_t_nom = now;
		timeval_add_usecs(&hctx->ccid3hctx_t_nom,
				  hctx->ccid3hctx_t_ipi);
		ccid3_calc_new_delta(hctx);
		rc = 0;
		break;
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) -
			 hctx->ccid3hctx_delta);
		/*
		 * timeval_delta() is in usecs; dividing by -1000 both
		 * converts to milliseconds and flips the sign, so a packet
		 * that is not yet due yields a positive delay.
		 */
		delay /= -1000;
		rc = delay > 0 ? delay : 0;
		break;
	default:
		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
		dump_stack();
		rc = -EINVAL;
		break;
	}
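	/*
	 * Note on the return convention (added for clarity): rc == 0 means
	 * the packet may be sent now, a positive rc is the number of
	 * milliseconds the caller should wait before trying again, and a
	 * negative rc is an error.
	 */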
	/* Can we send? if so add options and add to packet history */
	if (rc == 0) {
		dp->dccps_hc_tx_insert_options = 1;
		new_packet->dccphtx_ccval =
			DCCP_SKB_CB(skb)->dccpd_ccval =
				hctx->ccid3hctx_last_win_count;
	}
out:
	return rc;
}

static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct timeval now;

	BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);

	dccp_timestamp(sk, &now);

	/* check if we have sent a data packet */
	if (len > 0) {
		unsigned long quarter_rtt;
		struct dccp_tx_hist_entry *packet;

		packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
		if (unlikely(packet == NULL)) {
			LIMIT_NETDEBUG(KERN_WARNING "%s: packet doesn't "
				       "exist in history!\n", __FUNCTION__);
			return;
		}
		if (unlikely(packet->dccphtx_sent)) {
			LIMIT_NETDEBUG(KERN_WARNING "%s: no unsent packet in "
				       "history!\n", __FUNCTION__);
			return;
		}
		packet->dccphtx_tstamp = now;
		packet->dccphtx_seqno  = dp->dccps_gss;
		/*
		 * Check if win_count has changed.
		 * Algorithm in "8.1. Window Counter Value" in
		 * draft-ietf-dccp-ccid3-11.txt
		 */
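		/*
		 * In short (added for clarity): quarter_rtt below counts how
		 * many quarter-RTT periods have elapsed since the window
		 * counter was last advanced; the counter is then incremented
		 * by that amount, capped at 5 per update, modulo 16.
		 */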
		quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
		if (likely(hctx->ccid3hctx_rtt > 8))
			quarter_rtt /= hctx->ccid3hctx_rtt / 4;

		if (quarter_rtt > 0) {
			hctx->ccid3hctx_t_last_win_count = now;
			hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count +
							  min_t(unsigned long, quarter_rtt, 5)) % 16;
			ccid3_pr_debug("%s, sk=%p, window changed from "
				       "%u to %u!\n",
				       dccp_role(sk), sk,
				       packet->dccphtx_ccval,
				       hctx->ccid3hctx_last_win_count);
		}

		hctx->ccid3hctx_idle = 0;
		packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
		packet->dccphtx_sent = 1;
	} else
		ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n",
			       dccp_role(sk), sk, dp->dccps_gss);

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_SENT:
		/* if first wasn't pure ack */
		if (len != 0)
			printk(KERN_CRIT "%s: %s, First packet sent is noted "
			       "as a data packet\n",
			       __FUNCTION__, dccp_role(sk));
		return;
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		if (len > 0) {
			hctx->ccid3hctx_t_nom = now;
			ccid3_calc_new_t_ipi(hctx);
			ccid3_calc_new_delta(hctx);
			timeval_add_usecs(&hctx->ccid3hctx_t_nom,
					  hctx->ccid3hctx_t_ipi);
		}
		break;
	default:
		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
		dump_stack();
		break;
	}
}

static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;
	struct dccp_tx_hist_entry *packet;
	struct timeval now;
	unsigned long next_tmout;
	u32 t_elapsed;
	u32 pinv;
	u32 x_recv;
	u32 r_sample;

	BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);

	/* we are only interested in ACKs */
	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
	      DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
		return;

	opt_recv = &hctx->ccid3hctx_options_received;

	t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
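	/*
	 * Note (added for clarity): the Elapsed Time option is carried in
	 * multiples of 10 usecs (see ccid3_hc_rx_send_feedback()), hence
	 * the conversion back to microseconds above.
	 */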
	x_recv = opt_recv->ccid3or_receive_rate;
	pinv = opt_recv->ccid3or_loss_event_rate;

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_SENT:
		/* FIXME: what to do here? */
		return;
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		/* Calculate new round trip sample by
		 * R_sample = (now - t_recvdata) - t_delay */

		/* get t_recvdata from history */
		packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
						 DCCP_SKB_CB(skb)->dccpd_ack_seq);
		if (unlikely(packet == NULL)) {
			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, seqno "
				       "%llu(%s) doesn't exist in history!\n",
				       __FUNCTION__, dccp_role(sk), sk,
				       (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				       dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
			return;
		}

		/* Update RTT */
		dccp_timestamp(sk, &now);
		r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
		if (unlikely(r_sample <= t_elapsed))
			LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
				       "t_elapsed=%uus\n",
				       __FUNCTION__, r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		/* Update RTT estimate by
		 * If (No feedback recv)
		 *    R = R_sample;
		 * Else
		 *    R = q * R + (1 - q) * R_sample;
		 *
		 * q is a constant, RFC 3448 recommends 0.9
		 */
		if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
			ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
			hctx->ccid3hctx_rtt = r_sample;
		} else
			hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
					      r_sample / 10;

		ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
			       "r_sample=%uus\n", dccp_role(sk), sk,
			       hctx->ccid3hctx_rtt, r_sample);

		/* Update timeout interval */
		hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
					      USEC_PER_SEC);

		/* Update receive rate */
		hctx->ccid3hctx_x_recv = x_recv; /* X_recv in bytes per sec */

		/* Update loss event rate */
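		/*
		 * Note (added for clarity): the TFRC_OPT_LOSS_EVENT_RATE
		 * option carries the inverse of the loss event rate; ~0
		 * (or 0) means "no loss observed yet", otherwise p is kept
		 * here in fixed point with 1000000 representing 100%.
		 */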
		if (pinv == ~0 || pinv == 0)
			hctx->ccid3hctx_p = 0;
		else {
			hctx->ccid3hctx_p = 1000000 / pinv;

			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
				hctx->ccid3hctx_p = TFRC_SMALLEST_P;
				ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
					       dccp_role(sk), sk);
			}
		}

		/* unschedule no feedback timer */
		sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

		/* Update sending rate */
		ccid3_hc_tx_update_x(sk);

		/* Update next send time */
		timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
				  hctx->ccid3hctx_t_ipi);
		ccid3_calc_new_t_ipi(hctx);
		timeval_add_usecs(&hctx->ccid3hctx_t_nom,
				  hctx->ccid3hctx_t_ipi);
		ccid3_calc_new_delta(hctx);

		/* remove all packets older than the one acked from history */
		dccp_tx_hist_purge_older(ccid3_tx_hist,
					 &hctx->ccid3hctx_hist, packet);
		/*
		 * Since we have recalculated t_ipi, delta and t_nom it is
		 * possible that we can now send a packet, so wake up
		 * dccp_wait_for_ccids.
		 */
		sk->sk_write_space(sk);

		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * R, 2 * s / X)
		 */
		next_tmout = max(hctx->ccid3hctx_t_rto,
				 2 * usecs_div(hctx->ccid3hctx_s,
					       hctx->ccid3hctx_x));

		ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
			       "expire in %lu jiffies (%luus)\n",
			       dccp_role(sk), sk,
			       usecs_to_jiffies(next_tmout), next_tmout);

		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));

		/* set idle flag */
		hctx->ccid3hctx_idle = 1;
		break;
	default:
		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
		dump_stack();
		break;
	}
}

static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
		DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
	return 0;
}

static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
				     unsigned char len, u16 idx,
				     unsigned char *value)
{
	int rc = 0;
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;

	BUG_ON(hctx == NULL);

	opt_recv = &hctx->ccid3hctx_options_received;

	if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
		opt_recv->ccid3or_seqno              = dp->dccps_gsr;
		opt_recv->ccid3or_loss_event_rate    = ~0;
		opt_recv->ccid3or_loss_intervals_idx = 0;
		opt_recv->ccid3or_loss_intervals_len = 0;
		opt_recv->ccid3or_receive_rate       = 0;
	}

	switch (option) {
	case TFRC_OPT_LOSS_EVENT_RATE:
		if (unlikely(len != 4)) {
			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
				       "len for TFRC_OPT_LOSS_EVENT_RATE\n",
				       __FUNCTION__, dccp_role(sk), sk);
			rc = -EINVAL;
		} else {
			opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_loss_event_rate);
		}
		break;
	case TFRC_OPT_LOSS_INTERVALS:
		opt_recv->ccid3or_loss_intervals_idx = idx;
		opt_recv->ccid3or_loss_intervals_len = len;
		ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
			       dccp_role(sk), sk,
			       opt_recv->ccid3or_loss_intervals_idx,
			       opt_recv->ccid3or_loss_intervals_len);
		break;
	case TFRC_OPT_RECEIVE_RATE:
		if (unlikely(len != 4)) {
			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
				       "len for TFRC_OPT_RECEIVE_RATE\n",
				       __FUNCTION__, dccp_role(sk), sk);
			rc = -EINVAL;
		} else {
			opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_receive_rate);
		}
		break;
	}

	return rc;
}

static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);

	if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
	    dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
		hctx->ccid3hctx_s = dp->dccps_packet_size;
	else
		hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE;

	/* Set transmission rate to 1 packet per second */
	hctx->ccid3hctx_x     = hctx->ccid3hctx_s;
	hctx->ccid3hctx_t_rto = USEC_PER_SEC;
	hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
	INIT_LIST_HEAD(&hctx->ccid3hctx_hist);

	hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
	hctx->ccid3hctx_no_feedback_timer.data     = (unsigned long)sk;
	init_timer(&hctx->ccid3hctx_no_feedback_timer);

	return 0;
}

static void ccid3_hc_tx_exit(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
	sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

	/* Empty packet history */
	dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
}

/*
 * RX Half Connection methods
 */

/* TFRC receiver states */
enum ccid3_hc_rx_states {
	TFRC_RSTATE_NO_DATA = 1,
	TFRC_RSTATE_DATA,
	TFRC_RSTATE_TERM    = 127,
};

#ifdef CCID3_DEBUG
static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
{
	static char *ccid3_rx_state_names[] = {
		[TFRC_RSTATE_NO_DATA] = "NO_DATA",
		[TFRC_RSTATE_DATA]    = "DATA",
		[TFRC_RSTATE_TERM]    = "TERM",
	};

	return ccid3_rx_state_names[state];
}
#endif

static void ccid3_hc_rx_set_state(struct sock *sk,
				  enum ccid3_hc_rx_states state)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;

	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
		       dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
		       ccid3_rx_state_name(state));
	WARN_ON(state == oldstate);
	hcrx->ccid3hcrx_state = state;
}

static void ccid3_hc_rx_send_feedback(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_rx_hist_entry *packet;
	struct timeval now;

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	dccp_timestamp(sk, &now);

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		hcrx->ccid3hcrx_x_recv = 0;
		break;
	case TFRC_RSTATE_DATA: {
		const u32 delta = timeval_delta(&now,
					&hcrx->ccid3hcrx_tstamp_last_feedback);
		hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
						   delta);
	}
		break;
	default:
		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
		       __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
		dump_stack();
		return;
	}

	packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
	if (unlikely(packet == NULL)) {
		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, no data packet "
			       "in history!\n",
			       __FUNCTION__, dccp_role(sk), sk);
		return;
	}

	hcrx->ccid3hcrx_tstamp_last_feedback = now;
	hcrx->ccid3hcrx_last_counter         = packet->dccphrx_ccval;
	hcrx->ccid3hcrx_seqno_last_counter   = packet->dccphrx_seqno;
	hcrx->ccid3hcrx_bytes_recv           = 0;

	/* Convert to multiples of 10us */
	hcrx->ccid3hcrx_elapsed_time =
		timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
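	/*
	 * Note (added for clarity): the loss event rate p is reported to the
	 * sender as its inverse, pinv; ~0 signals that no loss has been
	 * observed yet. p itself is kept in fixed point, with 1000000
	 * meaning 100%.
	 */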
	if (hcrx->ccid3hcrx_p == 0)
		hcrx->ccid3hcrx_pinv = ~0;
	else
		hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
	dp->dccps_hc_rx_insert_options = 1;
	dccp_send_ack(sk);
}

static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	__be32 x_recv, pinv;

	BUG_ON(hcrx == NULL);

	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
		return 0;

	DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter;

	if (dccp_packet_without_ack(skb))
		return 0;

	x_recv = htonl(hcrx->ccid3hcrx_x_recv);
	pinv   = htonl(hcrx->ccid3hcrx_pinv);

	if ((hcrx->ccid3hcrx_elapsed_time != 0 &&
	     dccp_insert_option_elapsed_time(sk, skb,
					     hcrx->ccid3hcrx_elapsed_time)) ||
	    dccp_insert_option_timestamp(sk, skb) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
			       &pinv, sizeof(pinv)) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
			       &x_recv, sizeof(x_recv)))
		return -1;

	return 0;
}

/* calculate first loss interval
 *
 * returns estimated loss interval in usecs */
static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
	u32 rtt, delta, x_recv, fval, p, tmp2;
	struct timeval tstamp = { 0, };
	int interval = 0;
	int win_count = 0;
	int step = 0;
	u64 tmp1;

	list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
				 dccphrx_node) {
		if (dccp_rx_hist_entry_data_packet(entry)) {
			tail = entry;

			switch (step) {
			case 0:
				tstamp    = entry->dccphrx_tstamp;
				win_count = entry->dccphrx_ccval;
				step = 1;
				break;
			case 1:
				interval = win_count - entry->dccphrx_ccval;
				if (interval < 0)
					interval += TFRC_WIN_COUNT_LIMIT;
				if (interval > 4)
					goto found;
				break;
			}
		}
	}
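	/*
	 * Note (added for clarity): the walk above looks for two data
	 * packets whose window counters are more than 4 quarter-RTT steps
	 * apart; the time between them, scaled by 4 / interval below, gives
	 * a rough RTT estimate when no explicit RTT sample is available.
	 */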
	if (unlikely(step == 0)) {
		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, packet history "
			       "contains no data packets!\n",
			       __FUNCTION__, dccp_role(sk), sk);
		return ~0;
	}

	if (unlikely(interval == 0)) {
		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Could not find a "
			       "win_count interval > 0. Defaulting to 1\n",
			       __FUNCTION__, dccp_role(sk), sk);
		interval = 1;
	}
found:
	rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
	ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
		       dccp_role(sk), sk, rtt);
	if (rtt == 0)
		rtt = 1;

	dccp_timestamp(sk, &tstamp);
	delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
	x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);

	tmp1 = (u64)x_recv * (u64)rtt;
	do_div(tmp1, 10000000);
	tmp2 = (u32)tmp1;
	fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
	/* do not alter order above or you will get overflow on 32 bit */
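	/*
	 * Note (added for clarity): fval is a fixed-point approximation of
	 * s / (X_recv * R); tfrc_calc_x_reverse_lookup() inverts the TFRC
	 * throughput equation to find the loss event rate p that would
	 * yield this value.
	 */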
	p = tfrc_calc_x_reverse_lookup(fval);

	ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);

	if (p == 0)
		return ~0;
	else
		return 1000000 / p;
}

static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	if (seq_loss != DCCP_MAX_SEQNO + 1 &&
	    list_empty(&hcrx->ccid3hcrx_li_hist)) {
		struct dccp_li_hist_entry *li_tail;

		li_tail = dccp_li_hist_interval_new(ccid3_li_hist,
						    &hcrx->ccid3hcrx_li_hist,
						    seq_loss, win_loss);
		if (li_tail == NULL)
			return;
		li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
	} else
		LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of "
			       "interval\n", __FUNCTION__);
}

static void ccid3_hc_rx_detect_loss(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	u8 win_loss;
	const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist,
						      &hcrx->ccid3hcrx_li_hist,
						      &win_loss);

	ccid3_hc_rx_update_li(sk, seq_loss, win_loss);
}

static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const struct dccp_options_received *opt_recv;
	struct dccp_rx_hist_entry *packet;
	struct timeval now;
	u8 win_count;
	u32 p_prev, r_sample, t_elapsed;
	int ins;

	BUG_ON(hcrx == NULL ||
	       !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
		 hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA));

	opt_recv = &dccp_sk(sk)->dccps_options_received;

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_ACK:
		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			return;
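		/* fall through - an ACK in any other state is handled by the
		 * DATAACK code below (timestamp echo / RTT sampling) */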
	case DCCP_PKT_DATAACK:
		if (opt_recv->dccpor_timestamp_echo == 0)
			break;
		p_prev = hcrx->ccid3hcrx_rtt; /* p_prev reused here to hold the previous RTT */
		dccp_timestamp(sk, &now);
		timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
		r_sample = timeval_usecs(&now);
		t_elapsed = opt_recv->dccpor_elapsed_time * 10;

		if (unlikely(r_sample <= t_elapsed))
			LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
				       "t_elapsed=%uus\n",
				       __FUNCTION__, r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			hcrx->ccid3hcrx_rtt = r_sample;
		else
			hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
					      r_sample / 10;

		if (p_prev != hcrx->ccid3hcrx_rtt)
			ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n",
				       dccp_role(sk), hcrx->ccid3hcrx_rtt,
				       opt_recv->dccpor_elapsed_time);
		break;
	case DCCP_PKT_DATA:
		break;
	default: /* We're not interested in other packet types, move along */
		return;
	}
	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
					skb, SLAB_ATOMIC);
	if (unlikely(packet == NULL)) {
		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Not enough mem to "
			       "add rx packet to history, consider it lost!\n",
			       __FUNCTION__, dccp_role(sk), sk);
		return;
	}

	win_count = packet->dccphrx_ccval;

	ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
				      &hcrx->ccid3hcrx_li_hist, packet);

	if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
		return;

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
			       "feedback\n",
			       dccp_role(sk), sk,
			       dccp_state_name(sk->sk_state), skb);
		ccid3_hc_rx_send_feedback(sk);
		ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
		return;
	case TFRC_RSTATE_DATA:
		hcrx->ccid3hcrx_bytes_recv += skb->len -
					      dccp_hdr(skb)->dccph_doff * 4;
		if (ins != 0)
			break;

		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
		    hcrx->ccid3hcrx_rtt) {
			hcrx->ccid3hcrx_tstamp_last_ack = now;
			ccid3_hc_rx_send_feedback(sk);
		}
		return;
	default:
		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
		       __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
		dump_stack();
		return;
	}

	/* Dealing with packet loss */
	ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
		       dccp_role(sk), sk, dccp_state_name(sk->sk_state));

	ccid3_hc_rx_detect_loss(sk);
	p_prev = hcrx->ccid3hcrx_p;

	/* Calculate loss event rate */
	if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
		u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);

		/* Scaling up by 1000000 as fixed decimal */
		if (i_mean != 0)
			hcrx->ccid3hcrx_p = 1000000 / i_mean;
	}

	if (hcrx->ccid3hcrx_p > p_prev) {
		ccid3_hc_rx_send_feedback(sk);
		return;
	}
}

static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
	    dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
		hcrx->ccid3hcrx_s = dp->dccps_packet_size;
	else
		hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE;

	hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
	INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
	INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
	dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
	hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
	hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
	return 0;
}

static void ccid3_hc_rx_exit(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	BUG_ON(hcrx == NULL);

	ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);

	/* Empty packet history */
	dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);

	/* Empty loss interval history */
	dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
}

static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	BUG_ON(hcrx == NULL);

	info->tcpi_ca_state = hcrx->ccid3hcrx_state;
	info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
}

static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	BUG_ON(hctx == NULL);

	info->tcpi_rto = hctx->ccid3hctx_t_rto;
	info->tcpi_rtt = hctx->ccid3hctx_rtt;
}

static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
				  u32 __user *optval, int __user *optlen)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const void *val;

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return -EINVAL;

	switch (optname) {
	case DCCP_SOCKOPT_CCID_RX_INFO:
		if (len < sizeof(hcrx->ccid3hcrx_tfrc))
			return -EINVAL;
		len = sizeof(hcrx->ccid3hcrx_tfrc);
		val = &hcrx->ccid3hcrx_tfrc;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, val, len))
		return -EFAULT;

	return 0;
}

static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
				  u32 __user *optval, int __user *optlen)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	const void *val;

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return -EINVAL;

	switch (optname) {
	case DCCP_SOCKOPT_CCID_TX_INFO:
		if (len < sizeof(hctx->ccid3hctx_tfrc))
			return -EINVAL;
		len = sizeof(hctx->ccid3hctx_tfrc);
		val = &hctx->ccid3hctx_tfrc;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, val, len))
		return -EFAULT;

	return 0;
}

static struct ccid_operations ccid3 = {
	.ccid_id                   = 3,
	.ccid_name                 = "ccid3",
	.ccid_owner                = THIS_MODULE,
	.ccid_hc_tx_obj_size       = sizeof(struct ccid3_hc_tx_sock),
	.ccid_hc_tx_init           = ccid3_hc_tx_init,
	.ccid_hc_tx_exit           = ccid3_hc_tx_exit,
	.ccid_hc_tx_send_packet    = ccid3_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent    = ccid3_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv    = ccid3_hc_tx_packet_recv,
	.ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
	.ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
	.ccid_hc_rx_obj_size       = sizeof(struct ccid3_hc_rx_sock),
	.ccid_hc_rx_init           = ccid3_hc_rx_init,
	.ccid_hc_rx_exit           = ccid3_hc_rx_exit,
	.ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
	.ccid_hc_rx_packet_recv    = ccid3_hc_rx_packet_recv,
	.ccid_hc_rx_get_info       = ccid3_hc_rx_get_info,
	.ccid_hc_tx_get_info       = ccid3_hc_tx_get_info,
	.ccid_hc_rx_getsockopt     = ccid3_hc_rx_getsockopt,
	.ccid_hc_tx_getsockopt     = ccid3_hc_tx_getsockopt,
};

module_param(ccid3_debug, int, 0444);
MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");

static __init int ccid3_module_init(void)
{
	int rc = -ENOBUFS;

	ccid3_rx_hist = dccp_rx_hist_new("ccid3");
	if (ccid3_rx_hist == NULL)
		goto out;

	ccid3_tx_hist = dccp_tx_hist_new("ccid3");
	if (ccid3_tx_hist == NULL)
		goto out_free_rx;

	ccid3_li_hist = dccp_li_hist_new("ccid3");
	if (ccid3_li_hist == NULL)
		goto out_free_tx;

	rc = ccid_register(&ccid3);
	if (rc != 0)
		goto out_free_loss_interval_history;
out:
	return rc;

out_free_loss_interval_history:
	dccp_li_hist_delete(ccid3_li_hist);
	ccid3_li_hist = NULL;
out_free_tx:
	dccp_tx_hist_delete(ccid3_tx_hist);
	ccid3_tx_hist = NULL;
out_free_rx:
	dccp_rx_hist_delete(ccid3_rx_hist);
	ccid3_rx_hist = NULL;
	goto out;
}
module_init(ccid3_module_init);

static __exit void ccid3_module_exit(void)
{
	ccid_unregister(&ccid3);

	if (ccid3_tx_hist != NULL) {
		dccp_tx_hist_delete(ccid3_tx_hist);
		ccid3_tx_hist = NULL;
	}
	if (ccid3_rx_hist != NULL) {
		dccp_rx_hist_delete(ccid3_rx_hist);
		ccid3_rx_hist = NULL;
	}
	if (ccid3_li_hist != NULL) {
		dccp_li_hist_delete(ccid3_li_hist);
		ccid3_li_hist = NULL;
	}
}
module_exit(ccid3_module_exit);

MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
	      "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
MODULE_LICENSE("GPL");
MODULE_ALIAS("net-dccp-ccid-3");