
/*
 *  net/dccp/ccids/ccid3.c
 *
 *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "../ccid.h"
#include "../dccp.h"
#include "lib/packet_history.h"
#include "lib/loss_interval.h"
#include "lib/tfrc.h"
#include "ccid3.h"

/*
 * Reason for maths here is to avoid 32 bit overflow when a is big.
 * With this we get close to the limit.
 */
static u32 usecs_div(const u32 a, const u32 b)
{
	const u32 div = a < (UINT_MAX / (USEC_PER_SEC /    10)) ?    10 :
			a < (UINT_MAX / (USEC_PER_SEC /    50)) ?    50 :
			a < (UINT_MAX / (USEC_PER_SEC /   100)) ?   100 :
			a < (UINT_MAX / (USEC_PER_SEC /   500)) ?   500 :
			a < (UINT_MAX / (USEC_PER_SEC /  1000)) ?  1000 :
			a < (UINT_MAX / (USEC_PER_SEC /  5000)) ?  5000 :
			a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
			a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
								 100000;
	const u32 tmp = a * (USEC_PER_SEC / div);

	return (b >= 2 * div) ? tmp / (b / div) : tmp;
}
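
/*
 * Illustrative example: usecs_div(a, b) approximates (a * USEC_PER_SEC) / b,
 * i.e. "a bytes in b microseconds" expressed as bytes per second. For
 * a = 14600 and b = 100000 (100 ms), a is below UINT_MAX / 100000, so
 * div = 10, tmp = 14600 * 100000 = 1,460,000,000 and the result is
 * tmp / (100000 / 10) = 146,000 bytes/s, whereas a direct
 * 14600 * USEC_PER_SEC would already overflow a u32.
 */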

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static int ccid3_debug;
#define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
#else
#define ccid3_pr_debug(format, a...)
#endif

static struct dccp_tx_hist *ccid3_tx_hist;
static struct dccp_rx_hist *ccid3_rx_hist;
static struct dccp_li_hist *ccid3_li_hist;

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
{
	static char *ccid3_state_names[] = {
	[TFRC_SSTATE_NO_SENT]  = "NO_SENT",
	[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
	[TFRC_SSTATE_FBACK]    = "FBACK",
	[TFRC_SSTATE_TERM]     = "TERM",
	};

	return ccid3_state_names[state];
}
#endif

static void ccid3_hc_tx_set_state(struct sock *sk,
				  enum ccid3_hc_tx_states state)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;

	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
		       dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
		       ccid3_tx_state_name(state));
	WARN_ON(state == oldstate);
	hctx->ccid3hctx_state = state;
}

/*
 * Recalculate scheduled nominal send time t_nom, inter-packet interval
 * t_ipi, and delta value. Should be called after each change to X.
 */
static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
{
	timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
	hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x);

	/* Update nominal send time with regard to the new t_ipi */
	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
	hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
					   TFRC_OPSYS_HALF_TIME_GRAN);
}
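
/*
 * Example values (illustrative only): with s = 1460 bytes and
 * X = 14600 bytes/s the above gives t_ipi = usecs_div(1460, 14600)
 * = 100,000 us (100 ms), and delta = min(t_ipi / 2,
 * TFRC_OPSYS_HALF_TIME_GRAN), i.e. the smaller of 50,000 us and half
 * the host's scheduling granularity.
 */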

/*
 * Update X by
 *    If (p > 0)
 *       x_calc = calcX(s, R, p);
 *       X = max(min(X_calc, 2 * X_recv), s / t_mbi);
 *    Else
 *       If (now - tld >= R)
 *          X = max(min(2 * X, 2 * X_recv), s / R);
 *          tld = now;
 */
static void ccid3_hc_tx_update_x(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	const __u32 old_x = hctx->ccid3hctx_x;

	/* To avoid large error in calcX */
	if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
		hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
						     hctx->ccid3hctx_rtt,
						     hctx->ccid3hctx_p);
		hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc,
							  2 * hctx->ccid3hctx_x_recv),
					       (hctx->ccid3hctx_s /
						TFRC_MAX_BACK_OFF_TIME));
	} else {
		struct timeval now;

		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >=
		    hctx->ccid3hctx_rtt) {
			hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv,
								  hctx->ccid3hctx_x) * 2,
						       usecs_div(hctx->ccid3hctx_s,
								 hctx->ccid3hctx_rtt));
			hctx->ccid3hctx_t_ld = now;
		}
	}

	if (hctx->ccid3hctx_x != old_x)
		ccid3_update_send_time(hctx);
}
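
/*
 * For reference: tfrc_calc_x() evaluates, via the TFRC library, the TCP
 * throughput equation of RFC 3448, 3.1, with b = 1 and t_RTO = 4 * R:
 *
 *                              s
 *   X_calc = -------------------------------------------------------
 *            R * (sqrt(2*p/3) + 12 * sqrt(3*p/8) * p * (1 + 32*p^2))
 *
 * so a larger loss event rate p or round-trip time R yields a smaller
 * allowed transmit rate X_calc.
 */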

static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	unsigned long next_tmout = USEC_PER_SEC / 5;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		/* XXX: set some sensible MIB */
		goto restart_timer;
	}

	ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
		       ccid3_tx_state_name(hctx->ccid3hctx_state));

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_FBACK:
		/* Halve send rate */
		hctx->ccid3hctx_x /= 2;
		if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s /
					 TFRC_MAX_BACK_OFF_TIME))
			hctx->ccid3hctx_x = (hctx->ccid3hctx_s /
					     TFRC_MAX_BACK_OFF_TIME);

		ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
			       "bytes/s\n",
			       dccp_role(sk), sk,
			       ccid3_tx_state_name(hctx->ccid3hctx_state),
			       hctx->ccid3hctx_x);
		/* The value of R is still undefined and so we can not recompute
		 * the timeout value. Keep initial value as per [RFC 4342, 5]. */
		next_tmout = TFRC_INITIAL_TIMEOUT;
		ccid3_update_send_time(hctx);
		break;
	case TFRC_SSTATE_FBACK:
		/*
		 * Check if IDLE since last timeout and recv rate is less than
		 * 4 packets per RTT
		 */
		if (!hctx->ccid3hctx_idle ||
		    (hctx->ccid3hctx_x_recv >=
		     4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
			ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
				       dccp_role(sk), sk,
				       ccid3_tx_state_name(hctx->ccid3hctx_state));
			/* Halve sending rate */

			/*  If (X_calc > 2 * X_recv)
			 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
			 *  Else
			 *    X_recv = X_calc / 4;
			 */
			BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
			       hctx->ccid3hctx_x_calc == 0);

			/* check also if p is zero -> x_calc is infinity? */
			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
			    hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
				hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
							       hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME));
			else
				hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;

			/* Update sending rate */
			ccid3_hc_tx_update_x(sk);
		}
		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * t_RTO, 2 * s/X)  =  max(4 * t_RTO, 2 * t_ipi)
		 * XXX This is non-standard, RFC 3448, 4.3 uses 4 * R
		 */
		next_tmout = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
		break;
	case TFRC_SSTATE_NO_SENT:
		DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk);
		/* fall through */
	case TFRC_SSTATE_TERM:
		goto out;
	}

	hctx->ccid3hctx_idle = 1;

restart_timer:
	sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
		       jiffies + usecs_to_jiffies(next_tmout));
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 * returns
 *   > 0: delay (in msecs) that should pass before actually sending
 *   = 0: can send immediately
 *   < 0: error condition; do not send packet
 */
static int ccid3_hc_tx_send_packet(struct sock *sk,
				   struct sk_buff *skb, int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct dccp_tx_hist_entry *new_packet;
	struct timeval now;
	long delay;

	BUG_ON(hctx == NULL);

	/*
	 * This function is called only for Data and DataAck packets. Sending
	 * zero-sized Data(Ack)s is theoretically possible, but for congestion
	 * control this case is pathological - ignore it.
	 */
	if (unlikely(len == 0))
		return -EBADMSG;

	/* See if last packet allocated was not sent */
	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
	if (new_packet == NULL || new_packet->dccphtx_sent) {
		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
						    SLAB_ATOMIC);
		if (unlikely(new_packet == NULL)) {
			DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
				  "send refused\n", dccp_role(sk), sk);
			return -ENOBUFS;
		}

		dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
	}

	dccp_timestamp(sk, &now);

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_SENT:
		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
		hctx->ccid3hctx_last_win_count	 = 0;
		hctx->ccid3hctx_t_last_win_count = now;
		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);

		/* First timeout, according to [RFC 3448, 4.2], is 1 second */
		hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
		/* Initial delta: minimum of 0.5 sec and t_gran/2 */
		hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN;

		/* Set t_0 for initial packet */
		hctx->ccid3hctx_t_nom = now;
		break;
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
		/*
		 * Scheduling of packet transmissions [RFC 3448, 4.6]
		 *
		 * if (t_now > t_nom - delta)
		 *       // send the packet now
		 * else
		 *       // send the packet in (t_nom - t_now) milliseconds.
		 */
		if (delay >= hctx->ccid3hctx_delta)
			return delay / 1000L;
		break;
	case TFRC_SSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return -EINVAL;
	}

	/* prepare to send now (add options etc.) */
	dp->dccps_hc_tx_insert_options = 1;
	new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval =
		hctx->ccid3hctx_last_win_count;
	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	return 0;
}
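
/*
 * Note on the return value above: a positive result is the delay in
 * milliseconds that the caller in the generic DCCP output path (e.g.
 * dccp_wait_for_ccid(), which this file wakes up again from
 * ccid3_hc_tx_packet_recv() below) is expected to wait before retrying
 * the send.
 */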

static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct timeval now;

	BUG_ON(hctx == NULL);

	dccp_timestamp(sk, &now);

	/* check if we have sent a data packet */
	if (len > 0) {
		unsigned long quarter_rtt;
		struct dccp_tx_hist_entry *packet;

		packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
		if (unlikely(packet == NULL)) {
			DCCP_WARN("packet doesn't exist in history!\n");
			return;
		}
		if (unlikely(packet->dccphtx_sent)) {
			DCCP_WARN("no unsent packet in history!\n");
			return;
		}
		packet->dccphtx_tstamp = now;
		packet->dccphtx_seqno  = dp->dccps_gss;
		/*
		 * Check if win_count has changed.
		 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
		 */
		quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
		if (likely(hctx->ccid3hctx_rtt > 8))
			quarter_rtt /= hctx->ccid3hctx_rtt / 4;

		if (quarter_rtt > 0) {
			hctx->ccid3hctx_t_last_win_count = now;
			hctx->ccid3hctx_last_win_count	 = (hctx->ccid3hctx_last_win_count +
							    min_t(unsigned long, quarter_rtt, 5)) % 16;
			ccid3_pr_debug("%s, sk=%p, window changed from "
				       "%u to %u!\n",
				       dccp_role(sk), sk,
				       packet->dccphtx_ccval,
				       hctx->ccid3hctx_last_win_count);
		}

		hctx->ccid3hctx_idle = 0;
		packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
		packet->dccphtx_sent = 1;
	} else
		ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n",
			       dccp_role(sk), sk, dp->dccps_gss);
}
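
/*
 * Illustrative window-counter update: with an RTT estimate of 100,000 us
 * and 260,000 us elapsed since the last update, quarter_rtt above becomes
 * 260000 / 25000 = 10; the increment is capped at 5, so the counter
 * advances by 5 modulo 16 (cf. RFC 4342, 8.1).
 */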

static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;
	struct dccp_tx_hist_entry *packet;
	struct timeval now;
	unsigned long next_tmout;
	u32 t_elapsed;
	u32 pinv;
	u32 x_recv;
	u32 r_sample;

	BUG_ON(hctx == NULL);

	/* we are only interested in ACKs */
	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
	      DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
		return;

	opt_recv = &hctx->ccid3hctx_options_received;

	t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
	x_recv = opt_recv->ccid3or_receive_rate;
	pinv = opt_recv->ccid3or_loss_event_rate;

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		/* Calculate new round trip sample by
		 * R_sample = (now - t_recvdata) - t_delay */
		/* get t_recvdata from history */
		packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
						 DCCP_SKB_CB(skb)->dccpd_ack_seq);
		if (unlikely(packet == NULL)) {
			DCCP_WARN("%s, sk=%p, seqno %llu(%s) doesn't exist "
				  "in history!\n", dccp_role(sk), sk,
				  (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				  dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
			return;
		}

		/* Update RTT */
		dccp_timestamp(sk, &now);
		r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
		if (unlikely(r_sample <= t_elapsed))
			DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
				  r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		/* Update RTT estimate by
		 * If (No feedback recv)
		 *    R = R_sample;
		 * Else
		 *    R = q * R + (1 - q) * R_sample;
		 *
		 * q is a constant, RFC 3448 recommends 0.9
		 */
		if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
			ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
			hctx->ccid3hctx_rtt = r_sample;
		} else
			hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
					      r_sample / 10;

		ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
			       "r_sample=%uus\n", dccp_role(sk), sk,
			       hctx->ccid3hctx_rtt, r_sample);

		/* Update receive rate */
		hctx->ccid3hctx_x_recv = x_recv;	/* X_recv in bytes per sec */

		/* Update loss event rate */
		if (pinv == ~0 || pinv == 0)
			hctx->ccid3hctx_p = 0;
		else {
			hctx->ccid3hctx_p = 1000000 / pinv;

			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
				hctx->ccid3hctx_p = TFRC_SMALLEST_P;
				ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
					       dccp_role(sk), sk);
			}
		}

		/* unschedule no feedback timer */
		sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

		/* Update sending rate (and likely t_ipi, t_nom, and delta) */
		ccid3_hc_tx_update_x(sk);

		/* remove all packets older than the one acked from history */
		dccp_tx_hist_purge_older(ccid3_tx_hist,
					 &hctx->ccid3hctx_hist, packet);
		/*
		 * As we have calculated new ipi, delta, t_nom it is possible
		 * that we now can send a packet, so wake up dccp_wait_for_ccid
		 */
		sk->sk_write_space(sk);

		/* Update timeout interval. We use the alternative variant of
		 * [RFC 3448, 3.1] which sets the lower bound of t_rto to one
		 * second, as it is suggested for TCP (see RFC 2988, 2.4). */
		hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
						   USEC_PER_SEC);
		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * t_RTO, 2 * s/X)  =  max(4 * t_RTO, 2 * t_ipi)
		 * XXX This is non-standard, RFC 3448, 4.3 uses 4 * R
		 */
		next_tmout = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);

		ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
			       "expire in %lu jiffies (%luus)\n",
			       dccp_role(sk), sk,
			       usecs_to_jiffies(next_tmout), next_tmout);

		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + usecs_to_jiffies(next_tmout));

		/* set idle flag */
		hctx->ccid3hctx_idle = 1;
		break;
	case TFRC_SSTATE_NO_SENT:
		DCCP_WARN("Illegal ACK received - no packet has been sent\n");
		/* fall through */
	case TFRC_SSTATE_TERM:		/* ignore feedback when closing */
		break;
	}
}
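
/*
 * Illustrative RTT smoothing (q = 0.9): with a current estimate of
 * R = 100,000 us and a new sample R_sample = 120,000 us, the update above
 * gives R = (100000 * 9) / 10 + 120000 / 10 = 90,000 + 12,000 = 102,000 us.
 */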

static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
		DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;

	return 0;
}

static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
				     unsigned char len, u16 idx,
				     unsigned char *value)
{
	int rc = 0;
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;

	BUG_ON(hctx == NULL);

	opt_recv = &hctx->ccid3hctx_options_received;

	if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
		opt_recv->ccid3or_seqno		     = dp->dccps_gsr;
		opt_recv->ccid3or_loss_event_rate    = ~0;
		opt_recv->ccid3or_loss_intervals_idx = 0;
		opt_recv->ccid3or_loss_intervals_len = 0;
		opt_recv->ccid3or_receive_rate	     = 0;
	}

	switch (option) {
	case TFRC_OPT_LOSS_EVENT_RATE:
		if (unlikely(len != 4)) {
			DCCP_WARN("%s, sk=%p, invalid len %d "
				  "for TFRC_OPT_LOSS_EVENT_RATE\n",
				  dccp_role(sk), sk, len);
			rc = -EINVAL;
		} else {
			opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_loss_event_rate);
		}
		break;
	case TFRC_OPT_LOSS_INTERVALS:
		opt_recv->ccid3or_loss_intervals_idx = idx;
		opt_recv->ccid3or_loss_intervals_len = len;
		ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
			       dccp_role(sk), sk,
			       opt_recv->ccid3or_loss_intervals_idx,
			       opt_recv->ccid3or_loss_intervals_len);
		break;
	case TFRC_OPT_RECEIVE_RATE:
		if (unlikely(len != 4)) {
			DCCP_WARN("%s, sk=%p, invalid len %d "
				  "for TFRC_OPT_RECEIVE_RATE\n",
				  dccp_role(sk), sk, len);
			rc = -EINVAL;
		} else {
			opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_receive_rate);
		}
		break;
	}

	return rc;
}

static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);

	if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
	    dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
		hctx->ccid3hctx_s = dp->dccps_packet_size;
	else
		hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE;

	/* Set transmission rate to 1 packet per second */
	hctx->ccid3hctx_x     = hctx->ccid3hctx_s;
	hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
	INIT_LIST_HEAD(&hctx->ccid3hctx_hist);

	hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
	hctx->ccid3hctx_no_feedback_timer.data     = (unsigned long)sk;
	init_timer(&hctx->ccid3hctx_no_feedback_timer);

	return 0;
}

static void ccid3_hc_tx_exit(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
	sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

	/* Empty packet history */
	dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
}

/*
 * RX Half Connection methods
 */

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
{
	static char *ccid3_rx_state_names[] = {
	[TFRC_RSTATE_NO_DATA] = "NO_DATA",
	[TFRC_RSTATE_DATA]    = "DATA",
	[TFRC_RSTATE_TERM]    = "TERM",
	};

	return ccid3_rx_state_names[state];
}
#endif

static void ccid3_hc_rx_set_state(struct sock *sk,
				  enum ccid3_hc_rx_states state)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;

	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
		       dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
		       ccid3_rx_state_name(state));
	WARN_ON(state == oldstate);
	hcrx->ccid3hcrx_state = state;
}

static void ccid3_hc_rx_send_feedback(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_rx_hist_entry *packet;
	struct timeval now;

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	dccp_timestamp(sk, &now);

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		hcrx->ccid3hcrx_x_recv = 0;
		break;
	case TFRC_RSTATE_DATA: {
		const u32 delta = timeval_delta(&now,
					&hcrx->ccid3hcrx_tstamp_last_feedback);
		hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
						   delta);
	}
		break;
	case TFRC_RSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return;
	}

	packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("%s, sk=%p, no data packet in history!\n",
			  dccp_role(sk), sk);
		return;
	}

	hcrx->ccid3hcrx_tstamp_last_feedback = now;
	hcrx->ccid3hcrx_ccval_last_counter   = packet->dccphrx_ccval;
	hcrx->ccid3hcrx_bytes_recv	     = 0;

	/* Convert to multiples of 10us */
	hcrx->ccid3hcrx_elapsed_time =
			timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
	if (hcrx->ccid3hcrx_p == 0)
		hcrx->ccid3hcrx_pinv = ~0;
	else
		hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
	dp->dccps_hc_rx_insert_options = 1;
	dccp_send_ack(sk);
}
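
/*
 * Illustrative X_recv computation: if 14,600 bytes were received in the
 * 1,000,000 us since the last feedback was sent, the code above reports
 * X_recv = usecs_div(14600, 1000000) = 14,600 bytes/s in the next
 * feedback packet.
 */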

static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	__be32 x_recv, pinv;

	BUG_ON(hcrx == NULL);

	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
		return 0;

	DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;

	if (dccp_packet_without_ack(skb))
		return 0;

	x_recv = htonl(hcrx->ccid3hcrx_x_recv);
	pinv   = htonl(hcrx->ccid3hcrx_pinv);

	if ((hcrx->ccid3hcrx_elapsed_time != 0 &&
	     dccp_insert_option_elapsed_time(sk, skb,
					     hcrx->ccid3hcrx_elapsed_time)) ||
	    dccp_insert_option_timestamp(sk, skb) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
			       &pinv, sizeof(pinv)) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
			       &x_recv, sizeof(x_recv)))
		return -1;

	return 0;
}

/* calculate first loss interval
 *
 * returns estimated loss interval in usecs */
static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
	u32 rtt, delta, x_recv, fval, p, tmp2;
	struct timeval tstamp = { 0, };
	int interval = 0;
	int win_count = 0;
	int step = 0;
	u64 tmp1;

	list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
				 dccphrx_node) {
		if (dccp_rx_hist_entry_data_packet(entry)) {
			tail = entry;

			switch (step) {
			case 0:
				tstamp	  = entry->dccphrx_tstamp;
				win_count = entry->dccphrx_ccval;
				step = 1;
				break;
			case 1:
				interval = win_count - entry->dccphrx_ccval;
				if (interval < 0)
					interval += TFRC_WIN_COUNT_LIMIT;
				if (interval > 4)
					goto found;
				break;
			}
		}
	}

	if (unlikely(step == 0)) {
		DCCP_WARN("%s, sk=%p, packet history has no data packets!\n",
			  dccp_role(sk), sk);
		return ~0;
	}

	if (unlikely(interval == 0)) {
		DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0. "
			  "Defaulting to 1\n", dccp_role(sk), sk);
		interval = 1;
	}
found:
	if (!tail) {
		DCCP_CRIT("tail is null\n");
		return ~0;
	}
	rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
	ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
		       dccp_role(sk), sk, rtt);

	if (rtt == 0) {
		DCCP_WARN("RTT==0, setting to 1\n");
		rtt = 1;
	}

	dccp_timestamp(sk, &tstamp);
	delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
	x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
	if (x_recv == 0)
		x_recv = hcrx->ccid3hcrx_x_recv;

	tmp1 = (u64)x_recv * (u64)rtt;
	do_div(tmp1, 10000000);
	tmp2 = (u32)tmp1;

	if (!tmp2) {
		DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt = %u\n", x_recv, rtt);
		return ~0;
	}

	fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
	/* do not alter order above or you will get overflow on 32 bit */
	p = tfrc_calc_x_reverse_lookup(fval);

	ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);

	if (p == 0)
		return ~0;
	else
		return 1000000 / p;
}
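
/*
 * The fixed-point arithmetic above corresponds to the procedure suggested
 * in RFC 3448, 6.3.1: it computes, in the TFRC library's fixed-point scale,
 * fval proportional to s / (R * X_recv), feeds that into the reverse lookup
 * of the throughput equation X = s / (R * f(p)) to obtain the loss event
 * rate p, and the first loss interval is then taken as the inverse of that
 * rate (1000000 / p).
 */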

static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_li_hist_entry *head;
	u64 seq_temp;

	if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
		if (!dccp_li_hist_interval_new(ccid3_li_hist,
				&hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
			return;

		head = list_entry(hcrx->ccid3hcrx_li_hist.next,
				  struct dccp_li_hist_entry, dccplih_node);
		head->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
	} else {
		struct dccp_li_hist_entry *entry;
		struct list_head *tail;

		head = list_entry(hcrx->ccid3hcrx_li_hist.next,
				  struct dccp_li_hist_entry, dccplih_node);
		/* FIXME win count check removed as was wrong */
		/* should make this check with receive history */
		/* and compare there as per section 10.2 of RFC4342 */

		/* new loss event detected */
		/* calculate last interval length */
		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);

		if (entry == NULL) {
			DCCP_BUG("out of memory - can not allocate entry");
			return;
		}

		list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);

		tail = hcrx->ccid3hcrx_li_hist.prev;
		list_del(tail);
		kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);

		/* Create the newest interval */
		entry->dccplih_seqno	 = seq_loss;
		entry->dccplih_interval	 = seq_temp;
		entry->dccplih_win_count = win_loss;
	}
}

static int ccid3_hc_rx_detect_loss(struct sock *sk,
				   struct dccp_rx_hist_entry *packet)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
	u64 seqno = packet->dccphrx_seqno;
	u64 tmp_seqno;
	int loss = 0;
	u8 ccval;

	tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;

	if (!rx_hist ||
	    follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
		hcrx->ccid3hcrx_seqno_nonloss = seqno;
		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
		goto detect_out;
	}

	while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
	       > TFRC_RECV_NUM_LATE_LOSS) {
		loss = 1;
		ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
				      hcrx->ccid3hcrx_ccval_nonloss);
		tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
		dccp_inc_seqno(&tmp_seqno);
		hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
		dccp_inc_seqno(&tmp_seqno);
		while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
					       tmp_seqno, &ccval)) {
			hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
			hcrx->ccid3hcrx_ccval_nonloss = ccval;
			dccp_inc_seqno(&tmp_seqno);
		}
	}

	/* FIXME - this code could be simplified with above while */
	/* but works at moment */
	if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
		hcrx->ccid3hcrx_seqno_nonloss = seqno;
		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
	}

detect_out:
	dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
				&hcrx->ccid3hcrx_li_hist, packet,
				hcrx->ccid3hcrx_seqno_nonloss);
	return loss;
}
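
/*
 * In the loop above a loss is only declared once the new packet lies more
 * than TFRC_RECV_NUM_LATE_LOSS packets beyond the last sequence number
 * known to be non-lost, so a short burst of reordering does not immediately
 * count as loss; each gap that old is handed to ccid3_hc_rx_update_li()
 * before the non-loss pointer is advanced past it.
 */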

static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const struct dccp_options_received *opt_recv;
	struct dccp_rx_hist_entry *packet;
	struct timeval now;
	u32 p_prev, rtt_prev, r_sample, t_elapsed;
	int loss;

	BUG_ON(hcrx == NULL);

	opt_recv = &dccp_sk(sk)->dccps_options_received;

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_ACK:
		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			return;
		/* fall through */
	case DCCP_PKT_DATAACK:
		if (opt_recv->dccpor_timestamp_echo == 0)
			break;
		rtt_prev = hcrx->ccid3hcrx_rtt;
		dccp_timestamp(sk, &now);
		timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
		r_sample = timeval_usecs(&now);
		t_elapsed = opt_recv->dccpor_elapsed_time * 10;

		if (unlikely(r_sample <= t_elapsed))
			DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
				  r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			hcrx->ccid3hcrx_rtt = r_sample;
		else
			hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
					      r_sample / 10;

		if (rtt_prev != hcrx->ccid3hcrx_rtt)
			ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
				       dccp_role(sk), hcrx->ccid3hcrx_rtt,
				       opt_recv->dccpor_elapsed_time);
		break;
	case DCCP_PKT_DATA:
		break;
	default: /* We're not interested in other packet types, move along */
		return;
	}

	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
					skb, SLAB_ATOMIC);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
			  "to history, consider it lost!\n", dccp_role(sk), sk);
		return;
	}

	loss = ccid3_hc_rx_detect_loss(sk, packet);

	if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
		return;

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
			       "feedback\n",
			       dccp_role(sk), sk,
			       dccp_state_name(sk->sk_state), skb);
		ccid3_hc_rx_send_feedback(sk);
		ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
		return;
	case TFRC_RSTATE_DATA:
		hcrx->ccid3hcrx_bytes_recv += skb->len -
					      dccp_hdr(skb)->dccph_doff * 4;
		if (loss)
			break;

		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
		    hcrx->ccid3hcrx_rtt) {
			hcrx->ccid3hcrx_tstamp_last_ack = now;
			ccid3_hc_rx_send_feedback(sk);
		}
		return;
	case TFRC_RSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return;
	}

	/* Dealing with packet loss */
	ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
		       dccp_role(sk), sk, dccp_state_name(sk->sk_state));

	p_prev = hcrx->ccid3hcrx_p;

	/* Calculate loss event rate */
	if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
		u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);

		/* Scaling up by 1000000 as fixed decimal */
		if (i_mean != 0)
			hcrx->ccid3hcrx_p = 1000000 / i_mean;
	} else
		DCCP_BUG("empty loss history");

	if (hcrx->ccid3hcrx_p > p_prev) {
		ccid3_hc_rx_send_feedback(sk);
		return;
	}
}
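
/*
 * Example of the fixed-point loss event rate used above: a mean loss
 * interval of 200 packets gives p = 1000000 / 200 = 5000, i.e. 0.5% in
 * the 1/1,000,000 representation used throughout this file; the feedback
 * option itself carries the inverse (pinv = i_mean), which the sender
 * converts back in ccid3_hc_tx_packet_recv().
 */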

static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
	    dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
		hcrx->ccid3hcrx_s = dp->dccps_packet_size;
	else
		hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE;

	hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
	INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
	INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
	dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
	hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
	hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */

	return 0;
}

static void ccid3_hc_rx_exit(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	BUG_ON(hcrx == NULL);

	ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);

	/* Empty packet history */
	dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);

	/* Empty loss interval history */
	dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
}

static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	BUG_ON(hcrx == NULL);

	info->tcpi_ca_state = hcrx->ccid3hcrx_state;
	info->tcpi_options  |= TCPI_OPT_TIMESTAMPS;
	info->tcpi_rcv_rtt  = hcrx->ccid3hcrx_rtt;
}

static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	BUG_ON(hctx == NULL);

	info->tcpi_rto = hctx->ccid3hctx_t_rto;
	info->tcpi_rtt = hctx->ccid3hctx_rtt;
}

static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
				  u32 __user *optval, int __user *optlen)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const void *val;

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return -EINVAL;

	switch (optname) {
	case DCCP_SOCKOPT_CCID_RX_INFO:
		if (len < sizeof(hcrx->ccid3hcrx_tfrc))
			return -EINVAL;
		len = sizeof(hcrx->ccid3hcrx_tfrc);
		val = &hcrx->ccid3hcrx_tfrc;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, val, len))
		return -EFAULT;

	return 0;
}

static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
				  u32 __user *optval, int __user *optlen)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	const void *val;

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return -EINVAL;

	switch (optname) {
	case DCCP_SOCKOPT_CCID_TX_INFO:
		if (len < sizeof(hctx->ccid3hctx_tfrc))
			return -EINVAL;
		len = sizeof(hctx->ccid3hctx_tfrc);
		val = &hctx->ccid3hctx_tfrc;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, val, len))
		return -EFAULT;

	return 0;
}

static struct ccid_operations ccid3 = {
	.ccid_id		   = DCCPC_CCID3,
	.ccid_name		   = "ccid3",
	.ccid_owner		   = THIS_MODULE,
	.ccid_hc_tx_obj_size	   = sizeof(struct ccid3_hc_tx_sock),
	.ccid_hc_tx_init	   = ccid3_hc_tx_init,
	.ccid_hc_tx_exit	   = ccid3_hc_tx_exit,
	.ccid_hc_tx_send_packet	   = ccid3_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	   = ccid3_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv	   = ccid3_hc_tx_packet_recv,
	.ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
	.ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
	.ccid_hc_rx_obj_size	   = sizeof(struct ccid3_hc_rx_sock),
	.ccid_hc_rx_init	   = ccid3_hc_rx_init,
	.ccid_hc_rx_exit	   = ccid3_hc_rx_exit,
	.ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
	.ccid_hc_rx_packet_recv	   = ccid3_hc_rx_packet_recv,
	.ccid_hc_rx_get_info	   = ccid3_hc_rx_get_info,
	.ccid_hc_tx_get_info	   = ccid3_hc_tx_get_info,
	.ccid_hc_rx_getsockopt	   = ccid3_hc_rx_getsockopt,
	.ccid_hc_tx_getsockopt	   = ccid3_hc_tx_getsockopt,
};

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
module_param(ccid3_debug, int, 0444);
MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
#endif

static __init int ccid3_module_init(void)
{
	int rc = -ENOBUFS;

	ccid3_rx_hist = dccp_rx_hist_new("ccid3");
	if (ccid3_rx_hist == NULL)
		goto out;

	ccid3_tx_hist = dccp_tx_hist_new("ccid3");
	if (ccid3_tx_hist == NULL)
		goto out_free_rx;

	ccid3_li_hist = dccp_li_hist_new("ccid3");
	if (ccid3_li_hist == NULL)
		goto out_free_tx;

	rc = ccid_register(&ccid3);
	if (rc != 0)
		goto out_free_loss_interval_history;
out:
	return rc;

out_free_loss_interval_history:
	dccp_li_hist_delete(ccid3_li_hist);
	ccid3_li_hist = NULL;
out_free_tx:
	dccp_tx_hist_delete(ccid3_tx_hist);
	ccid3_tx_hist = NULL;
out_free_rx:
	dccp_rx_hist_delete(ccid3_rx_hist);
	ccid3_rx_hist = NULL;
	goto out;
}
module_init(ccid3_module_init);

static __exit void ccid3_module_exit(void)
{
	ccid_unregister(&ccid3);

	if (ccid3_tx_hist != NULL) {
		dccp_tx_hist_delete(ccid3_tx_hist);
		ccid3_tx_hist = NULL;
	}
	if (ccid3_rx_hist != NULL) {
		dccp_rx_hist_delete(ccid3_rx_hist);
		ccid3_rx_hist = NULL;
	}
	if (ccid3_li_hist != NULL) {
		dccp_li_hist_delete(ccid3_li_hist);
		ccid3_li_hist = NULL;
	}
}
module_exit(ccid3_module_exit);

MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
	      "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
MODULE_LICENSE("GPL");
MODULE_ALIAS("net-dccp-ccid-3");