/*
 *  net/dccp/ccids/ccid3.c
 *
 *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "../ccid.h"
#include "../dccp.h"
#include "lib/packet_history.h"
#include "lib/loss_interval.h"
#include "lib/tfrc.h"
#include "ccid3.h"

/*
 * Compute (a * USEC_PER_SEC) / b without overflowing 32 bits when `a' is
 * large: both operands are pre-scaled by a power of ten chosen so that the
 * intermediate product stays below UINT_MAX, trading a little precision
 * for headroom.
 */
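/*
 * Example: usecs_div(1400, 5000) selects div = 10, so tmp = 1400 * 100000
 * = 140000000 and the result is 140000000 / (5000 / 10) = 280000, i.e.
 * 1400 bytes per 5000us correspond to a rate of 280000 bytes/sec.
 */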
static u32 usecs_div(const u32 a, const u32 b)
{
	const u32 div = a < (UINT_MAX / (USEC_PER_SEC /    10)) ?    10 :
			a < (UINT_MAX / (USEC_PER_SEC /    50)) ?    50 :
			a < (UINT_MAX / (USEC_PER_SEC /   100)) ?   100 :
			a < (UINT_MAX / (USEC_PER_SEC /   500)) ?   500 :
			a < (UINT_MAX / (USEC_PER_SEC /  1000)) ?  1000 :
			a < (UINT_MAX / (USEC_PER_SEC /  5000)) ?  5000 :
			a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
			a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
								 100000;
	const u32 tmp = a * (USEC_PER_SEC / div);

	return (b >= 2 * div) ? tmp / (b / div) : tmp;
}

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static int ccid3_debug;
#define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
#else
#define ccid3_pr_debug(format, a...)
#endif

static struct dccp_tx_hist *ccid3_tx_hist;
static struct dccp_rx_hist *ccid3_rx_hist;
static struct dccp_li_hist *ccid3_li_hist;

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
{
	static char *ccid3_state_names[] = {
	[TFRC_SSTATE_NO_SENT]  = "NO_SENT",
	[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
	[TFRC_SSTATE_FBACK]    = "FBACK",
	[TFRC_SSTATE_TERM]     = "TERM",
	};

	return ccid3_state_names[state];
}
#endif

static void ccid3_hc_tx_set_state(struct sock *sk,
				  enum ccid3_hc_tx_states state)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;

	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
		       dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
		       ccid3_tx_state_name(state));
	WARN_ON(state == oldstate);
	hctx->ccid3hctx_state = state;
}

/*
 * Recalculate scheduled nominal send time t_nom, inter-packet interval
 * t_ipi, and delta value. Should be called after each change to X.
 */
static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
{
	timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
	hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x);

	/* Update nominal send time with regard to the new t_ipi */
	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
	hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
					   TFRC_OPSYS_HALF_TIME_GRAN);
}

/*
 * Update X by
 *    If (p > 0)
 *       x_calc = calcX(s, R, p);
 *       X = max(min(X_calc, 2 * X_recv), s / t_mbi);
 *    Else
 *       If (now - tld >= R)
 *          X = max(min(2 * X, 2 * X_recv), s / R);
 *          tld = now;
 */
static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	const __u32 old_x = hctx->ccid3hctx_x;

	/* To avoid large error in calcX */
	if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
		hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
						     hctx->ccid3hctx_rtt,
						     hctx->ccid3hctx_p);
		hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc,
						   hctx->ccid3hctx_x_recv * 2),
					       hctx->ccid3hctx_s / TFRC_T_MBI);

	} else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >=
						      hctx->ccid3hctx_rtt) {
		hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv,
					    hctx->ccid3hctx_x) * 2,
					usecs_div(hctx->ccid3hctx_s,
						  hctx->ccid3hctx_rtt));
		hctx->ccid3hctx_t_ld = *now;
	} else
		ccid3_pr_debug("Not changing X\n");

	if (hctx->ccid3hctx_x != old_x)
		ccid3_update_send_time(hctx);
}

/*
 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
 * @len: DCCP packet payload size in bytes
 */
static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
{
	if (unlikely(len == 0))
		ccid3_pr_debug("Packet payload length is 0 - not updating\n");
	else
		hctx->ccid3hctx_s = hctx->ccid3hctx_s == 0 ? len :
				    (9 * hctx->ccid3hctx_s + len) / 10;
	/*
	 * Note: We could do a potential optimisation here - when `s' changes,
	 *	 recalculate sending rate and consequently t_ipi, t_delta, and
	 *	 t_now. This is however non-standard, and the benefits are not
	 *	 clear, so it is currently left out.
	 */
}

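/*
 * ccid3_hc_tx_no_feedback_timer  -  Handle expiry of the nofeedback timer
 *
 * Halves the allowed sending rate (resp. the receive rate estimate X_recv
 * once feedback has been received), cf. RFC 3448, 4.4, recomputes the
 * packet scheduling parameters, and restarts the timer. If the socket is
 * owned by user context, the expiry is simply retried later.
 */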
static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	unsigned long t_nfb = USEC_PER_SEC / 5;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		/* XXX: set some sensible MIB */
		goto restart_timer;
	}

	ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
		       ccid3_tx_state_name(hctx->ccid3hctx_state));

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_FBACK:
		/* RFC 3448, 4.4: Halve send rate directly */
		hctx->ccid3hctx_x = max_t(u32, hctx->ccid3hctx_x / 2,
					       hctx->ccid3hctx_s / TFRC_T_MBI);

		ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %u "
			       "bytes/s\n",
			       dccp_role(sk), sk,
			       ccid3_tx_state_name(hctx->ccid3hctx_state),
			       hctx->ccid3hctx_x);
		/* The value of R is still undefined and so we cannot recompute
		 * the timeout value. Keep initial value as per [RFC 4342, 5]. */
		t_nfb = TFRC_INITIAL_TIMEOUT;
		ccid3_update_send_time(hctx);
		break;
	case TFRC_SSTATE_FBACK:
		/*
		 * Check if IDLE since last timeout and recv rate is less than
		 * 4 packets per RTT
		 */
		if (!hctx->ccid3hctx_idle ||
		    (hctx->ccid3hctx_x_recv >=
		     4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
			struct timeval now;

			ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
				       dccp_role(sk), sk,
				       ccid3_tx_state_name(hctx->ccid3hctx_state));
			/* Halve sending rate */

			/*  If (X_calc > 2 * X_recv)
			 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
			 *  Else
			 *    X_recv = X_calc / 4;
			 */
			BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
			       hctx->ccid3hctx_x_calc == 0);

			/* check also if p is zero -> x_calc is infinity? */
			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
			    hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
				hctx->ccid3hctx_x_recv =
					max_t(u32, hctx->ccid3hctx_x_recv / 2,
						   hctx->ccid3hctx_s / (2 * TFRC_T_MBI));
			else
				hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;

			/* Update sending rate */
			dccp_timestamp(sk, &now);
			ccid3_hc_tx_update_x(sk, &now);
		}
		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * R, 2 * s/X)  =  max(4 * R, 2 * t_ipi)
		 */
		t_nfb = max(4 * hctx->ccid3hctx_rtt, 2 * hctx->ccid3hctx_t_ipi);
		break;
	case TFRC_SSTATE_NO_SENT:
		DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk);
		/* fall through */
	case TFRC_SSTATE_TERM:
		goto out;
	}

	hctx->ccid3hctx_idle = 1;

restart_timer:
	sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
		       jiffies + usecs_to_jiffies(t_nfb));
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 * returns
 *   > 0: delay (in msecs) that should pass before actually sending
 *   = 0: can send immediately
 *   < 0: error condition; do not send packet
 */
static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct dccp_tx_hist_entry *new_packet;
	struct timeval now;
	long delay;

	BUG_ON(hctx == NULL);

	/*
	 * This function is called only for Data and DataAck packets. Sending
	 * zero-sized Data(Ack)s is theoretically possible, but for congestion
	 * control this case is pathological - ignore it.
	 */
	if (unlikely(skb->len == 0))
		return -EBADMSG;

	/* See if last packet allocated was not sent */
	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
	if (new_packet == NULL || new_packet->dccphtx_sent) {
		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
						    SLAB_ATOMIC);
		if (unlikely(new_packet == NULL)) {
			DCCP_WARN("%s, sk=%p, not enough mem to add to history, "
				  "send refused\n", dccp_role(sk), sk);
			return -ENOBUFS;
		}

		dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
	}

	dccp_timestamp(sk, &now);

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_SENT:
		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
		hctx->ccid3hctx_last_win_count	 = 0;
		hctx->ccid3hctx_t_last_win_count = now;
		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);

		/* Set initial sending rate to 1 packet per second */
		ccid3_hc_tx_update_s(hctx, skb->len);
		hctx->ccid3hctx_x = hctx->ccid3hctx_s;

		/* First timeout, according to [RFC 3448, 4.2], is 1 second */
		hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
		/* Initial delta: minimum of 0.5 sec and t_gran/2 */
		hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN;

		/* Set t_0 for initial packet */
		hctx->ccid3hctx_t_nom = now;
		break;
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
		/*
		 * Scheduling of packet transmissions [RFC 3448, 4.6]
		 *
		 * if (t_now > t_nom - delta)
		 *	// send the packet now
		 * else
		 *	// send the packet in (t_nom - t_now) milliseconds.
		 */
		if (delay >= hctx->ccid3hctx_delta)
			return delay / 1000L;
		break;
	case TFRC_SSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return -EINVAL;
	}

	/* prepare to send now (add options etc.) */
	dp->dccps_hc_tx_insert_options = 1;
	new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval =
				    hctx->ccid3hctx_last_win_count;
	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	return 0;
}

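/*
 * ccid3_hc_tx_packet_sent  -  Register a packet that has just been sent
 *
 * Updates the moving average of the packet size `s', timestamps the
 * corresponding history entry, and advances the window counter by one for
 * each quarter-RTT (at most 5) elapsed since its last change (RFC 4342, 8.1).
 */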
static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct timeval now;
	unsigned long quarter_rtt;
	struct dccp_tx_hist_entry *packet;

	BUG_ON(hctx == NULL);

	dccp_timestamp(sk, &now);

	ccid3_hc_tx_update_s(hctx, len);

	packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("packet doesn't exist in history!\n");
		return;
	}
	if (unlikely(packet->dccphtx_sent)) {
		DCCP_WARN("no unsent packet in history!\n");
		return;
	}
	packet->dccphtx_tstamp = now;
	packet->dccphtx_seqno  = dp->dccps_gss;
	/*
	 * Check if win_count has changed.
	 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
	 */
	quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
	if (likely(hctx->ccid3hctx_rtt > 8))
		quarter_rtt /= hctx->ccid3hctx_rtt / 4;

	if (quarter_rtt > 0) {
		hctx->ccid3hctx_t_last_win_count = now;
		hctx->ccid3hctx_last_win_count	 = (hctx->ccid3hctx_last_win_count +
			min_t(unsigned long, quarter_rtt, 5)) % 16;
		ccid3_pr_debug("%s, sk=%p, window changed from "
			       "%u to %u!\n",
			       dccp_role(sk), sk,
			       packet->dccphtx_ccval,
			       hctx->ccid3hctx_last_win_count);
	}

	hctx->ccid3hctx_idle = 0;
	packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
	packet->dccphtx_sent = 1;
}

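/*
 * ccid3_hc_tx_packet_recv  -  Process feedback carried on an Ack or DataAck
 *
 * Takes a fresh RTT sample from the acked history entry, updates the
 * sending rate X and the loss event rate p from the received options, and
 * reschedules the nofeedback timer.
 */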
static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;
	struct dccp_tx_hist_entry *packet;
	struct timeval now;
	unsigned long t_nfb;
	u32 t_elapsed;
	u32 pinv;
	u32 x_recv;
	u32 r_sample;

	BUG_ON(hctx == NULL);

	/* we are only interested in ACKs */
	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
	      DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
		return;

	opt_recv = &hctx->ccid3hctx_options_received;

	t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
	x_recv = opt_recv->ccid3or_receive_rate;
	pinv = opt_recv->ccid3or_loss_event_rate;

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		/* Calculate new round trip sample by
		 *	R_sample = (now - t_recvdata) - t_delay */
		/* get t_recvdata from history */
		packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
					   DCCP_SKB_CB(skb)->dccpd_ack_seq);
		if (unlikely(packet == NULL)) {
			DCCP_WARN("%s, sk=%p, seqno %llu(%s) doesn't exist "
				  "in history!\n", dccp_role(sk), sk,
			    (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
			return;
		}

		/* Update RTT */
		dccp_timestamp(sk, &now);
		r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
		if (unlikely(r_sample <= t_elapsed))
			DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
				  r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		/* Update RTT estimate by
		 * If (No feedback recv)
		 *    R = R_sample;
		 * Else
		 *    R = q * R + (1 - q) * R_sample;
		 *
		 * q is a constant, RFC 3448 recommends 0.9
		 */
		if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
			/* Use Larger Initial Windows [RFC 4342, sec. 5],
			 * i.e. w_init = min(4 * s, max(2 * s, 4380 bytes)).
			 * We deviate in that we use `s' instead of `MSS'. */
			u16 w_init = min(4 * hctx->ccid3hctx_s,
					 max(2 * hctx->ccid3hctx_s, 4380));
			hctx->ccid3hctx_rtt  = r_sample;
			hctx->ccid3hctx_x    = usecs_div(w_init, r_sample);
			hctx->ccid3hctx_t_ld = now;

			ccid3_update_send_time(hctx);

			ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
		} else {
			hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
					      r_sample / 10;
			ccid3_hc_tx_update_x(sk, &now);
		}

		ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
			       "r_sample=%uus\n", dccp_role(sk), sk,
			       hctx->ccid3hctx_rtt, r_sample);

		/* Update receive rate */
		hctx->ccid3hctx_x_recv = x_recv; /* X_recv in bytes per sec */

		/* Update loss event rate */
		if (pinv == ~0 || pinv == 0)
			hctx->ccid3hctx_p = 0;
		else {
			hctx->ccid3hctx_p = 1000000 / pinv;

			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
				hctx->ccid3hctx_p = TFRC_SMALLEST_P;
				ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
					       dccp_role(sk), sk);
			}
		}

		/* unschedule no feedback timer */
		sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

		/* remove all packets older than the one acked from history */
		dccp_tx_hist_purge_older(ccid3_tx_hist,
					 &hctx->ccid3hctx_hist, packet);

		/*
		 * As we have calculated new ipi, delta, t_nom it is possible
		 * that we now can send a packet, so wake up dccp_wait_for_ccid
		 */
		sk->sk_write_space(sk);

		/* Update timeout interval. We use the alternative variant of
		 * [RFC 3448, 3.1] which sets the upper bound of t_rto to one
		 * second, as it is suggested for TCP (see RFC 2988, 2.4). */
		hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
						   USEC_PER_SEC);
		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * R, 2 * s/X)  =  max(4 * R, 2 * t_ipi)
		 */
		t_nfb = max(4 * hctx->ccid3hctx_rtt, 2 * hctx->ccid3hctx_t_ipi);

		ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
			       "expire in %lu jiffies (%luus)\n",
			       dccp_role(sk), sk,
			       usecs_to_jiffies(t_nfb), t_nfb);

		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + usecs_to_jiffies(t_nfb));

		/* set idle flag */
		hctx->ccid3hctx_idle = 1;
		break;
	case TFRC_SSTATE_NO_SENT:
		DCCP_WARN("Illegal ACK received - no packet has been sent\n");
		/* fall through */
	case TFRC_SSTATE_TERM:		/* ignore feedback when closing */
		break;
	}
}

static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
		DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
	return 0;
}

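/*
 * ccid3_hc_tx_parse_options  -  Parse CCID3-specific options
 *
 * Records the Loss Event Rate, Loss Intervals, and Receive Rate options of
 * the current Ack so that ccid3_hc_tx_packet_recv() can evaluate them.
 */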
static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
				     unsigned char len, u16 idx,
				     unsigned char *value)
{
	int rc = 0;
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;

	BUG_ON(hctx == NULL);

	opt_recv = &hctx->ccid3hctx_options_received;

	if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
		opt_recv->ccid3or_seqno		     = dp->dccps_gsr;
		opt_recv->ccid3or_loss_event_rate    = ~0;
		opt_recv->ccid3or_loss_intervals_idx = 0;
		opt_recv->ccid3or_loss_intervals_len = 0;
		opt_recv->ccid3or_receive_rate	     = 0;
	}

	switch (option) {
	case TFRC_OPT_LOSS_EVENT_RATE:
		if (unlikely(len != 4)) {
			DCCP_WARN("%s, sk=%p, invalid len %d "
				  "for TFRC_OPT_LOSS_EVENT_RATE\n",
				  dccp_role(sk), sk, len);
			rc = -EINVAL;
		} else {
			opt_recv->ccid3or_loss_event_rate =
						ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_loss_event_rate);
		}
		break;
	case TFRC_OPT_LOSS_INTERVALS:
		opt_recv->ccid3or_loss_intervals_idx = idx;
		opt_recv->ccid3or_loss_intervals_len = len;
		ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
			       dccp_role(sk), sk,
			       opt_recv->ccid3or_loss_intervals_idx,
			       opt_recv->ccid3or_loss_intervals_len);
		break;
	case TFRC_OPT_RECEIVE_RATE:
		if (unlikely(len != 4)) {
			DCCP_WARN("%s, sk=%p, invalid len %d "
				  "for TFRC_OPT_RECEIVE_RATE\n",
				  dccp_role(sk), sk, len);
			rc = -EINVAL;
		} else {
			opt_recv->ccid3or_receive_rate =
						ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_receive_rate);
		}
		break;
	}

	return rc;
}

static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);

	hctx->ccid3hctx_s     = 0;
	hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
	INIT_LIST_HEAD(&hctx->ccid3hctx_hist);

	hctx->ccid3hctx_no_feedback_timer.function =
				ccid3_hc_tx_no_feedback_timer;
	hctx->ccid3hctx_no_feedback_timer.data     = (unsigned long)sk;
	init_timer(&hctx->ccid3hctx_no_feedback_timer);

	return 0;
}

static void ccid3_hc_tx_exit(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
	sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

	/* Empty packet history */
	dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
}

/*
 * RX Half Connection methods
 */

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
{
	static char *ccid3_rx_state_names[] = {
	[TFRC_RSTATE_NO_DATA] = "NO_DATA",
	[TFRC_RSTATE_DATA]    = "DATA",
	[TFRC_RSTATE_TERM]    = "TERM",
	};

	return ccid3_rx_state_names[state];
}
#endif

static void ccid3_hc_rx_set_state(struct sock *sk,
				  enum ccid3_hc_rx_states state)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;

	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
		       dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
		       ccid3_rx_state_name(state));
	WARN_ON(state == oldstate);
	hcrx->ccid3hcrx_state = state;
}

static inline void ccid3_hc_rx_update_s(struct ccid3_hc_rx_sock *hcrx, int len)
{
	if (unlikely(len == 0))	/* don't update on empty packets (e.g. ACKs) */
		ccid3_pr_debug("Packet payload length is 0 - not updating\n");
	else
		hcrx->ccid3hcrx_s = hcrx->ccid3hcrx_s == 0 ? len :
				    (9 * hcrx->ccid3hcrx_s + len) / 10;
}

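/*
 * ccid3_hc_rx_send_feedback  -  Send feedback to the sender
 *
 * Snapshots the receive rate X_recv, the elapsed time since the last data
 * packet, and the inverse loss event rate, then transmits them on a
 * DCCP-Ack via the option-insertion hook.
 */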
static void ccid3_hc_rx_send_feedback(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_rx_hist_entry *packet;
	struct timeval now;

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	dccp_timestamp(sk, &now);

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		hcrx->ccid3hcrx_x_recv = 0;
		break;
	case TFRC_RSTATE_DATA: {
		const u32 delta = timeval_delta(&now,
					&hcrx->ccid3hcrx_tstamp_last_feedback);
		hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
						   delta);
	}
		break;
	case TFRC_RSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return;
	}

	packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("%s, sk=%p, no data packet in history!\n",
			  dccp_role(sk), sk);
		return;
	}

	hcrx->ccid3hcrx_tstamp_last_feedback = now;
	hcrx->ccid3hcrx_ccval_last_counter   = packet->dccphrx_ccval;
	hcrx->ccid3hcrx_bytes_recv	     = 0;

	/* Convert to multiples of 10us */
	hcrx->ccid3hcrx_elapsed_time =
			timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
	if (hcrx->ccid3hcrx_p == 0)
		hcrx->ccid3hcrx_pinv = ~0;
	else
		hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
	dp->dccps_hc_rx_insert_options = 1;
	dccp_send_ack(sk);
}

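/*
 * ccid3_hc_rx_insert_options  -  Append feedback options to an outgoing skb
 *
 * Adds Elapsed Time, Timestamp, Loss Event Rate, and Receive Rate options
 * to packets carrying an Ack, using the values snapshotted by
 * ccid3_hc_rx_send_feedback().
 */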
static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	__be32 x_recv, pinv;

	BUG_ON(hcrx == NULL);

	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
		return 0;

	DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;

	if (dccp_packet_without_ack(skb))
		return 0;

	x_recv = htonl(hcrx->ccid3hcrx_x_recv);
	pinv   = htonl(hcrx->ccid3hcrx_pinv);

	if ((hcrx->ccid3hcrx_elapsed_time != 0 &&
	     dccp_insert_option_elapsed_time(sk, skb,
					     hcrx->ccid3hcrx_elapsed_time)) ||
	    dccp_insert_option_timestamp(sk, skb) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
			       &pinv, sizeof(pinv)) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
			       &x_recv, sizeof(x_recv)))
		return -1;

	return 0;
}

/* calculate first loss interval
 *
 * returns estimated loss interval in usecs */
static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
	u32 rtt, delta, x_recv, fval, p, tmp2;
	struct timeval tstamp = { 0, };
	int interval = 0;
	int win_count = 0;
	int step = 0;
	u64 tmp1;

	list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
				 dccphrx_node) {
		if (dccp_rx_hist_entry_data_packet(entry)) {
			tail = entry;

			switch (step) {
			case 0:
				tstamp	  = entry->dccphrx_tstamp;
				win_count = entry->dccphrx_ccval;
				step = 1;
				break;
			case 1:
				interval = win_count - entry->dccphrx_ccval;
				if (interval < 0)
					interval += TFRC_WIN_COUNT_LIMIT;
				if (interval > 4)
					goto found;
				break;
			}
		}
	}

	if (unlikely(step == 0)) {
		DCCP_WARN("%s, sk=%p, packet history has no data packets!\n",
			  dccp_role(sk), sk);
		return ~0;
	}

	if (unlikely(interval == 0)) {
		DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0. "
			  "Defaulting to 1\n", dccp_role(sk), sk);
		interval = 1;
	}
found:
	if (!tail) {
		DCCP_CRIT("tail is null\n");
		return ~0;
	}
	rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
	ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
		       dccp_role(sk), sk, rtt);

	if (rtt == 0) {
		DCCP_WARN("RTT==0, setting to 1\n");
		rtt = 1;
	}

	dccp_timestamp(sk, &tstamp);
	delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
	x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);

	if (x_recv == 0)
		x_recv = hcrx->ccid3hcrx_x_recv;

	tmp1 = (u64)x_recv * (u64)rtt;
	do_div(tmp1, 10000000);
	tmp2 = (u32)tmp1;

	if (!tmp2) {
		DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt = %u\n", x_recv, rtt);
		return ~0;
	}

	fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
	/* do not alter order above or you will get overflow on 32 bit */
	p = tfrc_calc_x_reverse_lookup(fval);
	ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);

	if (p == 0)
		return ~0;
	else
		return 1000000 / p;
}

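/*
 * ccid3_hc_rx_update_li  -  Update the loss interval history
 *
 * Opens the first loss interval (seeded via ccid3_hc_rx_calc_first_li())
 * if the history is empty; otherwise starts a new interval at seq_loss and
 * drops the oldest entry.
 */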
static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_li_hist_entry *head;
	u64 seq_temp;

	if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
		if (!dccp_li_hist_interval_new(ccid3_li_hist,
				&hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
			return;

		head = list_entry(hcrx->ccid3hcrx_li_hist.next,
				  struct dccp_li_hist_entry, dccplih_node);
		head->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
	} else {
		struct dccp_li_hist_entry *entry;
		struct list_head *tail;

		head = list_entry(hcrx->ccid3hcrx_li_hist.next,
				  struct dccp_li_hist_entry, dccplih_node);
		/* FIXME win count check removed as was wrong */
		/* should make this check with receive history */
		/* and compare there as per section 10.2 of RFC4342 */

		/* new loss event detected */
		/* calculate last interval length */
		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);

		if (entry == NULL) {
			DCCP_BUG("out of memory - can not allocate entry");
			return;
		}

		list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);

		tail = hcrx->ccid3hcrx_li_hist.prev;
		list_del(tail);
		kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);

		/* Create the newest interval */
		entry->dccplih_seqno = seq_loss;
		entry->dccplih_interval = seq_temp;
		entry->dccplih_win_count = win_loss;
	}
}

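/*
 * ccid3_hc_rx_detect_loss  -  Detect a gap in the sequence space
 *
 * Tracks the highest sequence number received without loss; a hole of more
 * than TFRC_RECV_NUM_LATE_LOSS packets is treated as a loss event and fed
 * into the loss interval history. Returns 1 if a loss was detected.
 */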
static int ccid3_hc_rx_detect_loss(struct sock *sk,
				   struct dccp_rx_hist_entry *packet)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *rx_hist =
				dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
	u64 seqno = packet->dccphrx_seqno;
	u64 tmp_seqno;
	int loss = 0;
	u8 ccval;

	tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;

	if (!rx_hist ||
	    follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
		hcrx->ccid3hcrx_seqno_nonloss = seqno;
		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
		goto detect_out;
	}

	while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
	       > TFRC_RECV_NUM_LATE_LOSS) {
		loss = 1;
		ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
				      hcrx->ccid3hcrx_ccval_nonloss);
		tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
		dccp_inc_seqno(&tmp_seqno);
		hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
		dccp_inc_seqno(&tmp_seqno);
		while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
					       tmp_seqno, &ccval)) {
			hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
			hcrx->ccid3hcrx_ccval_nonloss = ccval;
			dccp_inc_seqno(&tmp_seqno);
		}
	}

	/* FIXME - this code could be simplified with above while */
	/* but works at moment */
	if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
		hcrx->ccid3hcrx_seqno_nonloss = seqno;
		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
	}

detect_out:
	dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
				&hcrx->ccid3hcrx_li_hist, packet,
				hcrx->ccid3hcrx_seqno_nonloss);
	return loss;
}

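/*
 * ccid3_hc_rx_packet_recv  -  Process an incoming Data, Ack, or DataAck
 *
 * Updates the receiver RTT estimate from timestamp echoes, adds the packet
 * to the receive history, runs loss detection, and sends feedback either
 * once per RTT or immediately when the loss event rate has increased.
 */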
static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const struct dccp_options_received *opt_recv;
	struct dccp_rx_hist_entry *packet;
	struct timeval now;
	u32 p_prev, rtt_prev, r_sample, t_elapsed;
	int loss, payload_size;

	BUG_ON(hcrx == NULL);

	opt_recv = &dccp_sk(sk)->dccps_options_received;

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_ACK:
		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			return;
	case DCCP_PKT_DATAACK:
		if (opt_recv->dccpor_timestamp_echo == 0)
			break;
		rtt_prev = hcrx->ccid3hcrx_rtt;
		dccp_timestamp(sk, &now);
		timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
		r_sample = timeval_usecs(&now);
		t_elapsed = opt_recv->dccpor_elapsed_time * 10;

		if (unlikely(r_sample <= t_elapsed))
			DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
				  r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			hcrx->ccid3hcrx_rtt = r_sample;
		else
			hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
					      r_sample / 10;

		if (rtt_prev != hcrx->ccid3hcrx_rtt)
			ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
				       dccp_role(sk), hcrx->ccid3hcrx_rtt,
				       opt_recv->dccpor_elapsed_time);
		break;
	case DCCP_PKT_DATA:
		break;
	default: /* We're not interested in other packet types, move along */
		return;
	}

	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
					skb, SLAB_ATOMIC);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
			  "to history, consider it lost!\n", dccp_role(sk), sk);
		return;
	}

	loss = ccid3_hc_rx_detect_loss(sk, packet);

	if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
		return;

	payload_size = skb->len - dccp_hdr(skb)->dccph_doff * 4;
	ccid3_hc_rx_update_s(hcrx, payload_size);

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
			       "feedback\n",
			       dccp_role(sk), sk,
			       dccp_state_name(sk->sk_state), skb);
		ccid3_hc_rx_send_feedback(sk);
		ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
		return;
	case TFRC_RSTATE_DATA:
		hcrx->ccid3hcrx_bytes_recv += payload_size;
		if (loss)
			break;

		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
		    hcrx->ccid3hcrx_rtt) {
			hcrx->ccid3hcrx_tstamp_last_ack = now;
			ccid3_hc_rx_send_feedback(sk);
		}
		return;
	case TFRC_RSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return;
	}

	/* Dealing with packet loss */
	ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
		       dccp_role(sk), sk, dccp_state_name(sk->sk_state));

	p_prev = hcrx->ccid3hcrx_p;

	/* Calculate loss event rate */
	if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
		u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);

		/* Scaling up by 1000000 as fixed decimal */
		if (i_mean != 0)
			hcrx->ccid3hcrx_p = 1000000 / i_mean;
	} else
		DCCP_BUG("empty loss history");

	if (hcrx->ccid3hcrx_p > p_prev) {
		ccid3_hc_rx_send_feedback(sk);
		return;
	}
}

static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
	INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
	INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
	dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
	hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
	hcrx->ccid3hcrx_s   = 0;
	hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
	return 0;
}

static void ccid3_hc_rx_exit(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	BUG_ON(hcrx == NULL);

	ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);

	/* Empty packet history */
	dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);

	/* Empty loss interval history */
	dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
}

static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	BUG_ON(hcrx == NULL);

	info->tcpi_ca_state = hcrx->ccid3hcrx_state;
	info->tcpi_options  |= TCPI_OPT_TIMESTAMPS;
	info->tcpi_rcv_rtt  = hcrx->ccid3hcrx_rtt;
}

static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	BUG_ON(hctx == NULL);

	info->tcpi_rto = hctx->ccid3hctx_t_rto;
	info->tcpi_rtt = hctx->ccid3hctx_rtt;
}

static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
				  u32 __user *optval, int __user *optlen)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const void *val;

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return -EINVAL;

	switch (optname) {
	case DCCP_SOCKOPT_CCID_RX_INFO:
		if (len < sizeof(hcrx->ccid3hcrx_tfrc))
			return -EINVAL;
		len = sizeof(hcrx->ccid3hcrx_tfrc);
		val = &hcrx->ccid3hcrx_tfrc;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, val, len))
		return -EFAULT;

	return 0;
}

static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
				  u32 __user *optval, int __user *optlen)
{
	const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	const void *val;

	/* Listen socks don't have a private CCID block */
	if (sk->sk_state == DCCP_LISTEN)
		return -EINVAL;

	switch (optname) {
	case DCCP_SOCKOPT_CCID_TX_INFO:
		if (len < sizeof(hctx->ccid3hctx_tfrc))
			return -EINVAL;
		len = sizeof(hctx->ccid3hctx_tfrc);
		val = &hctx->ccid3hctx_tfrc;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, val, len))
		return -EFAULT;

	return 0;
}

static struct ccid_operations ccid3 = {
	.ccid_id		   = DCCPC_CCID3,
	.ccid_name		   = "ccid3",
	.ccid_owner		   = THIS_MODULE,
	.ccid_hc_tx_obj_size	   = sizeof(struct ccid3_hc_tx_sock),
	.ccid_hc_tx_init	   = ccid3_hc_tx_init,
	.ccid_hc_tx_exit	   = ccid3_hc_tx_exit,
	.ccid_hc_tx_send_packet	   = ccid3_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	   = ccid3_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv	   = ccid3_hc_tx_packet_recv,
	.ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
	.ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
	.ccid_hc_rx_obj_size	   = sizeof(struct ccid3_hc_rx_sock),
	.ccid_hc_rx_init	   = ccid3_hc_rx_init,
	.ccid_hc_rx_exit	   = ccid3_hc_rx_exit,
	.ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
	.ccid_hc_rx_packet_recv	   = ccid3_hc_rx_packet_recv,
	.ccid_hc_rx_get_info	   = ccid3_hc_rx_get_info,
	.ccid_hc_tx_get_info	   = ccid3_hc_tx_get_info,
	.ccid_hc_rx_getsockopt	   = ccid3_hc_rx_getsockopt,
	.ccid_hc_tx_getsockopt	   = ccid3_hc_tx_getsockopt,
};

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
module_param(ccid3_debug, int, 0444);
MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
#endif

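/*
 * ccid3_module_init  -  Allocate the shared TX/RX packet and loss interval
 *			 histories and register the CCID with the DCCP core.
 */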
static __init int ccid3_module_init(void)
{
	int rc = -ENOBUFS;

	ccid3_rx_hist = dccp_rx_hist_new("ccid3");
	if (ccid3_rx_hist == NULL)
		goto out;

	ccid3_tx_hist = dccp_tx_hist_new("ccid3");
	if (ccid3_tx_hist == NULL)
		goto out_free_rx;

	ccid3_li_hist = dccp_li_hist_new("ccid3");
	if (ccid3_li_hist == NULL)
		goto out_free_tx;

	rc = ccid_register(&ccid3);
	if (rc != 0)
		goto out_free_loss_interval_history;
out:
	return rc;

out_free_loss_interval_history:
	dccp_li_hist_delete(ccid3_li_hist);
	ccid3_li_hist = NULL;
out_free_tx:
	dccp_tx_hist_delete(ccid3_tx_hist);
	ccid3_tx_hist = NULL;
out_free_rx:
	dccp_rx_hist_delete(ccid3_rx_hist);
	ccid3_rx_hist = NULL;
	goto out;
}
module_init(ccid3_module_init);

static __exit void ccid3_module_exit(void)
{
	ccid_unregister(&ccid3);

	if (ccid3_tx_hist != NULL) {
		dccp_tx_hist_delete(ccid3_tx_hist);
		ccid3_tx_hist = NULL;
	}
	if (ccid3_rx_hist != NULL) {
		dccp_rx_hist_delete(ccid3_rx_hist);
		ccid3_rx_hist = NULL;
	}
	if (ccid3_li_hist != NULL) {
		dccp_li_hist_delete(ccid3_li_hist);
		ccid3_li_hist = NULL;
	}
}
module_exit(ccid3_module_exit);

MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
	      "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
MODULE_LICENSE("GPL");
MODULE_ALIAS("net-dccp-ccid-3");