/*
 * net/dccp/ccids/ccid3.c
 *
 * Copyright (c) 2007   The University of Aberdeen, Scotland, UK
 * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
 * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
 *
 * An implementation of the DCCP protocol
 *
 * This code has been developed by the University of Waikato WAND
 * research group. For further information please see http://www.wand.net.nz/
 *
 * This code also uses code from Lulea University, rereleased as GPL by its
 * authors:
 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 * Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 * and to make it work as a loadable module in the DCCP stack written by
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "../dccp.h"
#include "ccid3.h"
#include <asm/unaligned.h>

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static int ccid3_debug;
#define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
#else
#define ccid3_pr_debug(format, a...)
#endif

/*
 *	Transmitter Half-Connection Routines
 */

/* Oscillation Prevention/Reduction: recommended by rfc3448bis, on by default */
static int do_osc_prev = true;

/*
 * Compute the initial sending rate X_init in the manner of RFC 3390:
 *
 *	X_init  =  min(4 * MPS, max(2 * MPS, 4380 bytes)) / RTT
 *
 * For consistency with other parts of the code, X_init is scaled by 2^6.
 */
static inline u64 rfc3390_initial_rate(struct sock *sk)
{
        const u32 mps = dccp_sk(sk)->dccps_mss_cache,
               w_init = clamp(4380U, 2 * mps, 4 * mps);

        return scaled_div(w_init << 6, ccid3_hc_tx_sk(sk)->rtt);
}
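
/*
 * For illustration (example inputs, not from the original source): with an
 * MSS cache of 1460 bytes, w_init = clamp(4380, 2920, 5840) = 4380 bytes;
 * for an RTT of 100 ms this gives X_init of roughly 43800 bytes/second,
 * held internally as 43800 << 6.
 */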

/**
 * ccid3_update_send_interval  -  Calculate new t_ipi = s / X
 * This respects the granularity of X (64 * bytes/second) and enforces the
 * scaled minimum of s * 64 / t_mbi = `s' bytes/second as per RFC 3448/4342.
 */
static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hctx)
{
        if (unlikely(hctx->x <= hctx->s))
                hctx->x = hctx->s;
        hctx->t_ipi = scaled_div32(((u64)hctx->s) << 6, hctx->x);
}
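
/*
 * For illustration (example inputs): with s = 1460 bytes and X equivalent to
 * 43800 bytes/second (i.e. hctx->x = 43800 << 6), t_ipi = s/X is about
 * 33333 microseconds, so roughly 30 packets per second are scheduled.
 */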

static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now)
{
        u32 delta = ktime_us_delta(now, hctx->t_last_win_count);

        return delta / hctx->rtt;
}

/**
 * ccid3_hc_tx_update_x  -  Update allowed sending rate X
 * @stamp: most recent time if available - can be left NULL.
 * This function tracks draft rfc3448bis, check there for latest details.
 *
 * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
 *       fine-grained resolution of sending rates. This requires scaling by 2^6
 *       throughout the code. Only X_calc is unscaled (in bytes/second).
 */
static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
{
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        u64 min_rate = 2 * hctx->x_recv;
        const u64 old_x = hctx->x;
        ktime_t now = stamp ? *stamp : ktime_get_real();

        /*
         * Handle IDLE periods: do not reduce below RFC3390 initial sending rate
         * when idling [RFC 4342, 5.1]. Definition of idling is from rfc3448bis:
         * a sender is idle if it has not sent anything over a 2-RTT-period.
         * For consistency with X and X_recv, min_rate is also scaled by 2^6.
         */
        if (ccid3_hc_tx_idle_rtt(hctx, now) >= 2) {
                min_rate = rfc3390_initial_rate(sk);
                min_rate = max(min_rate, 2 * hctx->x_recv);
        }

        if (hctx->p > 0) {

                hctx->x = min(((u64)hctx->x_calc) << 6, min_rate);

        } else if (ktime_us_delta(now, hctx->t_ld) - (s64)hctx->rtt >= 0) {

                hctx->x = min(2 * hctx->x, min_rate);
                hctx->x = max(hctx->x,
                              scaled_div(((u64)hctx->s) << 6, hctx->rtt));
                hctx->t_ld = now;
        }

        if (hctx->x != old_x) {
                ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
                               "X_recv=%u\n", (unsigned)(old_x >> 6),
                               (unsigned)(hctx->x >> 6), hctx->x_calc,
                               (unsigned)(hctx->x_recv >> 6));

                ccid3_update_send_interval(hctx);
        }
}
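
/*
 * For illustration (example inputs): once loss has been reported (p > 0),
 * X becomes min(X_calc, 2 * X_recv) in unscaled terms, e.g. X_calc = 50000
 * bytes/s with X_recv = 20000 bytes/s yields X = 40000 bytes/s (stored
 * scaled by 2^6). After an idle period of two or more RTTs, the cap is not
 * allowed to fall below the RFC 3390 initial rate.
 */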

/*
 *	ccid3_hc_tx_measure_packet_size  -  Measuring the packet size `s' (sec 4.1)
 *	@new_len: DCCP payload size in bytes (not used by all methods)
 */
static u32 ccid3_hc_tx_measure_packet_size(struct sock *sk, const u16 new_len)
{
#if defined(CONFIG_IP_DCCP_CCID3_MEASURE_S_AS_AVG)
        return tfrc_ewma(ccid3_hc_tx_sk(sk)->s, new_len, 9);
#elif defined(CONFIG_IP_DCCP_CCID3_MEASURE_S_AS_MAX)
        return max(ccid3_hc_tx_sk(sk)->s, new_len);
#else /* CONFIG_IP_DCCP_CCID3_MEASURE_S_AS_MPS */
        return dccp_sk(sk)->dccps_mss_cache;
#endif
}
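
/*
 * For illustration: depending on the compile-time choice above, `s' is
 * tracked either as a smoothed average of recent payload lengths (tfrc_ewma,
 * weighted towards the history), as the largest payload length seen so far,
 * or simply as the cached maximum packet size regardless of payload size.
 */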

/*
 *	Update Window Counter using the algorithm from [RFC 4342, 8.1].
 *	As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt().
 */
static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
                                                ktime_t now)
{
        u32 delta = ktime_us_delta(now, hctx->t_last_win_count),
            quarter_rtts = (4 * delta) / hctx->rtt;

        if (quarter_rtts > 0) {
                hctx->t_last_win_count = now;
                hctx->last_win_count  += min(quarter_rtts, 5U);
                hctx->last_win_count  &= 0xF;		/* mod 16 */
        }
}
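
/*
 * For illustration: if 0.6 RTTs have elapsed since the last update,
 * quarter_rtts = 2 and the CCVAL window counter advances by 2 (mod 16);
 * after a long pause the increment is capped at 5 quarter-RTTs per update,
 * as enforced by the min() above.
 */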

static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        unsigned long t_nfb = USEC_PER_SEC / 5;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                /* XXX: set some sensible MIB */
                goto restart_timer;
        }

        ccid3_pr_debug("%s(%p) entry with%s feedback\n", dccp_role(sk), sk,
                       hctx->feedback ? "" : "out");

        /* Ignore and do not restart after leaving the established state */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
                goto out;

        /* Reset feedback state to "no feedback received" */
        hctx->feedback = false;

        /*
         * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
         * RTO is 0 if and only if no feedback has been received yet.
         */
        if (hctx->t_rto == 0 || hctx->p == 0) {

                /* halve send rate directly */
                hctx->x /= 2;
                ccid3_update_send_interval(hctx);

        } else {
                /*
                 *  Modify the cached value of X_recv
                 *
                 *  If (X_calc > 2 * X_recv)
                 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
                 *  Else
                 *    X_recv = X_calc / 4;
                 *
                 *  Note that X_recv is scaled by 2^6 while X_calc is not
                 */
                BUG_ON(hctx->p && !hctx->x_calc);

                if (hctx->x_calc > (hctx->x_recv >> 5))
                        hctx->x_recv /= 2;
                else {
                        hctx->x_recv = hctx->x_calc;
                        hctx->x_recv <<= 4;
                }
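                /*
                 * The shifts implement the pseudo-code above in scaled units:
                 * X_recv >> 5 equals 2 * X_recv in unscaled bytes/second
                 * (X_recv carries a factor of 2^6), and X_calc << 4 equals
                 * X_calc / 4 scaled back up by 2^6.
                 */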
                ccid3_hc_tx_update_x(sk, NULL);
        }
        ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n",
                       (unsigned long long)hctx->x);
        /*
         * Set new timeout for the nofeedback timer.
         * See comments in packet_recv() regarding the value of t_RTO.
         */
        if (unlikely(hctx->t_rto == 0))	/* no feedback received yet */
                t_nfb = TFRC_INITIAL_TIMEOUT;
        else
                t_nfb = max(hctx->t_rto, 2 * hctx->t_ipi);

restart_timer:
        sk_reset_timer(sk, &hctx->no_feedback_timer,
                       jiffies + usecs_to_jiffies(t_nfb));
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

/**
 * ccid3_hc_tx_send_packet  -  Delay-based dequeueing of TX packets
 * @skb: next packet candidate to send on @sk
 * This function uses the convention of ccid_packet_dequeue_eval() and
 * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
 */
static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        ktime_t now = ktime_get_real();
        s64 delay;

        /*
         * This function is called only for Data and DataAck packets. Sending
         * zero-sized Data(Ack)s is theoretically possible, but for congestion
         * control this case is pathological - ignore it.
         */
        if (unlikely(skb->len == 0))
                return -EBADMSG;

        if (hctx->s == 0) {
                sk_reset_timer(sk, &hctx->no_feedback_timer, (jiffies +
                               usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
                hctx->last_win_count   = 0;
                hctx->t_last_win_count = now;

                /* Set t_0 for initial packet */
                hctx->t_nom = now;

                /*
                 * Use initial RTT sample when available: recommended by erratum
                 * to RFC 4342. This implements the initialisation procedure of
                 * draft rfc3448bis, section 4.2. Remember, X is scaled by 2^6.
                 */
                if (dp->dccps_syn_rtt) {
                        ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt);
                        hctx->rtt  = dp->dccps_syn_rtt;
                        hctx->x    = rfc3390_initial_rate(sk);
                        hctx->t_ld = now;
                } else {
                        /*
                         * Sender does not have RTT sample:
                         * - set fallback RTT (RFC 4340, 3.4) since a RTT value
                         *   is needed in several parts (e.g. window counter);
                         * - set sending rate X_pps = 1pps as per RFC 3448, 4.2.
                         */
                        hctx->rtt = DCCP_FALLBACK_RTT;
                        hctx->x   = dp->dccps_mss_cache;
                        hctx->x <<= 6;
                }

                /* Compute t_ipi = s / X */
                hctx->s = ccid3_hc_tx_measure_packet_size(sk, skb->len);
                ccid3_update_send_interval(hctx);

                /* Seed value for Oscillation Prevention (sec. 4.5) */
                hctx->r_sqmean = tfrc_scaled_sqrt(hctx->rtt);

        } else {
                delay = ktime_us_delta(hctx->t_nom, now);
                ccid3_pr_debug("delay=%ld\n", (long)delay);
                /*
                 * Scheduling of packet transmissions [RFC 3448, 4.6]
                 *
                 * if (t_now > t_nom - delta)
                 *       // send the packet now
                 * else
                 *       // send the packet in (t_nom - t_now) milliseconds.
                 */
                if (delay >= TFRC_T_DELTA)
                        return (u32)delay / USEC_PER_MSEC;

                ccid3_hc_tx_update_win_count(hctx, now);
        }

        /* prepare to send now (add options etc.) */
        dp->dccps_hc_tx_insert_options = 1;
        DCCP_SKB_CB(skb)->dccpd_ccval  = hctx->last_win_count;

        /* set the nominal send time for the next following packet */
        hctx->t_nom = ktime_add_us(hctx->t_nom, hctx->t_ipi);

        return CCID_PACKET_SEND_AT_ONCE;
}
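
/*
 * For illustration: a packet whose nominal send time t_nom still lies, say,
 * 20 ms in the future (and whose delay is therefore at or above the
 * scheduling tolerance TFRC_T_DELTA) causes a return value of 20, so the
 * dequeueing code retries in 20 ms; once the remaining delay falls below
 * TFRC_T_DELTA, the packet is released at once and t_nom advances by t_ipi.
 */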

static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

        /* Changes to s will become effective the next time X is computed */
        hctx->s = ccid3_hc_tx_measure_packet_size(sk, len);

        if (tfrc_tx_hist_add(&hctx->hist, dccp_sk(sk)->dccps_gss))
                DCCP_CRIT("packet history - out of memory!");
}

static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        struct tfrc_tx_hist_entry *acked;
        ktime_t now;
        unsigned long t_nfb;
        u32 r_sample;

        /* we are only interested in ACKs */
        if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
              DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
                return;
        /*
         * Locate the acknowledged packet in the TX history.
         *
         * Returning "entry not found" here can for instance happen when
         *  - the host has not sent out anything (e.g. a passive server),
         *  - the Ack is outdated (packet with higher Ack number was received),
         *  - it is a bogus Ack (for a packet not sent on this connection).
         */
        acked = tfrc_tx_hist_find_entry(hctx->hist, dccp_hdr_ack_seq(skb));
        if (acked == NULL)
                return;
        /* For the sake of RTT sampling, ignore/remove all older entries */
        tfrc_tx_hist_purge(&acked->next);

        /* Update the moving average for the RTT estimate (RFC 3448, 4.3) */
        now       = ktime_get_real();
        r_sample  = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
        hctx->rtt = tfrc_ewma(hctx->rtt, r_sample, 9);

        /*
         * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
         */
        if (!hctx->feedback) {
                hctx->feedback = true;

                if (hctx->t_rto == 0) {
                        /*
                         * Initial feedback packet: Larger Initial Windows (4.2)
                         */
                        hctx->x    = rfc3390_initial_rate(sk);
                        hctx->t_ld = now;

                        ccid3_update_send_interval(hctx);

                        goto done_computing_x;
                } else if (hctx->p == 0) {
                        /*
                         * First feedback after nofeedback timer expiry (4.3)
                         */
                        goto done_computing_x;
                }
        }

        /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
        if (hctx->p > 0)
                hctx->x_calc = tfrc_calc_x(hctx->s, hctx->rtt, hctx->p);
        ccid3_hc_tx_update_x(sk, &now);

done_computing_x:
        ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, "
                       "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
                       dccp_role(sk), sk, hctx->rtt, r_sample,
                       hctx->s, hctx->p, hctx->x_calc,
                       (unsigned)(hctx->x_recv >> 6),
                       (unsigned)(hctx->x >> 6));

        /*
         * Oscillation Reduction (RFC 3448, 4.5) - modifying t_ipi according to
         * RTT changes, multiplying by X/X_inst = sqrt(R_sample)/R_sqmean. This
         * can be useful if few connections share a link, avoiding that buffer
         * fill levels (RTT) oscillate as a result of frequent adjustments to X.
         * A useful presentation with background information is in
         *    Joerg Widmer, "Equation-Based Congestion Control",
         *    MSc Thesis, University of Mannheim, Germany, 2000
         * (sec. 3.6.4), who calls this ISM ("Inter-packet Space Modulation").
         */
        if (do_osc_prev) {
                r_sample = tfrc_scaled_sqrt(r_sample);
                /*
                 * The modulation can work in both ways: increase/decrease t_ipi
                 * according to long-term increases/decreases of the RTT. The
                 * former is a useful measure, since it works against queue
                 * build-up. The latter temporarily increases the sending rate,
                 * so that buffers fill up more quickly. This in turn causes
                 * the RTT to increase, so that either later reduction becomes
                 * necessary or the RTT stays at a very high level. Decreasing
                 * t_ipi is therefore not supported.
                 * Furthermore, during the initial slow-start phase the RTT
                 * naturally increases, where using the algorithm would cause
                 * delays. Hence it is disabled during the initial slow-start.
                 */
                if (r_sample > hctx->r_sqmean && hctx->p > 0)
                        hctx->t_ipi = div_u64((u64)hctx->t_ipi * (u64)r_sample,
                                              hctx->r_sqmean);
                hctx->t_ipi = min_t(u32, hctx->t_ipi, TFRC_T_MBI);
                /* update R_sqmean _after_ computing the modulation factor */
                hctx->r_sqmean = tfrc_ewma(hctx->r_sqmean, r_sample, 9);
        }
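
        /*
         * For illustration: if the square root of the latest RTT sample is 10%
         * above R_sqmean (and loss has already been seen, p > 0), t_ipi is
         * stretched by the same 10%, lowering the instantaneous rate without
         * touching X itself; t_ipi remains capped at t_mbi.
         */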

        /* unschedule no feedback timer */
        sk_stop_timer(sk, &hctx->no_feedback_timer);

        /*
         * As we have calculated new ipi, delta, t_nom it is possible
         * that we now can send a packet, so wake up dccp_wait_for_ccid
         */
        sk->sk_write_space(sk);

        /*
         * Update timeout interval for the nofeedback timer.
         * We use a configuration option to increase the lower bound.
         * This can help avoid triggering the nofeedback timer too
         * often ('spinning') on LANs with small RTTs.
         */
        hctx->t_rto = max_t(u32, 4 * hctx->rtt, (CONFIG_IP_DCCP_CCID3_RTO *
                                                 (USEC_PER_SEC / 1000)));
        /*
         * Schedule no feedback timer to expire in
         * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
         */
        t_nfb = max(hctx->t_rto, 2 * hctx->t_ipi);

        ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
                       "expire in %lu jiffies (%luus)\n",
                       dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb);

        sk_reset_timer(sk, &hctx->no_feedback_timer,
                       jiffies + usecs_to_jiffies(t_nfb));
}

static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
                                     u8 option, u8 *optval, u8 optlen)
{
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        __be32 opt_val;

        switch (option) {
        case TFRC_OPT_RECEIVE_RATE:
        case TFRC_OPT_LOSS_EVENT_RATE:
                /* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
                if (packet_type == DCCP_PKT_DATA)
                        break;
                if (unlikely(optlen != 4)) {
                        DCCP_WARN("%s(%p), invalid len %d for %u\n",
                                  dccp_role(sk), sk, optlen, option);
                        return -EINVAL;
                }
                opt_val = ntohl(get_unaligned((__be32 *)optval));

                if (option == TFRC_OPT_RECEIVE_RATE) {
                        /* Receive Rate is kept in units of 64 bytes/second */
                        hctx->x_recv = opt_val;
                        hctx->x_recv <<= 6;

                        ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
                                       dccp_role(sk), sk, opt_val);
                } else {
                        /* Update the fixpoint Loss Event Rate fraction */
                        hctx->p = tfrc_invert_loss_event_rate(opt_val);

                        ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
                                       dccp_role(sk), sk, opt_val);
                }
        }
        return 0;
}
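
/*
 * For illustration: a Receive Rate option carrying 50000 means the peer
 * received 50000 bytes/second, stored here as 50000 << 6; a Loss Event Rate
 * option carrying 100 is the inverse of the loss event rate (RFC 4342, 8.5),
 * i.e. p = 1%, which tfrc_invert_loss_event_rate() turns into the fixpoint
 * fraction used by the throughput equation.
 */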

static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
        struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);

        hctx->hist = NULL;
        setup_timer(&hctx->no_feedback_timer,
                    ccid3_hc_tx_no_feedback_timer, (unsigned long)sk);
        return 0;
}

static void ccid3_hc_tx_exit(struct sock *sk)
{
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

        sk_stop_timer(sk, &hctx->no_feedback_timer);
        tfrc_tx_hist_purge(&hctx->hist);
}

static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
        info->tcpi_rto = ccid3_hc_tx_sk(sk)->t_rto;
        info->tcpi_rtt = ccid3_hc_tx_sk(sk)->rtt;
}

static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
                                  u32 __user *optval, int __user *optlen)
{
        const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        struct tfrc_tx_info tfrc;
        const void *val;

        switch (optname) {
        case DCCP_SOCKOPT_CCID_TX_INFO:
                if (len < sizeof(tfrc))
                        return -EINVAL;
                tfrc.tfrctx_x      = hctx->x;
                tfrc.tfrctx_x_recv = hctx->x_recv;
                tfrc.tfrctx_x_calc = hctx->x_calc;
                tfrc.tfrctx_rtt    = hctx->rtt;
                tfrc.tfrctx_p      = hctx->p;
                tfrc.tfrctx_rto    = hctx->t_rto;
                tfrc.tfrctx_ipi    = hctx->t_ipi;
                len = sizeof(tfrc);
                val = &tfrc;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen) || copy_to_user(optval, val, len))
                return -EFAULT;

        return 0;
}

/*
 *	Receiver Half-Connection Routines
 */
static void ccid3_hc_rx_send_feedback(struct sock *sk,
                                      const struct sk_buff *skb,
                                      enum ccid3_fback_type fbtype)
{
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

        switch (fbtype) {
        case CCID3_FBACK_INITIAL:
                hcrx->x_recv = 0;
                hcrx->p_inverse = ~0U;	/* see RFC 4342, 8.5 */
                break;
        case CCID3_FBACK_PARAM_CHANGE:
                if (unlikely(hcrx->feedback == CCID3_FBACK_NONE)) {
                        /*
                         * rfc3448bis-06, 6.3.1: First packet(s) lost or marked
                         * FIXME: in rfc3448bis the receiver returns X_recv=0
                         * here as it normally would in the first feedback packet.
                         * However this is not possible yet, since the code still
                         * uses RFC 3448, i.e.
                         *    If (p > 0)
                         *      Calculate X_calc using the TCP throughput equation.
                         *      X = max(min(X_calc, 2*X_recv), s/t_mbi);
                         * would bring X down to s/t_mbi. That is why we return
                         * X_recv according to rfc3448bis-06 for the moment.
                         */
                        u32 s = tfrc_rx_hist_packet_size(&hcrx->hist),
                            rtt = tfrc_rx_hist_rtt(&hcrx->hist);

                        hcrx->x_recv = scaled_div32(s, 2 * rtt);
                        break;
                }
                /*
                 * When parameters change (new loss or p > p_prev), we do not
                 * have a reliable estimate for R_m of [RFC 3448, 6.2] and so
                 * always check whether at least RTT time units were covered.
                 */
                hcrx->x_recv = tfrc_rx_hist_x_recv(&hcrx->hist, hcrx->x_recv);
                break;
        case CCID3_FBACK_PERIODIC:
                /*
                 * Step (2) of rfc3448bis-06, 6.2:
                 * - if no data packets have been received, just restart timer
                 * - if data packets have been received, re-compute X_recv
                 */
                if (hcrx->hist.bytes_recvd == 0)
                        goto prepare_for_next_time;
                hcrx->x_recv = tfrc_rx_hist_x_recv(&hcrx->hist, hcrx->x_recv);
                break;
        default:
                return;
        }

        ccid3_pr_debug("X_recv=%u, 1/p=%u\n", hcrx->x_recv, hcrx->p_inverse);

        dccp_sk(sk)->dccps_hc_rx_insert_options = 1;
        dccp_send_ack(sk);

prepare_for_next_time:
        tfrc_rx_hist_restart_byte_counter(&hcrx->hist);
        hcrx->last_counter = dccp_hdr(skb)->dccph_ccval;
        hcrx->feedback     = fbtype;
}

static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
        const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        __be32 x_recv, pinv;

        if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
                return 0;

        if (dccp_packet_without_ack(skb))
                return 0;

        x_recv = htonl(hcrx->x_recv);
        pinv   = htonl(hcrx->p_inverse);

        if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
                               &pinv, sizeof(pinv)) ||
            dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
                               &x_recv, sizeof(x_recv)))
                return -1;

        return 0;
}

/**
 * ccid3_first_li  -  Implements [RFC 3448, 6.3.1]
 *
 * Determine the length of the first loss interval via inverse lookup.
 * Assume that X_recv can be computed by the throughput equation
 *		     s
 *	X_recv = --------
 *		 R * fval
 * Find some p such that f(p) = fval; return 1/p (scaled).
 */
static u32 ccid3_first_li(struct sock *sk)
{
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        u32 s = tfrc_rx_hist_packet_size(&hcrx->hist),
            rtt = tfrc_rx_hist_rtt(&hcrx->hist), x_recv, p;
        u64 fval;

        /*
         * rfc3448bis-06, 6.3.1: First data packet(s) are marked or lost. Set p
         * to give the equivalent of X_target = s/(2*R). Thus fval = 2 and so p
         * is about 20.64%. This yields an interval length of 4.84 (rounded up).
         */
        if (unlikely(hcrx->feedback == CCID3_FBACK_NONE))
                return 5;

        x_recv = tfrc_rx_hist_x_recv(&hcrx->hist, hcrx->x_recv);
        if (x_recv == 0)
                goto failed;

        fval = scaled_div32(scaled_div(s, rtt), x_recv);
        p = tfrc_calc_x_reverse_lookup(fval);

        ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
                       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);

        if (p > 0)
                return scaled_div(1, p);
failed:
        return UINT_MAX;
}
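
/*
 * For illustration (example inputs): with s = 1460 bytes, R = 100 ms and a
 * measured X_recv of 14600 bytes/s, fval = s/(R * X_recv) = 1; the reverse
 * lookup then returns the p for which the TCP throughput equation gives
 * f(p) = 1, and the first loss interval is seeded with the corresponding 1/p.
 */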

static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
        const bool is_data_packet = dccp_data_packet(skb);

        /*
         * Perform loss detection and handle pending losses
         */
        if (tfrc_rx_congestion_event(&hcrx->hist, &hcrx->li_hist,
                                     skb, ndp, ccid3_first_li, sk))
                ccid3_hc_rx_send_feedback(sk, skb, CCID3_FBACK_PARAM_CHANGE);
        /*
         * Feedback for first non-empty data packet (RFC 3448, 6.3)
         */
        else if (unlikely(hcrx->feedback == CCID3_FBACK_NONE && is_data_packet))
                ccid3_hc_rx_send_feedback(sk, skb, CCID3_FBACK_INITIAL);
        /*
         * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3
         */
        else if (!tfrc_rx_hist_loss_pending(&hcrx->hist) && is_data_packet &&
                 SUB16(dccp_hdr(skb)->dccph_ccval, hcrx->last_counter) > 3)
                ccid3_hc_rx_send_feedback(sk, skb, CCID3_FBACK_PERIODIC);
}

static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
{
        struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);

        tfrc_lh_init(&hcrx->li_hist);
        return tfrc_rx_hist_init(&hcrx->hist, sk);
}

static void ccid3_hc_rx_exit(struct sock *sk)
{
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);

        tfrc_rx_hist_purge(&hcrx->hist);
        tfrc_lh_cleanup(&hcrx->li_hist);
}

static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
        info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
        info->tcpi_rcv_rtt = tfrc_rx_hist_rtt(&ccid3_hc_rx_sk(sk)->hist);
}

static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
                                  u32 __user *optval, int __user *optlen)
{
        const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        struct tfrc_rx_info rx_info;
        const void *val;

        switch (optname) {
        case DCCP_SOCKOPT_CCID_RX_INFO:
                if (len < sizeof(rx_info))
                        return -EINVAL;
                rx_info.tfrcrx_x_recv = hcrx->x_recv;
                rx_info.tfrcrx_rtt    = tfrc_rx_hist_rtt(&hcrx->hist);
                rx_info.tfrcrx_p      = tfrc_invert_loss_event_rate(hcrx->p_inverse);
                len = sizeof(rx_info);
                val = &rx_info;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen) || copy_to_user(optval, val, len))
                return -EFAULT;

        return 0;
}

static struct ccid_operations ccid3 = {
        .ccid_id                   = DCCPC_CCID3,
        .ccid_name                 = "TCP-Friendly Rate Control",
        .ccid_owner                = THIS_MODULE,
        .ccid_hc_tx_obj_size       = sizeof(struct ccid3_hc_tx_sock),
        .ccid_hc_tx_init           = ccid3_hc_tx_init,
        .ccid_hc_tx_exit           = ccid3_hc_tx_exit,
        .ccid_hc_tx_send_packet    = ccid3_hc_tx_send_packet,
        .ccid_hc_tx_packet_sent    = ccid3_hc_tx_packet_sent,
        .ccid_hc_tx_packet_recv    = ccid3_hc_tx_packet_recv,
        .ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
        .ccid_hc_rx_obj_size       = sizeof(struct ccid3_hc_rx_sock),
        .ccid_hc_rx_init           = ccid3_hc_rx_init,
        .ccid_hc_rx_exit           = ccid3_hc_rx_exit,
        .ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
        .ccid_hc_rx_packet_recv    = ccid3_hc_rx_packet_recv,
        .ccid_hc_rx_get_info       = ccid3_hc_rx_get_info,
        .ccid_hc_tx_get_info       = ccid3_hc_tx_get_info,
        .ccid_hc_rx_getsockopt     = ccid3_hc_rx_getsockopt,
        .ccid_hc_tx_getsockopt     = ccid3_hc_tx_getsockopt,
};

module_param(do_osc_prev, bool, 0644);
MODULE_PARM_DESC(do_osc_prev, "Use Oscillation Prevention (RFC 3448, 4.5)");

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
module_param(ccid3_debug, bool, 0644);
MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
#endif

static __init int ccid3_module_init(void)
{
        struct timespec tp;

        /*
         * Without a fine-grained clock resolution, RTTs/X_recv are not sampled
         * correctly and feedback is sent either too early or too late.
         */
        hrtimer_get_res(CLOCK_MONOTONIC, &tp);
        if (tp.tv_sec || tp.tv_nsec > DCCP_TIME_RESOLUTION * NSEC_PER_USEC) {
                printk(KERN_ERR "%s: Timer too coarse (%ld usec), need %u-usec"
                       " resolution - check your clocksource.\n", __func__,
                       tp.tv_nsec/NSEC_PER_USEC, DCCP_TIME_RESOLUTION);
                return -ESOCKTNOSUPPORT;
        }
        return ccid_register(&ccid3);
}
module_init(ccid3_module_init);

static __exit void ccid3_module_exit(void)
{
        ccid_unregister(&ccid3);
}
module_exit(ccid3_module_exit);

MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
              "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
MODULE_LICENSE("GPL");
MODULE_ALIAS("net-dccp-ccid-3");