/*
 *  net/dccp/ccids/ccid3.c
 *
 *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "../ccid.h"
#include "../dccp.h"
#include "lib/packet_history.h"
#include "lib/loss_interval.h"
#include "lib/tfrc.h"
#include "ccid3.h"
  42. /*
  43. * Reason for maths here is to avoid 32 bit overflow when a is big.
  44. * With this we get close to the limit.
  45. */
  46. static u32 usecs_div(const u32 a, const u32 b)
  47. {
  48. const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 :
  49. a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 :
  50. a < (UINT_MAX / (USEC_PER_SEC / 100)) ? 100 :
  51. a < (UINT_MAX / (USEC_PER_SEC / 500)) ? 500 :
  52. a < (UINT_MAX / (USEC_PER_SEC / 1000)) ? 1000 :
  53. a < (UINT_MAX / (USEC_PER_SEC / 5000)) ? 5000 :
  54. a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
  55. a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
  56. 100000;
  57. const u32 tmp = a * (USEC_PER_SEC / div);
  58. return (b >= 2 * div) ? tmp / (b / div) : tmp;
  59. }
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static int ccid3_debug;
#define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
#else
#define ccid3_pr_debug(format, a...)
#endif

static struct dccp_tx_hist *ccid3_tx_hist;
static struct dccp_rx_hist *ccid3_rx_hist;
static struct dccp_li_hist *ccid3_li_hist;
  69. #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
  70. static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
  71. {
  72. static char *ccid3_state_names[] = {
  73. [TFRC_SSTATE_NO_SENT] = "NO_SENT",
  74. [TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
  75. [TFRC_SSTATE_FBACK] = "FBACK",
  76. [TFRC_SSTATE_TERM] = "TERM",
  77. };
  78. return ccid3_state_names[state];
  79. }
  80. #endif
  81. static void ccid3_hc_tx_set_state(struct sock *sk,
  82. enum ccid3_hc_tx_states state)
  83. {
  84. struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
  85. enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;
  86. ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
  87. dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
  88. ccid3_tx_state_name(state));
  89. WARN_ON(state == oldstate);
  90. hctx->ccid3hctx_state = state;
  91. }
/*
 * Recalculate scheduled nominal send time t_nom, inter-packet interval
 * t_ipi, and delta value. Should be called after each change to X.
 */
static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
{
	/* Roll t_nom back by the old t_ipi so it can be re-advanced below
	 * using the freshly computed interval. */
	timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
	hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x);

	/* Update nominal send time with regard to the new t_ipi */
	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2);
	 * delta is the scheduling slack allowed in send_packet(). */
	hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
					   TFRC_OPSYS_HALF_TIME_GRAN);
}
/*
 * Update X by
 *    If (p > 0)
 *       x_calc = calcX(s, R, p);
 *       X = max(min(X_calc, 2 * X_recv), s / t_mbi);
 *    Else
 *       If (now - tld >= R)
 *          X = max(min(2 * X, 2 * X_recv), s / R);
 *          tld = now;
 * (Allowed sending rate as per [RFC 3448, 4.3].)
 */
static void ccid3_hc_tx_update_x(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	const __u32 old_x = hctx->ccid3hctx_x;

	/* To avoid large error in calcX */
	if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
		/* Loss-based regime: throughput equation bounds the rate */
		hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
						     hctx->ccid3hctx_rtt,
						     hctx->ccid3hctx_p);
		hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc,
						     2 * hctx->ccid3hctx_x_recv),
					       (hctx->ccid3hctx_s /
						TFRC_MAX_BACK_OFF_TIME));
	} else {
		/* No loss yet: at most double X once per RTT (slow start) */
		struct timeval now;

		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >=
		    hctx->ccid3hctx_rtt) {
			hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv,
							     hctx->ccid3hctx_x) * 2,
						       usecs_div(hctx->ccid3hctx_s,
								 hctx->ccid3hctx_rtt));
			hctx->ccid3hctx_t_ld = now;
		}
	}

	/* Changed rate invalidates t_ipi/t_nom/delta; recompute them */
	if (hctx->ccid3hctx_x != old_x)
		ccid3_update_send_time(hctx);
}
/*
 * No-feedback timer [RFC 3448, 4.4]: fires when no feedback has been
 * received for an entire feedback interval; halves the sending rate and
 * reschedules itself. Runs in (soft)irq context with data == (struct sock *).
 */
static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	unsigned long next_tmout = USEC_PER_SEC / 5;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket is locked by a process context user: retry later. */
		/* XXX: set some sensible MIB */
		goto restart_timer;
	}

	ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
		       ccid3_tx_state_name(hctx->ccid3hctx_state));

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_FBACK:
		/* Halve send rate, but never below the absolute floor of
		 * one packet per t_mbi seconds. */
		hctx->ccid3hctx_x /= 2;
		if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s /
					 TFRC_MAX_BACK_OFF_TIME))
			hctx->ccid3hctx_x = (hctx->ccid3hctx_s /
					     TFRC_MAX_BACK_OFF_TIME);

		ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
			       "bytes/s\n",
			       dccp_role(sk), sk,
			       ccid3_tx_state_name(hctx->ccid3hctx_state),
			       hctx->ccid3hctx_x);
		/* The value of R is still undefined and so we can not recompute
		 * the timout value. Keep initial value as per [RFC 4342, 5]. */
		next_tmout = TFRC_INITIAL_TIMEOUT;
		ccid3_update_send_time(hctx);
		break;
	case TFRC_SSTATE_FBACK:
		/*
		 * Check if IDLE since last timeout and recv rate is less than
		 * 4 packets per RTT
		 */
		if (!hctx->ccid3hctx_idle ||
		    (hctx->ccid3hctx_x_recv >=
		     4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
			ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
				       dccp_role(sk), sk,
				       ccid3_tx_state_name(hctx->ccid3hctx_state));
			/* Halve sending rate */

			/*  If (X_calc > 2 * X_recv)
			 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
			 *  Else
			 *    X_recv = X_calc / 4;
			 */
			BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
			       hctx->ccid3hctx_x_calc == 0);

			/* check also if p is zero -> x_calc is infinity? */
			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
			    hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
				hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
							       hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME));
			else
				hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;

			/* Update sending rate */
			ccid3_hc_tx_update_x(sk);
		}
		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * R, 2 * s / X)
		 */
		next_tmout = max_t(u32, hctx->ccid3hctx_t_rto,
				   2 * usecs_div(hctx->ccid3hctx_s,
						 hctx->ccid3hctx_x));
		break;
	case TFRC_SSTATE_NO_SENT:
		DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk);
		/* fall through */
	case TFRC_SSTATE_TERM:
		goto out;
	}

	hctx->ccid3hctx_idle = 1;

restart_timer:
	sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
		       jiffies + usecs_to_jiffies(next_tmout));
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/*
 * returns
 *   > 0: delay (in msecs) that should pass before actually sending
 *   = 0: can send immediately
 *   < 0: error condition; do not send packet
 */
static int ccid3_hc_tx_send_packet(struct sock *sk,
				   struct sk_buff *skb, int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct dccp_tx_hist_entry *new_packet;
	struct timeval now;
	long delay;

	BUG_ON(hctx == NULL);

	/*
	 * This function is called only for Data and DataAck packets. Sending
	 * zero-sized Data(Ack)s is theoretically possible, but for congestion
	 * control this case is pathological - ignore it.
	 */
	if (unlikely(len == 0))
		return -EBADMSG;

	/* See if last packet allocated was not sent; if so reuse it,
	 * otherwise allocate a fresh history entry for this packet. */
	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
	if (new_packet == NULL || new_packet->dccphtx_sent) {
		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
						    SLAB_ATOMIC);
		if (unlikely(new_packet == NULL)) {
			DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
				  "send refused\n", dccp_role(sk), sk);
			return -ENOBUFS;
		}

		dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
	}

	dccp_timestamp(sk, &now);

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_SENT:
		/* Very first packet: arm the no-feedback timer and start
		 * the sender state machine. */
		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
		hctx->ccid3hctx_last_win_count	 = 0;
		hctx->ccid3hctx_t_last_win_count = now;
		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);

		/* First timeout, according to [RFC 3448, 4.2], is 1 second */
		hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
		/* Initial delta: minimum of 0.5 sec and t_gran/2 */
		hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN;

		/* Set t_0 for initial packet */
		hctx->ccid3hctx_t_nom = now;
		break;
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
		/*
		 *	Scheduling of packet transmissions [RFC 3448, 4.6]
		 *
		 * if (t_now > t_nom - delta)
		 *	// send the packet now
		 * else
		 *	// send the packet in (t_nom - t_now) milliseconds.
		 */
		if (delay >= hctx->ccid3hctx_delta)
			return delay / 1000L;
		break;
	case TFRC_SSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return -EINVAL;
	}

	/* prepare to send now (add options etc.) */
	dp->dccps_hc_tx_insert_options = 1;
	new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval =
		hctx->ccid3hctx_last_win_count;
	/* Advance nominal send time for the next packet */
	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);

	return 0;
}
/*
 * Called after a packet has actually been transmitted: timestamp the
 * pending history entry and maintain the window counter of RFC 4342, 8.1.
 * @more: unused here; @len: payload length (0 for pure ACKs).
 */
static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct timeval now;

	BUG_ON(hctx == NULL);

	dccp_timestamp(sk, &now);

	/* check if we have sent a data packet */
	if (len > 0) {
		unsigned long quarter_rtt;
		struct dccp_tx_hist_entry *packet;

		packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
		if (unlikely(packet == NULL)) {
			DCCP_WARN("packet doesn't exist in history!\n");
			return;
		}
		if (unlikely(packet->dccphtx_sent)) {
			DCCP_WARN("no unsent packet in history!\n");
			return;
		}
		packet->dccphtx_tstamp = now;
		packet->dccphtx_seqno  = dp->dccps_gss;
		/*
		 * Check if win_count have changed
		 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
		 */
		quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
		if (likely(hctx->ccid3hctx_rtt > 8))
			/* number of quarter-RTTs elapsed since last update */
			quarter_rtt /= hctx->ccid3hctx_rtt / 4;

		if (quarter_rtt > 0) {
			hctx->ccid3hctx_t_last_win_count = now;
			/* advance counter by at most 5, modulo 16 */
			hctx->ccid3hctx_last_win_count	 = (hctx->ccid3hctx_last_win_count +
							    min_t(unsigned long, quarter_rtt, 5)) % 16;
			ccid3_pr_debug("%s, sk=%p, window changed from "
				       "%u to %u!\n",
				       dccp_role(sk), sk,
				       packet->dccphtx_ccval,
				       hctx->ccid3hctx_last_win_count);
		}

		hctx->ccid3hctx_idle = 0;
		packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
		packet->dccphtx_sent = 1;
	} else
		ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n",
			       dccp_role(sk), sk, dp->dccps_gss);
}
/*
 * Process received feedback (ACK/DataAck): update the RTT estimate, the
 * receive rate X_recv, the loss event rate p, and the allowed sending
 * rate X; then re-arm the no-feedback timer [RFC 3448, 4.3].
 */
static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;
	struct dccp_tx_hist_entry *packet;
	struct timeval now;
	unsigned long next_tmout;
	u32 t_elapsed;
	u32 pinv;
	u32 x_recv;
	u32 r_sample;

	BUG_ON(hctx == NULL);

	/* we are only interested in ACKs */
	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
	      DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
		return;

	opt_recv = &hctx->ccid3hctx_options_received;

	/* elapsed-time option is in multiples of 10us; convert to usecs */
	t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
	x_recv = opt_recv->ccid3or_receive_rate;
	pinv = opt_recv->ccid3or_loss_event_rate;

	switch (hctx->ccid3hctx_state) {
	case TFRC_SSTATE_NO_FBACK:
	case TFRC_SSTATE_FBACK:
		/* Calculate new round trip sample by
		 * R_sample = (now - t_recvdata) - t_delay */
		/* get t_recvdata from history */
		packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
						 DCCP_SKB_CB(skb)->dccpd_ack_seq);
		if (unlikely(packet == NULL)) {
			DCCP_WARN("%s, sk=%p, seqno %llu(%s) does't exist "
				  "in history!\n", dccp_role(sk), sk,
				  (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				  dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
			return;
		}

		/* Update RTT */
		dccp_timestamp(sk, &now);
		r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
		if (unlikely(r_sample <= t_elapsed))
			/* peer-reported hold time exceeds measured interval:
			 * keep the raw sample rather than going negative */
			DCCP_WARN("r_sample=%uus,t_elapsed=%uus\n",
				  r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		/* Update RTT estimate by
		 * If (No feedback recv)
		 *    R = R_sample;
		 * Else
		 *    R = q * R + (1 - q) * R_sample;
		 *
		 * q is a constant, RFC 3448 recomments 0.9
		 */
		if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
			ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
			hctx->ccid3hctx_rtt = r_sample;
		} else
			hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
					      r_sample / 10;

		ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
			       "r_sample=%us\n", dccp_role(sk), sk,
			       hctx->ccid3hctx_rtt, r_sample);

		/* Update timeout interval: t_RTO = max(4R, 1 sec) */
		hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
					      USEC_PER_SEC);

		/* Update receive rate */
		hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */

		/* Update loss event rate: option carries 1/p (~0 == no loss) */
		if (pinv == ~0 || pinv == 0)
			hctx->ccid3hctx_p = 0;
		else {
			hctx->ccid3hctx_p = 1000000 / pinv;

			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
				hctx->ccid3hctx_p = TFRC_SMALLEST_P;
				ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
					       dccp_role(sk), sk);
			}
		}

		/* unschedule no feedback timer */
		sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

		/* Update sending rate (and likely t_ipi, t_nom, and delta) */
		ccid3_hc_tx_update_x(sk);

		/* remove all packets older than the one acked from history */
		dccp_tx_hist_purge_older(ccid3_tx_hist,
					 &hctx->ccid3hctx_hist, packet);
		/*
		 * As we have calculated new ipi, delta, t_nom it is possible that
		 * we now can send a packet, so wake up dccp_wait_for_ccids.
		 */
		sk->sk_write_space(sk);

		/*
		 * Schedule no feedback timer to expire in
		 * max(4 * R, 2 * s / X)
		 */
		next_tmout = max(hctx->ccid3hctx_t_rto,
				 2 * usecs_div(hctx->ccid3hctx_s,
					       hctx->ccid3hctx_x));

		ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
			       "expire in %lu jiffies (%luus)\n",
			       dccp_role(sk), sk,
			       usecs_to_jiffies(next_tmout), next_tmout);

		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
			       jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));

		/* set idle flag */
		hctx->ccid3hctx_idle = 1;
		break;
	case TFRC_SSTATE_NO_SENT:
		DCCP_WARN("Illegal ACK received - no packet has been sent\n");
		/* fall through */
	case TFRC_SSTATE_TERM:		/* ignore feedback when closing */
		break;
	}
}
  459. static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
  460. {
  461. const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
  462. BUG_ON(hctx == NULL);
  463. if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
  464. DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
  465. return 0;
  466. }
/*
 * Parse one TFRC-specific option received from the peer and stash the
 * value in hctx->ccid3hctx_options_received for packet_recv() to use.
 * Returns 0 on success or -EINVAL for a malformed option.
 */
static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
				     unsigned char len, u16 idx,
				     unsigned char *value)
{
	int rc = 0;
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
	struct ccid3_options_received *opt_recv;

	BUG_ON(hctx == NULL);

	opt_recv = &hctx->ccid3hctx_options_received;

	/* New packet (GSR advanced): reset the per-packet option cache */
	if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
		opt_recv->ccid3or_seqno		     = dp->dccps_gsr;
		opt_recv->ccid3or_loss_event_rate    = ~0;
		opt_recv->ccid3or_loss_intervals_idx = 0;
		opt_recv->ccid3or_loss_intervals_len = 0;
		opt_recv->ccid3or_receive_rate	     = 0;
	}

	switch (option) {
	case TFRC_OPT_LOSS_EVENT_RATE:
		if (unlikely(len != 4)) {
			DCCP_WARN("%s, sk=%p, invalid len %d "
				  "for TFRC_OPT_LOSS_EVENT_RATE\n",
				  dccp_role(sk), sk, len);
			rc = -EINVAL;
		} else {
			/* NOTE(review): 'value' points into the option area
			 * and may be unaligned; *(__be32 *) assumes aligned
			 * access is safe on this arch - verify for platforms
			 * with strict alignment. */
			opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_loss_event_rate);
		}
		break;
	case TFRC_OPT_LOSS_INTERVALS:
		/* Only recorded; index/length are consumed elsewhere */
		opt_recv->ccid3or_loss_intervals_idx = idx;
		opt_recv->ccid3or_loss_intervals_len = len;
		ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
			       dccp_role(sk), sk,
			       opt_recv->ccid3or_loss_intervals_idx,
			       opt_recv->ccid3or_loss_intervals_len);
		break;
	case TFRC_OPT_RECEIVE_RATE:
		if (unlikely(len != 4)) {
			DCCP_WARN("%s, sk=%p, invalid len %d "
				  "for TFRC_OPT_RECEIVE_RATE\n",
				  dccp_role(sk), sk, len);
			rc = -EINVAL;
		} else {
			/* see alignment note above */
			opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
			ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
				       dccp_role(sk), sk,
				       opt_recv->ccid3or_receive_rate);
		}
		break;
	}

	return rc;
}
  522. static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
  523. {
  524. struct dccp_sock *dp = dccp_sk(sk);
  525. struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
  526. if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
  527. dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
  528. hctx->ccid3hctx_s = dp->dccps_packet_size;
  529. else
  530. hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE;
  531. /* Set transmission rate to 1 packet per second */
  532. hctx->ccid3hctx_x = hctx->ccid3hctx_s;
  533. hctx->ccid3hctx_t_rto = USEC_PER_SEC;
  534. hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
  535. INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
  536. hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
  537. hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
  538. init_timer(&hctx->ccid3hctx_no_feedback_timer);
  539. return 0;
  540. }
/*
 * Tear down the TX half-connection: enter TERM (so timer/feedback paths
 * become no-ops), cancel the pending timer, and release history entries.
 */
static void ccid3_hc_tx_exit(struct sock *sk)
{
	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);

	BUG_ON(hctx == NULL);

	ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
	sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);

	/* Empty packet history */
	dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
}
  550. /*
  551. * RX Half Connection methods
  552. */
  553. #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
  554. static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
  555. {
  556. static char *ccid3_rx_state_names[] = {
  557. [TFRC_RSTATE_NO_DATA] = "NO_DATA",
  558. [TFRC_RSTATE_DATA] = "DATA",
  559. [TFRC_RSTATE_TERM] = "TERM",
  560. };
  561. return ccid3_rx_state_names[state];
  562. }
  563. #endif
  564. static void ccid3_hc_rx_set_state(struct sock *sk,
  565. enum ccid3_hc_rx_states state)
  566. {
  567. struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
  568. enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;
  569. ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
  570. dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
  571. ccid3_rx_state_name(state));
  572. WARN_ON(state == oldstate);
  573. hcrx->ccid3hcrx_state = state;
  574. }
/*
 * Build and send a feedback packet [RFC 3448, 6]: compute X_recv over the
 * interval since the last feedback, snapshot elapsed time and 1/p, then
 * request option insertion and transmit an Ack.
 */
static void ccid3_hc_rx_send_feedback(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_rx_hist_entry *packet;
	struct timeval now;

	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);

	dccp_timestamp(sk, &now);

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		/* No data received yet, so no measurable receive rate */
		hcrx->ccid3hcrx_x_recv = 0;
		break;
	case TFRC_RSTATE_DATA: {
		/* X_recv = bytes received / time since last feedback */
		const u32 delta = timeval_delta(&now,
					&hcrx->ccid3hcrx_tstamp_last_feedback);
		hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
						   delta);
	}
		break;
	case TFRC_RSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return;
	}

	packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("%s, sk=%p, no data packet in history!\n",
			  dccp_role(sk), sk);
		return;
	}

	/* Reset the measurement interval for the next feedback round */
	hcrx->ccid3hcrx_tstamp_last_feedback = now;
	hcrx->ccid3hcrx_ccval_last_counter   = packet->dccphrx_ccval;
	hcrx->ccid3hcrx_bytes_recv	     = 0;

	/* Convert to multiples of 10us */
	hcrx->ccid3hcrx_elapsed_time =
			timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
	/* Report 1/p; ~0 encodes "no loss observed" */
	if (hcrx->ccid3hcrx_p == 0)
		hcrx->ccid3hcrx_pinv = ~0;
	else
		hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
	dp->dccps_hc_rx_insert_options = 1;
	dccp_send_ack(sk);
}
/*
 * Insert the receiver's TFRC options (elapsed time, timestamp, loss event
 * rate, receive rate) into an outgoing Ack-bearing packet.
 * Returns 0 on success, -1 if any option failed to fit.
 */
static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
	const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	__be32 x_recv, pinv;

	BUG_ON(hcrx == NULL);

	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
		return 0;

	DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;

	/* Options below only make sense on packets carrying an Ack */
	if (dccp_packet_without_ack(skb))
		return 0;

	x_recv = htonl(hcrx->ccid3hcrx_x_recv);
	pinv   = htonl(hcrx->ccid3hcrx_pinv);

	/* elapsed time of 0 is valid but carries no information - skip it */
	if ((hcrx->ccid3hcrx_elapsed_time != 0 &&
	     dccp_insert_option_elapsed_time(sk, skb,
					     hcrx->ccid3hcrx_elapsed_time)) ||
	    dccp_insert_option_timestamp(sk, skb) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
			       &pinv, sizeof(pinv)) ||
	    dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
			       &x_recv, sizeof(x_recv)))
		return -1;

	return 0;
}
/* calculate first loss interval
 *
 * returns estimated loss interval in usecs
 * (~0 is the error/no-estimate sentinel used throughout).
 * Approximates the RTT from window-counter spacing in the receive history,
 * derives X_recv, and inverts the TFRC throughput equation to get p. */
static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
	u32 rtt, delta, x_recv, fval, p, tmp2;
	struct timeval tstamp = { 0, };
	int interval = 0;
	int win_count = 0;
	int step = 0;
	u64 tmp1;

	/* Walk history (newest first): remember the newest data packet's
	 * timestamp/ccval, then look for one whose window counter differs
	 * by more than 4 (i.e. > 1 RTT apart per RFC 4342, 8.1). */
	list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
				 dccphrx_node) {
		if (dccp_rx_hist_entry_data_packet(entry)) {
			tail = entry;

			switch (step) {
			case 0:
				tstamp	  = entry->dccphrx_tstamp;
				win_count = entry->dccphrx_ccval;
				step = 1;
				break;
			case 1:
				interval = win_count - entry->dccphrx_ccval;
				/* counter wraps modulo TFRC_WIN_COUNT_LIMIT */
				if (interval < 0)
					interval += TFRC_WIN_COUNT_LIMIT;
				if (interval > 4)
					goto found;
				break;
			}
		}
	}

	if (unlikely(step == 0)) {
		DCCP_WARN("%s, sk=%p, packet history has no data packets!\n",
			  dccp_role(sk), sk);
		return ~0;
	}

	if (unlikely(interval == 0)) {
		DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0."
			  "Defaulting to 1\n", dccp_role(sk), sk);
		interval = 1;
	}
found:
	if (!tail) {
		DCCP_CRIT("tail is null\n");
		return ~0;
	}
	/* interval window-counter steps ~ interval/4 RTTs elapsed */
	rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
	ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
		       dccp_role(sk), sk, rtt);

	if (rtt == 0) {
		DCCP_WARN("RTT==0, setting to 1\n");
		rtt = 1;
	}

	dccp_timestamp(sk, &tstamp);
	delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
	x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
	if (x_recv == 0)
		x_recv = hcrx->ccid3hcrx_x_recv;

	/* fval = s / (X_recv * R), kept in scaled integer arithmetic */
	tmp1 = (u64)x_recv * (u64)rtt;
	do_div(tmp1, 10000000);
	tmp2 = (u32)tmp1;

	if (!tmp2) {
		DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt);
		return ~0;
	}

	fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
	/* do not alter order above or you will get overflow on 32 bit */
	p = tfrc_calc_x_reverse_lookup(fval);
	ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);

	/* loss interval length is 1/p packets */
	if (p == 0)
		return ~0;
	else
		return 1000000 / p;
}
/*
 * Record a new loss event ending at @seq_loss (window counter @win_loss)
 * in the loss interval history, keeping the history length constant by
 * dropping the oldest interval.
 */
static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_li_hist_entry *head;
	u64 seq_temp;

	if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
		/* First loss event: seed the whole interval history and
		 * estimate the first interval from the receive rate. */
		if (!dccp_li_hist_interval_new(ccid3_li_hist,
		    &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
			return;

		head = list_entry(hcrx->ccid3hcrx_li_hist.next,
				  struct dccp_li_hist_entry, dccplih_node);
		head->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
	} else {
		struct dccp_li_hist_entry *entry;
		struct list_head *tail;

		head = list_entry(hcrx->ccid3hcrx_li_hist.next,
				  struct dccp_li_hist_entry, dccplih_node);
		/* FIXME win count check removed as was wrong */
		/* should make this check with receive history */
		/* and compare there as per section 10.2 of RFC4342 */

		/* new loss event detected */
		/* calculate last interval length */
		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);

		if (entry == NULL) {
			DCCP_BUG("out of memory - can not allocate entry");
			return;
		}

		/* Push the new interval at the front, evict the oldest */
		list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);

		tail = hcrx->ccid3hcrx_li_hist.prev;
		list_del(tail);
		kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);

		/* Create the newest interval */
		entry->dccplih_seqno	 = seq_loss;
		entry->dccplih_interval	 = seq_temp;
		entry->dccplih_win_count = win_loss;
	}
}
/*
 * Detect packet loss for an incoming packet and record it in the receive
 * history. Tracks ccid3hcrx_seqno_nonloss, the highest sequence number up to
 * which no loss has been declared.
 *
 * Returns 1 if at least one loss event was detected, 0 otherwise.
 */
static int ccid3_hc_rx_detect_loss(struct sock *sk,
				    struct dccp_rx_hist_entry *packet)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
	u64 seqno = packet->dccphrx_seqno;
	u64 tmp_seqno;
	int loss = 0;
	u8 ccval;

	tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;

	/* Empty history, or the packet immediately follows the non-loss
	 * pointer: no gap, just advance the pointer. */
	if (!rx_hist ||
	   follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
		hcrx->ccid3hcrx_seqno_nonloss = seqno;
		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
		goto detect_out;
	}

	/* Gap larger than the reordering tolerance: declare one loss event
	 * per hole, then advance the non-loss pointer across any packets
	 * that were already received out of order (present in history). */
	while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
	   > TFRC_RECV_NUM_LATE_LOSS) {
		loss = 1;
		ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
		   hcrx->ccid3hcrx_ccval_nonloss);
		/* Step past the lost packet ... */
		tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
		dccp_inc_seqno(&tmp_seqno);
		hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
		/* ... and swallow consecutive already-received packets */
		dccp_inc_seqno(&tmp_seqno);
		while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
		   tmp_seqno, &ccval)) {
			hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
			hcrx->ccid3hcrx_ccval_nonloss = ccval;
			dccp_inc_seqno(&tmp_seqno);
		}
	}

	/* FIXME - this code could be simplified with above while */
	/* but works at moment */
	if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
		hcrx->ccid3hcrx_seqno_nonloss = seqno;
		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
	}

detect_out:
	/* Always record the packet, whether or not loss was detected */
	dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
				&hcrx->ccid3hcrx_li_hist, packet,
				hcrx->ccid3hcrx_seqno_nonloss);
	return loss;
}
/*
 * Receiver half-connection packet handler: updates the RTT estimate from
 * timestamp echoes, records the packet in the receive history, runs loss
 * detection, and sends feedback when the TFRC rules require it.
 */
static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
	const struct dccp_options_received *opt_recv;
	struct dccp_rx_hist_entry *packet;
	struct timeval now;
	u32 p_prev, rtt_prev, r_sample, t_elapsed;
	int loss;

	BUG_ON(hcrx == NULL);

	opt_recv = &dccp_sk(sk)->dccps_options_received;

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_ACK:
		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			return;
		/* fall through - ACKs with data flow share the RTT code */
	case DCCP_PKT_DATAACK:
		if (opt_recv->dccpor_timestamp_echo == 0)
			break;
		rtt_prev = hcrx->ccid3hcrx_rtt;
		dccp_timestamp(sk, &now);
		/* NOTE(review): echo/elapsed option values are multiplied
		 * by 10 here, i.e. presumably carried in 10us units —
		 * confirm against the option-parsing code. */
		timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
		r_sample = timeval_usecs(&now);
		t_elapsed = opt_recv->dccpor_elapsed_time * 10;

		if (unlikely(r_sample <= t_elapsed))
			DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
				  r_sample, t_elapsed);
		else
			r_sample -= t_elapsed;

		/* First sample is taken verbatim; afterwards blend 90%% of
		 * the old estimate with 10%% of the new sample. */
		if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
			hcrx->ccid3hcrx_rtt = r_sample;
		else
			hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
					      r_sample / 10;

		if (rtt_prev != hcrx->ccid3hcrx_rtt)
			ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
				       dccp_role(sk), hcrx->ccid3hcrx_rtt,
				       opt_recv->dccpor_elapsed_time);
		break;
	case DCCP_PKT_DATA:
		break;
	default: /* We're not interested in other packet types, move along */
		return;
	}

	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
					skb, SLAB_ATOMIC);
	if (unlikely(packet == NULL)) {
		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
			  "to history, consider it lost!\n", dccp_role(sk), sk);
		return;
	}

	loss = ccid3_hc_rx_detect_loss(sk, packet);

	/* Pure ACKs carry no application data to account for below */
	if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
		return;

	switch (hcrx->ccid3hcrx_state) {
	case TFRC_RSTATE_NO_DATA:
		ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
			       "feedback\n",
			       dccp_role(sk), sk,
			       dccp_state_name(sk->sk_state), skb);
		ccid3_hc_rx_send_feedback(sk);
		ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
		return;
	case TFRC_RSTATE_DATA:
		/* Account payload bytes (total length minus header,
		 * dccph_doff is in 32-bit words) */
		hcrx->ccid3hcrx_bytes_recv += skb->len -
					      dccp_hdr(skb)->dccph_doff * 4;
		if (loss)
			break;	/* fall out to the loss-reaction code below */

		/* No loss: send feedback at most once per RTT */
		dccp_timestamp(sk, &now);
		if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
		    hcrx->ccid3hcrx_rtt) {
			hcrx->ccid3hcrx_tstamp_last_ack = now;
			ccid3_hc_rx_send_feedback(sk);
		}
		return;
	case TFRC_RSTATE_TERM:
		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
		return;
	}

	/* Dealing with packet loss */
	ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
		       dccp_role(sk), sk, dccp_state_name(sk->sk_state));

	p_prev = hcrx->ccid3hcrx_p;

	/* Calculate loss event rate */
	if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
		u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);

		/* Scaling up by 1000000 as fixed decimal */
		if (i_mean != 0)
			hcrx->ccid3hcrx_p = 1000000 / i_mean;
	} else
		DCCP_BUG("empty loss history");

	/* Feedback is sent immediately only when the loss rate increased */
	if (hcrx->ccid3hcrx_p > p_prev) {
		ccid3_hc_rx_send_feedback(sk);
		return;
	}
}
  893. static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
  894. {
  895. struct dccp_sock *dp = dccp_sk(sk);
  896. struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
  897. ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
  898. if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
  899. dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
  900. hcrx->ccid3hcrx_s = dp->dccps_packet_size;
  901. else
  902. hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE;
  903. hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
  904. INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
  905. INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
  906. dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
  907. hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
  908. hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
  909. return 0;
  910. }
  911. static void ccid3_hc_rx_exit(struct sock *sk)
  912. {
  913. struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
  914. BUG_ON(hcrx == NULL);
  915. ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
  916. /* Empty packet history */
  917. dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);
  918. /* Empty loss interval history */
  919. dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
  920. }
  921. static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
  922. {
  923. const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
  924. /* Listen socks doesn't have a private CCID block */
  925. if (sk->sk_state == DCCP_LISTEN)
  926. return;
  927. BUG_ON(hcrx == NULL);
  928. info->tcpi_ca_state = hcrx->ccid3hcrx_state;
  929. info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
  930. info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
  931. }
  932. static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
  933. {
  934. const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
  935. /* Listen socks doesn't have a private CCID block */
  936. if (sk->sk_state == DCCP_LISTEN)
  937. return;
  938. BUG_ON(hctx == NULL);
  939. info->tcpi_rto = hctx->ccid3hctx_t_rto;
  940. info->tcpi_rtt = hctx->ccid3hctx_rtt;
  941. }
  942. static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
  943. u32 __user *optval, int __user *optlen)
  944. {
  945. const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
  946. const void *val;
  947. /* Listen socks doesn't have a private CCID block */
  948. if (sk->sk_state == DCCP_LISTEN)
  949. return -EINVAL;
  950. switch (optname) {
  951. case DCCP_SOCKOPT_CCID_RX_INFO:
  952. if (len < sizeof(hcrx->ccid3hcrx_tfrc))
  953. return -EINVAL;
  954. len = sizeof(hcrx->ccid3hcrx_tfrc);
  955. val = &hcrx->ccid3hcrx_tfrc;
  956. break;
  957. default:
  958. return -ENOPROTOOPT;
  959. }
  960. if (put_user(len, optlen) || copy_to_user(optval, val, len))
  961. return -EFAULT;
  962. return 0;
  963. }
  964. static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
  965. u32 __user *optval, int __user *optlen)
  966. {
  967. const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
  968. const void *val;
  969. /* Listen socks doesn't have a private CCID block */
  970. if (sk->sk_state == DCCP_LISTEN)
  971. return -EINVAL;
  972. switch (optname) {
  973. case DCCP_SOCKOPT_CCID_TX_INFO:
  974. if (len < sizeof(hctx->ccid3hctx_tfrc))
  975. return -EINVAL;
  976. len = sizeof(hctx->ccid3hctx_tfrc);
  977. val = &hctx->ccid3hctx_tfrc;
  978. break;
  979. default:
  980. return -ENOPROTOOPT;
  981. }
  982. if (put_user(len, optlen) || copy_to_user(optval, val, len))
  983. return -EFAULT;
  984. return 0;
  985. }
/* CCID3 operations table registered with the DCCP CCID framework */
static struct ccid_operations ccid3 = {
	.ccid_id		   = DCCPC_CCID3,
	.ccid_name		   = "ccid3",
	.ccid_owner		   = THIS_MODULE,
	.ccid_hc_tx_obj_size	   = sizeof(struct ccid3_hc_tx_sock),
	.ccid_hc_tx_init	   = ccid3_hc_tx_init,
	.ccid_hc_tx_exit	   = ccid3_hc_tx_exit,
	.ccid_hc_tx_send_packet	   = ccid3_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	   = ccid3_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv	   = ccid3_hc_tx_packet_recv,
	.ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
	.ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
	.ccid_hc_rx_obj_size	   = sizeof(struct ccid3_hc_rx_sock),
	.ccid_hc_rx_init	   = ccid3_hc_rx_init,
	.ccid_hc_rx_exit	   = ccid3_hc_rx_exit,
	.ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
	.ccid_hc_rx_packet_recv	   = ccid3_hc_rx_packet_recv,
	.ccid_hc_rx_get_info	   = ccid3_hc_rx_get_info,
	.ccid_hc_tx_get_info	   = ccid3_hc_tx_get_info,
	.ccid_hc_rx_getsockopt	   = ccid3_hc_rx_getsockopt,
	.ccid_hc_tx_getsockopt	   = ccid3_hc_tx_getsockopt,
};
  1008. #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
  1009. module_param(ccid3_debug, int, 0444);
  1010. MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
  1011. #endif
  1012. static __init int ccid3_module_init(void)
  1013. {
  1014. int rc = -ENOBUFS;
  1015. ccid3_rx_hist = dccp_rx_hist_new("ccid3");
  1016. if (ccid3_rx_hist == NULL)
  1017. goto out;
  1018. ccid3_tx_hist = dccp_tx_hist_new("ccid3");
  1019. if (ccid3_tx_hist == NULL)
  1020. goto out_free_rx;
  1021. ccid3_li_hist = dccp_li_hist_new("ccid3");
  1022. if (ccid3_li_hist == NULL)
  1023. goto out_free_tx;
  1024. rc = ccid_register(&ccid3);
  1025. if (rc != 0)
  1026. goto out_free_loss_interval_history;
  1027. out:
  1028. return rc;
  1029. out_free_loss_interval_history:
  1030. dccp_li_hist_delete(ccid3_li_hist);
  1031. ccid3_li_hist = NULL;
  1032. out_free_tx:
  1033. dccp_tx_hist_delete(ccid3_tx_hist);
  1034. ccid3_tx_hist = NULL;
  1035. out_free_rx:
  1036. dccp_rx_hist_delete(ccid3_rx_hist);
  1037. ccid3_rx_hist = NULL;
  1038. goto out;
  1039. }
  1040. module_init(ccid3_module_init);
  1041. static __exit void ccid3_module_exit(void)
  1042. {
  1043. ccid_unregister(&ccid3);
  1044. if (ccid3_tx_hist != NULL) {
  1045. dccp_tx_hist_delete(ccid3_tx_hist);
  1046. ccid3_tx_hist = NULL;
  1047. }
  1048. if (ccid3_rx_hist != NULL) {
  1049. dccp_rx_hist_delete(ccid3_rx_hist);
  1050. ccid3_rx_hist = NULL;
  1051. }
  1052. if (ccid3_li_hist != NULL) {
  1053. dccp_li_hist_delete(ccid3_li_hist);
  1054. ccid3_li_hist = NULL;
  1055. }
  1056. }
  1057. module_exit(ccid3_module_exit);
  1058. MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
  1059. "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
  1060. MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
  1061. MODULE_LICENSE("GPL");
  1062. MODULE_ALIAS("net-dccp-ccid-3");