/* call.c: Rx call routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include "internal.h"

__RXACCT_DECL(atomic_t rxrpc_call_count);
__RXACCT_DECL(atomic_t rxrpc_message_count);

LIST_HEAD(rxrpc_calls);
DECLARE_RWSEM(rxrpc_calls_sem);

unsigned rxrpc_call_rcv_timeout = HZ/3;
static unsigned rxrpc_call_acks_timeout = HZ/3;
static unsigned rxrpc_call_dfr_ack_timeout = HZ/20;
static unsigned short rxrpc_call_max_resend = HZ/10;

const char *rxrpc_call_states[] = {
	"COMPLETE",
	"ERROR",
	"SRVR_RCV_OPID",
	"SRVR_RCV_ARGS",
	"SRVR_GOT_ARGS",
	"SRVR_SND_REPLY",
	"SRVR_RCV_FINAL_ACK",
	"CLNT_SND_ARGS",
	"CLNT_RCV_REPLY",
	"CLNT_GOT_REPLY"
};

const char *rxrpc_call_error_states[] = {
	"NO_ERROR",
	"LOCAL_ABORT",
	"PEER_ABORT",
	"LOCAL_ERROR",
	"REMOTE_ERROR"
};

const char *rxrpc_pkts[] = {
	"?00",
	"data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
	"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};

static const char *rxrpc_acks[] = {
	"---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
	"-?-"
};

static const char _acktype[] = "NA-";

static void rxrpc_call_receive_packet(struct rxrpc_call *call);
static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
					   struct rxrpc_message *msg);
static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
					  struct rxrpc_message *msg);
static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
					rxrpc_seq_t highest);
static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
static int __rxrpc_call_read_data(struct rxrpc_call *call);

static int rxrpc_call_record_ACK(struct rxrpc_call *call,
				 struct rxrpc_message *msg,
				 rxrpc_seq_t seq,
				 size_t count);

static int rxrpc_call_flush(struct rxrpc_call *call);

#define _state(call) \
	_debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]);

static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
{
	wake_up(&call->waitq);
}

static void rxrpc_call_default_error_func(struct rxrpc_call *call)
{
	wake_up(&call->waitq);
}

static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
{
	switch (call->app_err_state) {
	case RXRPC_ESTATE_LOCAL_ABORT:
		call->app_abort_code = -call->app_errno;
		/* fall through */
	case RXRPC_ESTATE_PEER_ABORT:
		call->app_errno = -ECONNABORTED;
		/* fall through */
	default:
		break;
	}
}

static void __rxrpc_call_acks_timeout(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_debug("ACKS TIMEOUT %05lu", jiffies - call->cjif);

	call->flags |= RXRPC_CALL_ACKS_TIMO;
	rxrpc_krxiod_queue_call(call);
}

static void __rxrpc_call_rcv_timeout(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_debug("RCV TIMEOUT %05lu", jiffies - call->cjif);

	call->flags |= RXRPC_CALL_RCV_TIMO;
	rxrpc_krxiod_queue_call(call);
}

static void __rxrpc_call_ackr_timeout(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_debug("ACKR TIMEOUT %05lu", jiffies - call->cjif);

	call->flags |= RXRPC_CALL_ACKR_TIMO;
	rxrpc_krxiod_queue_call(call);
}

/*****************************************************************************/
/*
 * calculate a timeout based on an RTT value
 */
static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call,
						      unsigned long val)
{
	unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ);

	expiry += 10;
	if (expiry < HZ / 25)
		expiry = HZ / 25;
	if (expiry > HZ)
		expiry = HZ;

	_leave(" = %lu jiffies", expiry);
	return jiffies + expiry;
} /* end __rxrpc_rtt_based_timeout() */
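/*
 * Worked example of the clamp above (illustrative figures, assuming the peer
 * rtt field is kept in microseconds as the 1000000/HZ divisor suggests): with
 * HZ = 100, a measured RTT of 50,000us gives 50000 / (1000000/100) = 5
 * jiffies, plus the fixed 10-jiffy margin, for a 15-jiffy expiry - already
 * inside the [HZ/25, HZ] = [4, 100] clamp.  A 200us LAN peer would compute
 * 0 + 10 = 10 jiffies, while a link whose RTT implied several seconds would
 * be clamped down to HZ, i.e. one second.
 */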
/*****************************************************************************/
/*
 * create a new call record
 */
static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
				      struct rxrpc_call **_call)
{
	struct rxrpc_call *call;

	_enter("%p", conn);

	/* allocate and initialise a call record */
	call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
	if (!call) {
		_leave(" ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&call->usage, 1);

	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->acks_pendq);
	INIT_LIST_HEAD(&call->rcv_receiveq);
	INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
	INIT_LIST_HEAD(&call->app_readyq);
	INIT_LIST_HEAD(&call->app_unreadyq);
	INIT_LIST_HEAD(&call->app_link);
	INIT_LIST_HEAD(&call->app_attn_link);

	init_timer(&call->acks_timeout);
	call->acks_timeout.data = (unsigned long) call;
	call->acks_timeout.function = __rxrpc_call_acks_timeout;

	init_timer(&call->rcv_timeout);
	call->rcv_timeout.data = (unsigned long) call;
	call->rcv_timeout.function = __rxrpc_call_rcv_timeout;

	init_timer(&call->ackr_dfr_timo);
	call->ackr_dfr_timo.data = (unsigned long) call;
	call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;

	call->conn = conn;
	call->ackr_win_bot = 1;
	call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
	call->ackr_prev_seq = 0;
	call->app_mark = RXRPC_APP_MARK_EOF;
	call->app_attn_func = rxrpc_call_default_attn_func;
	call->app_error_func = rxrpc_call_default_error_func;
	call->app_aemap_func = rxrpc_call_default_aemap_func;
	call->app_scr_alloc = call->app_scratch;

	call->cjif = jiffies;

	_leave(" = 0 (%p)", call);

	*_call = call;

	return 0;
} /* end __rxrpc_create_call() */
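/*
 * Sizing note: the entire call record, including the app_scratch area that
 * app_scr_alloc points into, lives in the single zeroed page allocated above,
 * which is why teardown below is a bare free_page() rather than kfree().  The
 * struct must therefore fit in PAGE_SIZE together with its scratch space.
 */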
/*****************************************************************************/
/*
 * create a new call record for outgoing calls
 */
int rxrpc_create_call(struct rxrpc_connection *conn,
		      rxrpc_call_attn_func_t attn,
		      rxrpc_call_error_func_t error,
		      rxrpc_call_aemap_func_t aemap,
		      struct rxrpc_call **_call)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_call *call;
	int ret, cix, loop;

	_enter("%p", conn);

	/* allocate and initialise a call record */
	ret = __rxrpc_create_call(conn, &call);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
	if (attn)
		call->app_attn_func = attn;
	if (error)
		call->app_error_func = error;
	if (aemap)
		call->app_aemap_func = aemap;

	_state(call);

	spin_lock(&conn->lock);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&conn->chanwait, &myself);

 try_again:
	/* try to find an unused channel */
	for (cix = 0; cix < 4; cix++)
		if (!conn->channels[cix])
			goto obtained_chan;

	/* no free channels - wait for one to become available */
	ret = -EINTR;
	if (signal_pending(current))
		goto error_unwait;

	spin_unlock(&conn->lock);
	schedule();
	set_current_state(TASK_INTERRUPTIBLE);
	spin_lock(&conn->lock);
	goto try_again;

	/* got a channel - now attach to the connection */
 obtained_chan:
	remove_wait_queue(&conn->chanwait, &myself);
	set_current_state(TASK_RUNNING);

	/* concoct a unique call number */
 next_callid:
	call->call_id = htonl(++conn->call_counter);
	for (loop = 0; loop < 4; loop++)
		if (conn->channels[loop] &&
		    conn->channels[loop]->call_id == call->call_id)
			goto next_callid;

	rxrpc_get_connection(conn);
	conn->channels[cix] = call; /* assign _after_ done callid check loop */
	do_gettimeofday(&conn->atime);
	call->chan_ix = htonl(cix);

	spin_unlock(&conn->lock);

	down_write(&rxrpc_calls_sem);
	list_add_tail(&call->call_link, &rxrpc_calls);
	up_write(&rxrpc_calls_sem);

	__RXACCT(atomic_inc(&rxrpc_call_count));
	*_call = call;

	_leave(" = 0 (call=%p cix=%u)", call, cix);
	return 0;

 error_unwait:
	remove_wait_queue(&conn->chanwait, &myself);
	set_current_state(TASK_RUNNING);
	spin_unlock(&conn->lock);
	free_page((unsigned long) call);
	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_create_call() */
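/*
 * Sketch of client-side usage (hypothetical caller; assumes a connection
 * already obtained elsewhere and the default wake-up handlers left in place):
 *
 *	struct rxrpc_call *call;
 *	int ret;
 *
 *	ret = rxrpc_create_call(conn, NULL, NULL, NULL, &call);
 *	if (ret < 0)
 *		return ret;	// -EINTR if interrupted awaiting a channel
 *	// ... send the argument block, wait for the reply ...
 *	rxrpc_put_call(call);	// drop the initial reference when finished
 *
 * Passing NULL for the three function pointers keeps the defaults, which
 * merely wake call->waitq, so a synchronous caller can sleep on that queue
 * until prodded.
 */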
/*****************************************************************************/
/*
 * create a new call record for incoming calls
 */
int rxrpc_incoming_call(struct rxrpc_connection *conn,
			struct rxrpc_message *msg,
			struct rxrpc_call **_call)
{
	struct rxrpc_call *call;
	unsigned cix;
	int ret;

	cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;

	_enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix);

	/* allocate and initialise a call record */
	ret = __rxrpc_create_call(conn, &call);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	call->pkt_rcv_count = 1;
	call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
	call->app_mark = sizeof(uint32_t);

	_state(call);

	/* attach to the connection */
	ret = -EBUSY;
	call->chan_ix = htonl(cix);
	call->call_id = msg->hdr.callNumber;

	spin_lock(&conn->lock);

	if (!conn->channels[cix] ||
	    conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE ||
	    conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR
	    ) {
		conn->channels[cix] = call;
		rxrpc_get_connection(conn);
		ret = 0;
	}

	spin_unlock(&conn->lock);

	if (ret < 0) {
		free_page((unsigned long) call);
		call = NULL;
	}

	if (ret == 0) {
		down_write(&rxrpc_calls_sem);
		list_add_tail(&call->call_link, &rxrpc_calls);
		up_write(&rxrpc_calls_sem);
		__RXACCT(atomic_inc(&rxrpc_call_count));
		*_call = call;
	}

	_leave(" = %d [%p]", ret, call);
	return ret;
} /* end rxrpc_incoming_call() */
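/*
 * Note on the -EBUSY path above: the channel slot is only taken over when its
 * current occupant has reached COMPLETE or ERROR; if an active call holds the
 * slot, the new record is freed and the incoming call refused.  A displaced
 * finished call is not freed here - it still owns its own usage count and is
 * torn down by rxrpc_put_call() when its last reference goes away.
 */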
/*****************************************************************************/
/*
 * free a call record
 */
void rxrpc_put_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_message *msg;

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	/* sanity check */
	if (atomic_read(&call->usage) <= 0)
		BUG();

	/* to prevent a race, the decrement and the de-list must be effectively
	 * atomic */
	spin_lock(&conn->lock);
	if (likely(!atomic_dec_and_test(&call->usage))) {
		spin_unlock(&conn->lock);
		_leave("");
		return;
	}

	if (conn->channels[ntohl(call->chan_ix)] == call)
		conn->channels[ntohl(call->chan_ix)] = NULL;

	spin_unlock(&conn->lock);

	wake_up(&conn->chanwait);

	rxrpc_put_connection(conn);

	/* clear the timers and dequeue from krxiod */
	del_timer_sync(&call->acks_timeout);
	del_timer_sync(&call->rcv_timeout);
	del_timer_sync(&call->ackr_dfr_timo);

	rxrpc_krxiod_dequeue_call(call);

	/* clean up the contents of the struct */
	if (call->snd_nextmsg)
		rxrpc_put_message(call->snd_nextmsg);

	if (call->snd_ping)
		rxrpc_put_message(call->snd_ping);

	while (!list_empty(&call->acks_pendq)) {
		msg = list_entry(call->acks_pendq.next,
				 struct rxrpc_message, link);
		list_del(&msg->link);
		rxrpc_put_message(msg);
	}

	while (!list_empty(&call->rcv_receiveq)) {
		msg = list_entry(call->rcv_receiveq.next,
				 struct rxrpc_message, link);
		list_del(&msg->link);
		rxrpc_put_message(msg);
	}

	while (!list_empty(&call->app_readyq)) {
		msg = list_entry(call->app_readyq.next,
				 struct rxrpc_message, link);
		list_del(&msg->link);
		rxrpc_put_message(msg);
	}

	while (!list_empty(&call->app_unreadyq)) {
		msg = list_entry(call->app_unreadyq.next,
				 struct rxrpc_message, link);
		list_del(&msg->link);
		rxrpc_put_message(msg);
	}

	module_put(call->owner);

	down_write(&rxrpc_calls_sem);
	list_del(&call->call_link);
	up_write(&rxrpc_calls_sem);

	__RXACCT(atomic_dec(&rxrpc_call_count));
	free_page((unsigned long) call);

	_leave(" [destroyed]");
} /* end rxrpc_put_call() */

/*****************************************************************************/
/*
 * actually generate a normal ACK
 */
static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call,
					      rxrpc_seq_t seq)
{
	struct rxrpc_message *msg;
	struct kvec diov[3];
	__be32 aux[4];
	int delta, ret;

	/* ACKs default to DELAY */
	if (!call->ackr.reason)
		call->ackr.reason = RXRPC_ACK_DELAY;

	_proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       jiffies - call->cjif,
	       ntohs(call->ackr.maxSkew),
	       ntohl(call->ackr.firstPacket),
	       ntohl(call->ackr.previousPacket),
	       ntohl(call->ackr.serial),
	       rxrpc_acks[call->ackr.reason],
	       call->ackr.nAcks);

	aux[0] = htonl(call->conn->peer->if_mtu);	/* interface MTU */
	aux[1] = htonl(1444);				/* max MTU */
	aux[2] = htonl(16);				/* rwind */
	aux[3] = htonl(4);				/* max packets */

	diov[0].iov_len  = sizeof(struct rxrpc_ackpacket);
	diov[0].iov_base = &call->ackr;
	diov[1].iov_len  = call->ackr_pend_cnt + 3;
	diov[1].iov_base = call->ackr_array;
	diov[2].iov_len  = sizeof(aux);
	diov[2].iov_base = &aux;

	/* build and send the message */
	ret = rxrpc_conn_newmsg(call->conn, call, RXRPC_PACKET_TYPE_ACK,
				3, diov, GFP_KERNEL, &msg);
	if (ret < 0)
		goto out;

	msg->seq = seq;
	msg->hdr.seq = htonl(seq);
	msg->hdr.flags |= RXRPC_SLOW_START_OK;

	ret = rxrpc_conn_sendmsg(call->conn, msg);
	rxrpc_put_message(msg);
	if (ret < 0)
		goto out;
	call->pkt_snd_count++;

	/* count how many actual ACKs there were at the front */
	for (delta = 0; delta < call->ackr_pend_cnt; delta++)
		if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK)
			break;

	call->ackr_pend_cnt -= delta; /* all ACK'd to this point */

	/* crank the ACK window around */
	if (delta == 0) {
		/* un-ACK'd window */
	}
	else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
		/* partially ACK'd window
		 * - shuffle down to avoid losing out-of-sequence packets
		 */
		call->ackr_win_bot += delta;
		call->ackr_win_top += delta;

		memmove(&call->ackr_array[0],
			&call->ackr_array[delta],
			call->ackr_pend_cnt);

		memset(&call->ackr_array[call->ackr_pend_cnt],
		       RXRPC_ACK_TYPE_NACK,
		       sizeof(call->ackr_array) - call->ackr_pend_cnt);
	}
	else {
		/* fully ACK'd window
		 * - just clear the whole thing
		 */
		memset(&call->ackr_array,
		       RXRPC_ACK_TYPE_NACK,
		       sizeof(call->ackr_array));
	}

	/* clear this ACK */
	memset(&call->ackr, 0, sizeof(call->ackr));

 out:
	if (!call->app_call_state)
		printk("___ STATE 0 ___\n");
	return ret;
} /* end __rxrpc_call_gen_normal_ACK() */
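/*
 * Example of the window crank above (made-up values, not from the original):
 * suppose the window spans seqs #5-#12 and the array reads [A A A N A N N N].
 * The leading run of ACKs gives delta = 3, so the window becomes #8-#15, the
 * surviving slots shuffle down to [N A N N N ...] and the vacated tail is
 * memset back to NACK - preserving the out-of-sequence ACK that was recorded
 * for old slot 4 (seq #9) in its correct new position.
 */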
/*****************************************************************************/
/*
 * note the reception of a packet in the call's ACK records and generate an
 * appropriate ACK packet if necessary
 * - returns 0 if packet should be processed, 1 if packet should be ignored
 *   and -ve on an error
 */
static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
				   struct rxrpc_header *hdr,
				   struct rxrpc_ackpacket *ack)
{
	struct rxrpc_message *msg;
	rxrpc_seq_t seq;
	unsigned offset;
	int ret = 0, err;
	u8 special_ACK, do_ACK, force;

	_enter("%p,%p { seq=%d tp=%d fl=%02x }",
	       call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags);

	seq = ntohl(hdr->seq);
	offset = seq - call->ackr_win_bot;
	do_ACK = RXRPC_ACK_DELAY;
	special_ACK = 0;
	force = (seq == 1);

	if (call->ackr_high_seq < seq)
		call->ackr_high_seq = seq;

	/* deal with generation of obvious special ACKs first */
	if (ack && ack->reason == RXRPC_ACK_PING) {
		special_ACK = RXRPC_ACK_PING_RESPONSE;
		ret = 1;
		goto gen_ACK;
	}

	if (seq < call->ackr_win_bot) {
		special_ACK = RXRPC_ACK_DUPLICATE;
		ret = 1;
		goto gen_ACK;
	}

	if (seq >= call->ackr_win_top) {
		special_ACK = RXRPC_ACK_EXCEEDS_WINDOW;
		ret = 1;
		goto gen_ACK;
	}

	if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) {
		special_ACK = RXRPC_ACK_DUPLICATE;
		ret = 1;
		goto gen_ACK;
	}

	/* okay... it's a normal data packet inside the ACK window */
	call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;

	if (offset < call->ackr_pend_cnt) {
	}
	else if (offset > call->ackr_pend_cnt) {
		do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
		call->ackr_pend_cnt = offset;
		goto gen_ACK;
	}

	if (hdr->flags & RXRPC_REQUEST_ACK) {
		do_ACK = RXRPC_ACK_REQUESTED;
	}

	/* generate an ACK on the final packet of a reply just received */
	if (hdr->flags & RXRPC_LAST_PACKET) {
		if (call->conn->out_clientflag)
			force = 1;
	}
	else if (!(hdr->flags & RXRPC_MORE_PACKETS)) {
		do_ACK = RXRPC_ACK_REQUESTED;
	}

	/* re-ACK packets previously received out-of-order */
	for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
		if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK)
			break;

	call->ackr_pend_cnt = offset;

	/* generate an ACK if we fill up the window */
	if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE)
		force = 1;

 gen_ACK:
	_debug("%05lu ACKs pend=%u norm=%s special=%s%s",
	       jiffies - call->cjif,
	       call->ackr_pend_cnt,
	       rxrpc_acks[do_ACK],
	       rxrpc_acks[special_ACK],
	       force ? " immediate" :
	       do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" :
	       hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
	       " defer"
	       );

	/* send any pending normal ACKs if need be */
	if (call->ackr_pend_cnt > 0) {
		/* fill out the appropriate form */
		call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
		call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq,
					       65535U));
		call->ackr.firstPacket = htonl(call->ackr_win_bot);
		call->ackr.previousPacket = call->ackr_prev_seq;
		call->ackr.serial = hdr->serial;
		call->ackr.nAcks = call->ackr_pend_cnt;

		if (do_ACK == RXRPC_ACK_REQUESTED)
			call->ackr.reason = do_ACK;

		/* generate the ACK immediately if necessary */
		if (special_ACK || force) {
			err = __rxrpc_call_gen_normal_ACK(
				call, do_ACK == RXRPC_ACK_DELAY ? 0 : seq);
			if (err < 0) {
				ret = err;
				goto out;
			}
		}
	}

	if (call->ackr.reason == RXRPC_ACK_REQUESTED)
		call->ackr_dfr_seq = seq;

	/* start the ACK timer if it's not already running and there are
	 * deferred ACKs pending */
	if (call->ackr_pend_cnt > 0 &&
	    call->ackr.reason != RXRPC_ACK_REQUESTED &&
	    !timer_pending(&call->ackr_dfr_timo)
	    ) {
		unsigned long timo;

		timo = rxrpc_call_dfr_ack_timeout + jiffies;

		_debug("START ACKR TIMER for cj=%lu", timo - call->cjif);

		spin_lock(&call->lock);
		mod_timer(&call->ackr_dfr_timo, timo);
		spin_unlock(&call->lock);
	}
	else if ((call->ackr_pend_cnt == 0 ||
		  call->ackr.reason == RXRPC_ACK_REQUESTED) &&
		 timer_pending(&call->ackr_dfr_timo)
		 ) {
		/* stop timer if no pending ACKs */
		_debug("CLEAR ACKR TIMER");
		del_timer_sync(&call->ackr_dfr_timo);
	}

	/* send a special ACK if one is required */
	if (special_ACK) {
		struct rxrpc_ackpacket ack;
		struct kvec diov[2];
		uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK };

		/* fill out the appropriate form */
		ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
		ack.maxSkew = htons(min(call->ackr_high_seq - seq, 65535U));
		ack.firstPacket = htonl(call->ackr_win_bot);
		ack.previousPacket = call->ackr_prev_seq;
		ack.serial = hdr->serial;
		ack.reason = special_ACK;
		ack.nAcks = 0;

		_proto("Rx Sending s-ACK"
		       " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
		       ntohs(ack.maxSkew),
		       ntohl(ack.firstPacket),
		       ntohl(ack.previousPacket),
		       ntohl(ack.serial),
		       rxrpc_acks[ack.reason],
		       ack.nAcks);

		diov[0].iov_len  = sizeof(struct rxrpc_ackpacket);
		diov[0].iov_base = &ack;
		diov[1].iov_len  = sizeof(acks);
		diov[1].iov_base = acks;

		/* build and send the message */
		err = rxrpc_conn_newmsg(call->conn, call, RXRPC_PACKET_TYPE_ACK,
					hdr->seq ? 2 : 1, diov,
					GFP_KERNEL,
					&msg);
		if (err < 0) {
			ret = err;
			goto out;
		}

		msg->seq = seq;
		msg->hdr.seq = htonl(seq);
		msg->hdr.flags |= RXRPC_SLOW_START_OK;

		err = rxrpc_conn_sendmsg(call->conn, msg);
		rxrpc_put_message(msg);
		if (err < 0) {
			ret = err;
			goto out;
		}
		call->pkt_snd_count++;
	}

 out:
	if (hdr->seq)
		call->ackr_prev_seq = hdr->seq;

	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_call_generate_ACK() */
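/*
 * Summary of the decision logic above, as implemented: a PING begets an
 * immediate PING_RESPONSE; a seq below the window bottom or landing on an
 * already-filled slot begets DUPLICATE; a seq at or beyond the window top
 * begets EXCEEDS_WINDOW - all three returning 1 so the caller drops the
 * packet.  An in-window packet is marked in ackr_array and returns 0 for
 * normal processing, with its ACK either sent at once (forced), merged into
 * a REQUESTED ACK, or left to the deferred-ACK timer.
 */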
/*****************************************************************************/
/*
 * handle work to be done on a call
 * - includes packet reception and timeout processing
 */
void rxrpc_call_do_stuff(struct rxrpc_call *call)
{
	_enter("%p{flags=%lx}", call, call->flags);

	/* handle packet reception */
	if (call->flags & RXRPC_CALL_RCV_PKT) {
		_debug("- receive packet");
		call->flags &= ~RXRPC_CALL_RCV_PKT;
		rxrpc_call_receive_packet(call);
	}

	/* handle overdue ACKs */
	if (call->flags & RXRPC_CALL_ACKS_TIMO) {
		_debug("- overdue ACK timeout");
		call->flags &= ~RXRPC_CALL_ACKS_TIMO;
		rxrpc_call_resend(call, call->snd_seq_count);
	}

	/* handle lack of reception */
	if (call->flags & RXRPC_CALL_RCV_TIMO) {
		_debug("- reception timeout");
		call->flags &= ~RXRPC_CALL_RCV_TIMO;
		rxrpc_call_abort(call, -EIO);
	}

	/* handle deferred ACKs */
	if (call->flags & RXRPC_CALL_ACKR_TIMO ||
	    (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED)
	    ) {
		_debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
		       jiffies - call->cjif,
		       rxrpc_acks[call->ackr.reason],
		       call->ackr.nAcks);

		call->flags &= ~RXRPC_CALL_ACKR_TIMO;

		if (call->ackr.nAcks > 0 &&
		    call->app_call_state != RXRPC_CSTATE_ERROR) {
			/* generate ACK */
			__rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq);
			call->ackr_dfr_seq = 0;
		}
	}

	_leave("");
} /* end rxrpc_call_do_stuff() */

/*****************************************************************************/
/*
 * send an abort message at call or connection level
 * - must be called with call->lock held
 * - the supplied error code is sent as the packet data
 */
static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_message *msg;
	struct kvec diov[1];
	int ret;
	__be32 _error;

	_enter("%p{%08x},%p{%d},%d",
	       conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno);

	/* if this call is already aborted, then just wake up any waiters */
	if (call->app_call_state == RXRPC_CSTATE_ERROR) {
		spin_unlock(&call->lock);
		call->app_error_func(call);
		_leave(" = 0");
		return 0;
	}

	rxrpc_get_call(call);

	/* change the state _with_ the lock still held */
	call->app_call_state = RXRPC_CSTATE_ERROR;
	call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
	call->app_errno = errno;
	call->app_mark = RXRPC_APP_MARK_EOF;
	call->app_read_buf = NULL;
	call->app_async_read = 0;

	_state(call);

	/* ask the app to translate the error code */
	call->app_aemap_func(call);

	spin_unlock(&call->lock);

	/* flush any outstanding ACKs */
	del_timer_sync(&call->acks_timeout);
	del_timer_sync(&call->rcv_timeout);
	del_timer_sync(&call->ackr_dfr_timo);

	if (rxrpc_call_is_ack_pending(call))
		__rxrpc_call_gen_normal_ACK(call, 0);

	/* send the abort packet only if we actually traded some other
	 * packets */
	ret = 0;
	if (call->pkt_snd_count || call->pkt_rcv_count) {
		/* actually send the abort */
		_proto("Rx Sending Call ABORT { data=%d }",
		       call->app_abort_code);

		_error = htonl(call->app_abort_code);

		diov[0].iov_len  = sizeof(_error);
		diov[0].iov_base = &_error;

		ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT,
					1, diov, GFP_KERNEL, &msg);
		if (ret == 0) {
			ret = rxrpc_conn_sendmsg(conn, msg);
			rxrpc_put_message(msg);
		}
	}

	/* tell the app layer to let go */
	call->app_error_func(call);

	rxrpc_put_call(call);

	_leave(" = %d", ret);
	return ret;
} /* end __rxrpc_call_abort() */

/*****************************************************************************/
/*
 * send an abort message at call or connection level
 * - the supplied error code is sent as the packet data
 */
int rxrpc_call_abort(struct rxrpc_call *call, int error)
{
	spin_lock(&call->lock);

	return __rxrpc_call_abort(call, error);
} /* end rxrpc_call_abort() */
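/*
 * Locking note: __rxrpc_call_abort() is entered with call->lock held and
 * always drops it before returning, so rxrpc_call_abort() can simply take
 * the lock and tail-call it.  Paths that already hold the lock (such as the
 * data-reception code below) must call the double-underscore variant
 * directly to avoid self-deadlock.
 */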
/*****************************************************************************/
/*
 * process packets waiting for this call
 */
static void rxrpc_call_receive_packet(struct rxrpc_call *call)
{
	struct rxrpc_message *msg;
	struct list_head *_p;

	_enter("%p", call);

	rxrpc_get_call(call); /* must not go away too soon if aborted by
			       * app-layer */

	while (!list_empty(&call->rcv_receiveq)) {
		/* try to get next packet */
		_p = NULL;
		spin_lock(&call->lock);
		if (!list_empty(&call->rcv_receiveq)) {
			_p = call->rcv_receiveq.next;
			list_del_init(_p);
		}
		spin_unlock(&call->lock);

		if (!_p)
			break;

		msg = list_entry(_p, struct rxrpc_message, link);

		_proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
		       jiffies - call->cjif,
		       rxrpc_pkts[msg->hdr.type],
		       ntohl(msg->hdr.serial),
		       msg->seq,
		       msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-',
		       msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-',
		       msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-',
		       msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-',
		       msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S'
		       );

		switch (msg->hdr.type) {
			/* deal with data packets */
		case RXRPC_PACKET_TYPE_DATA:
			/* ACK the packet if necessary */
			switch (rxrpc_call_generate_ACK(call, &msg->hdr,
							NULL)) {
			case 0: /* useful packet */
				rxrpc_call_receive_data_packet(call, msg);
				break;
			case 1: /* duplicate or out-of-window packet */
				break;
			default:
				rxrpc_put_message(msg);
				goto out;
			}
			break;

			/* deal with ACK packets */
		case RXRPC_PACKET_TYPE_ACK:
			rxrpc_call_receive_ack_packet(call, msg);
			break;

			/* deal with abort packets */
		case RXRPC_PACKET_TYPE_ABORT: {
			__be32 _dbuf, *dp;

			dp = skb_header_pointer(msg->pkt, msg->offset,
						sizeof(_dbuf), &_dbuf);
			if (dp == NULL)
				printk("Rx Received short ABORT packet\n");

			_proto("Rx Received Call ABORT { data=%d }",
			       (dp ? ntohl(*dp) : 0));

			spin_lock(&call->lock);
			call->app_call_state = RXRPC_CSTATE_ERROR;
			call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
			call->app_abort_code = (dp ? ntohl(*dp) : 0);
			call->app_errno = -ECONNABORTED;
			call->app_mark = RXRPC_APP_MARK_EOF;
			call->app_read_buf = NULL;
			call->app_async_read = 0;

			/* ask the app to translate the error code */
			call->app_aemap_func(call);
			_state(call);
			spin_unlock(&call->lock);
			call->app_error_func(call);
			break;
		}
		default:
			/* deal with other packet types */
			_proto("Rx Unsupported packet type %u (#%u)",
			       msg->hdr.type, msg->seq);
			break;
		}

		rxrpc_put_message(msg);
	}

 out:
	rxrpc_put_call(call);
	_leave("");
} /* end rxrpc_call_receive_packet() */

/*****************************************************************************/
/*
 * process next data packet
 * - as the next data packet arrives:
 *   - it is queued on app_readyq _if_ it is the next one expected
 *     (app_ready_seq+1)
 *   - it is queued on app_unreadyq _if_ it is not the next one expected
 *   - if a packet placed on app_readyq completely fills a hole leading up to
 *     the first packet on app_unreadyq, then packets now in sequence are
 *     transferred to app_readyq
 * - the application layer can only see packets on app_readyq
 *   (app_ready_qty bytes)
 * - the application layer is prodded every time a new packet arrives
 */
static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
					   struct rxrpc_message *msg)
{
	const struct rxrpc_operation *optbl, *op;
	struct rxrpc_message *pmsg;
	struct list_head *_p;
	int ret, lo, hi, rmtimo;
	__be32 opid;

	_enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);

	rxrpc_get_message(msg);

	/* add to the unready queue if we'd have to create a hole in the ready
	 * queue otherwise */
	if (msg->seq != call->app_ready_seq + 1) {
		_debug("Call add packet %d to unreadyq", msg->seq);

		/* insert in seq order */
		list_for_each(_p, &call->app_unreadyq) {
			pmsg = list_entry(_p, struct rxrpc_message, link);
			if (pmsg->seq > msg->seq)
				break;
		}

		list_add_tail(&msg->link, _p);

		_leave(" [unreadyq]");
		return;
	}

	/* next in sequence - simply append into the call's ready queue */
	_debug("Call add packet %d to readyq (+%Zd => %Zd bytes)",
	       msg->seq, msg->dsize, call->app_ready_qty);

	spin_lock(&call->lock);
	call->app_ready_seq = msg->seq;
	call->app_ready_qty += msg->dsize;
	list_add_tail(&msg->link, &call->app_readyq);

	/* move unready packets to the readyq if we got rid of a hole */
	while (!list_empty(&call->app_unreadyq)) {
		pmsg = list_entry(call->app_unreadyq.next,
				  struct rxrpc_message, link);

		if (pmsg->seq != call->app_ready_seq + 1)
			break;

		/* next in sequence - just move list-to-list */
		_debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)",
		       pmsg->seq, pmsg->dsize, call->app_ready_qty);

		call->app_ready_seq = pmsg->seq;
		call->app_ready_qty += pmsg->dsize;
		list_del_init(&pmsg->link);
		list_add_tail(&pmsg->link, &call->app_readyq);
	}

	/* see if we've got the last packet yet */
	if (!list_empty(&call->app_readyq)) {
		pmsg = list_entry(call->app_readyq.prev,
				  struct rxrpc_message, link);
		if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
			call->app_last_rcv = 1;
			_debug("Last packet on readyq");
		}
	}

	switch (call->app_call_state) {
		/* do nothing if call already aborted */
	case RXRPC_CSTATE_ERROR:
		spin_unlock(&call->lock);
		_leave(" [error]");
		return;

		/* extract the operation ID from an incoming call if that's
		 * not yet been done */
	case RXRPC_CSTATE_SRVR_RCV_OPID:
		spin_unlock(&call->lock);

		/* handle as yet insufficient data for the operation ID */
		if (call->app_ready_qty < 4) {
			if (call->app_last_rcv)
				/* trouble - last packet seen */
				rxrpc_call_abort(call, -EINVAL);

			_leave("");
			return;
		}

		/* pull the operation ID out of the buffer */
		ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0);
		if (ret < 0) {
			printk("Unexpected error from read-data: %d\n", ret);
			if (call->app_call_state != RXRPC_CSTATE_ERROR)
				rxrpc_call_abort(call, ret);
			_leave("");
			return;
		}
		call->app_opcode = ntohl(opid);

		/* locate the operation in the available ops table */
		optbl = call->conn->service->ops_begin;
		lo = 0;
		hi = call->conn->service->ops_end - optbl;

		while (lo < hi) {
			int mid = (hi + lo) / 2;
			op = &optbl[mid];
			if (call->app_opcode == op->id)
				goto found_op;
			if (call->app_opcode > op->id)
				lo = mid + 1;
			else
				hi = mid;
		}

		/* search failed */
		kproto("Rx Client requested operation %d from %s service",
		       call->app_opcode, call->conn->service->name);

		rxrpc_call_abort(call, -EINVAL);
		_leave(" [inval]");
		return;

	found_op:
		_proto("Rx Client requested operation %s from %s service",
		       op->name, call->conn->service->name);

		/* we're now waiting for the argument block (unless the call
		 * was aborted) */
		spin_lock(&call->lock);
		if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID ||
		    call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) {
			if (!call->app_last_rcv)
				call->app_call_state =
					RXRPC_CSTATE_SRVR_RCV_ARGS;
			else if (call->app_ready_qty > 0)
				call->app_call_state =
					RXRPC_CSTATE_SRVR_GOT_ARGS;
			else
				call->app_call_state =
					RXRPC_CSTATE_SRVR_SND_REPLY;
			call->app_mark = op->asize;
			call->app_user = op->user;
		}
		spin_unlock(&call->lock);

		_state(call);
		break;

	case RXRPC_CSTATE_SRVR_RCV_ARGS:
		/* change state if just received last packet of arg block */
		if (call->app_last_rcv)
			call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
		spin_unlock(&call->lock);

		_state(call);
		break;

	case RXRPC_CSTATE_CLNT_RCV_REPLY:
		/* change state if just received last packet of reply block */
		rmtimo = 0;
		if (call->app_last_rcv) {
			call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY;
			rmtimo = 1;
		}
		spin_unlock(&call->lock);

		if (rmtimo) {
			del_timer_sync(&call->acks_timeout);
			del_timer_sync(&call->rcv_timeout);
			del_timer_sync(&call->ackr_dfr_timo);
		}

		_state(call);
		break;

	default:
		/* deal with data reception in an unexpected state */
		printk("Unexpected state [[[ %u ]]]\n", call->app_call_state);
		__rxrpc_call_abort(call, -EBADMSG);
		_leave("");
		return;
	}

	if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY &&
	    call->app_last_rcv)
		BUG();

	/* otherwise just invoke the data function whenever we can satisfy its
	 * desire for more data */
	_proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s",
	       call->app_call_state, call->app_ready_qty, call->app_mark,
	       call->app_last_rcv ? " last-rcvd" : "");

	spin_lock(&call->lock);

	ret = __rxrpc_call_read_data(call);
	switch (ret) {
	case 0:
		spin_unlock(&call->lock);
		call->app_attn_func(call);
		break;
	case -EAGAIN:
		spin_unlock(&call->lock);
		break;
	case -ECONNABORTED:
		spin_unlock(&call->lock);
		break;
	default:
		__rxrpc_call_abort(call, ret);
		break;
	}

	_state(call);

	_leave("");
} /* end rxrpc_call_receive_data_packet() */
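/*
 * Example of the hole-filling above (made-up sequence numbers): if packets
 * arrive as #1, #3, #4, #2, then #1 goes straight onto app_readyq
 * (app_ready_seq = 1), #3 and #4 park on app_unreadyq in seq order, and the
 * arrival of #2 both appends itself to app_readyq and drains #3 and #4 in
 * after it, leaving app_ready_seq = 4 with app_unreadyq empty.
 */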
/*****************************************************************************/
/*
 * received an ACK packet
 */
static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
					  struct rxrpc_message *msg)
{
	struct rxrpc_ackpacket _ack, *ap;
	rxrpc_serial_net_t serial;
	rxrpc_seq_t seq;
	int ret;

	_enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);

	/* extract the basic ACK record */
	ap = skb_header_pointer(msg->pkt, msg->offset, sizeof(_ack), &_ack);
	if (ap == NULL) {
		printk("Rx Received short ACK packet\n");
		return;
	}
	msg->offset += sizeof(_ack);

	serial = ap->serial;
	seq = ntohl(ap->firstPacket);

	_proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
	       ntohl(msg->hdr.serial),
	       ntohs(ap->bufferSpace),
	       ntohs(ap->maxSkew),
	       seq,
	       ntohl(ap->previousPacket),
	       ntohl(serial),
	       rxrpc_acks[ap->reason],
	       call->ackr.nAcks
	       );

	/* check the other side isn't ACK'ing a sequence number I haven't sent
	 * yet */
	if (ap->nAcks > 0 &&
	    (seq > call->snd_seq_count ||
	     seq + ap->nAcks - 1 > call->snd_seq_count)) {
		printk("Received ACK (#%u-#%u) for unsent packet\n",
		       seq, seq + ap->nAcks - 1);
		rxrpc_call_abort(call, -EINVAL);
		_leave("");
		return;
	}

	/* deal with RTT calculation */
	if (serial) {
		struct rxrpc_message *rttmsg;

		/* find the prompting packet */
		spin_lock(&call->lock);
		if (call->snd_ping && call->snd_ping->hdr.serial == serial) {
			/* it was a ping packet */
			rttmsg = call->snd_ping;
			call->snd_ping = NULL;
			spin_unlock(&call->lock);

			if (rttmsg) {
				rttmsg->rttdone = 1;
				rxrpc_peer_calculate_rtt(call->conn->peer,
							 rttmsg, msg);
				rxrpc_put_message(rttmsg);
			}
		}
		else {
			struct list_head *_p;

			/* it ought to be a data packet - look in the pending
			 * ACK list */
			list_for_each(_p, &call->acks_pendq) {
				rttmsg = list_entry(_p, struct rxrpc_message,
						    link);
				if (rttmsg->hdr.serial == serial) {
					if (rttmsg->rttdone)
						/* never do RTT twice without
						 * resending */
						break;

					rttmsg->rttdone = 1;
					rxrpc_peer_calculate_rtt(
						call->conn->peer, rttmsg, msg);
					break;
				}
			}
			spin_unlock(&call->lock);
		}
	}

	switch (ap->reason) {
		/* deal with negative/positive acknowledgement of data
		 * packets */
	case RXRPC_ACK_REQUESTED:
	case RXRPC_ACK_DELAY:
	case RXRPC_ACK_IDLE:
		rxrpc_call_definitively_ACK(call, seq - 1);
		/* fall through */

	case RXRPC_ACK_DUPLICATE:
	case RXRPC_ACK_OUT_OF_SEQUENCE:
	case RXRPC_ACK_EXCEEDS_WINDOW:
		call->snd_resend_cnt = 0;
		ret = rxrpc_call_record_ACK(call, msg, seq, ap->nAcks);
		if (ret < 0)
			rxrpc_call_abort(call, ret);
		break;

		/* respond to ping packets immediately */
	case RXRPC_ACK_PING:
		rxrpc_call_generate_ACK(call, &msg->hdr, ap);
		break;

		/* only record RTT on ping response packets */
	case RXRPC_ACK_PING_RESPONSE:
		if (call->snd_ping) {
			struct rxrpc_message *rttmsg;

			/* only do RTT stuff if the response matches the
			 * retained ping */
			rttmsg = NULL;
			spin_lock(&call->lock);
			if (call->snd_ping &&
			    call->snd_ping->hdr.serial == ap->serial) {
				rttmsg = call->snd_ping;
				call->snd_ping = NULL;
			}
			spin_unlock(&call->lock);

			if (rttmsg) {
				rttmsg->rttdone = 1;
				rxrpc_peer_calculate_rtt(call->conn->peer,
							 rttmsg, msg);
				rxrpc_put_message(rttmsg);
			}
		}
		break;

	default:
		printk("Unsupported ACK reason %u\n", ap->reason);
		break;
	}

	_leave("");
} /* end rxrpc_call_receive_ack_packet() */

/*****************************************************************************/
/*
 * record definitive ACKs for all messages up to and including the one with the
 * 'highest' seq
 */
static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
					rxrpc_seq_t highest)
{
	struct rxrpc_message *msg;
	int now_complete;

	_enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest);

	while (call->acks_dftv_seq < highest) {
		call->acks_dftv_seq++;

		_proto("Definitive ACK on packet #%u", call->acks_dftv_seq);

		/* discard those at front of queue until message with highest
		 * ACK is found */
		spin_lock(&call->lock);
		msg = NULL;
		if (!list_empty(&call->acks_pendq)) {
			msg = list_entry(call->acks_pendq.next,
					 struct rxrpc_message, link);
			list_del_init(&msg->link); /* dequeue */
			if (msg->state == RXRPC_MSG_SENT)
				call->acks_pend_cnt--;
		}
		spin_unlock(&call->lock);

		/* insanity check */
		if (!msg)
			panic("%s(): acks_pendq unexpectedly empty\n",
			      __FUNCTION__);

		if (msg->seq != call->acks_dftv_seq)
			panic("%s(): Packet #%u expected at front of acks_pendq"
			      " (#%u found)\n",
			      __FUNCTION__, call->acks_dftv_seq, msg->seq);

		/* discard the message */
		msg->state = RXRPC_MSG_DONE;
		rxrpc_put_message(msg);
	}

	/* if all sent packets are definitively ACK'd then prod any sleepers
	 * just in case */
	now_complete = 0;
	spin_lock(&call->lock);
	if (call->acks_dftv_seq == call->snd_seq_count) {
		if (call->app_call_state != RXRPC_CSTATE_COMPLETE) {
			call->app_call_state = RXRPC_CSTATE_COMPLETE;
			_state(call);
			now_complete = 1;
		}
	}
	spin_unlock(&call->lock);

	if (now_complete) {
		del_timer_sync(&call->acks_timeout);
		del_timer_sync(&call->rcv_timeout);
		del_timer_sync(&call->ackr_dfr_timo);
		call->app_attn_func(call);
	}

	_leave("");
} /* end rxrpc_call_definitively_ACK() */
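/*
 * Terminology note: an entry in the peer's ACK bitmap is only "provisional" -
 * the peer may later NACK the same packet and force a resend - whereas
 * everything below firstPacket is "definitive" and may be discarded for good.
 * That is why the REQUESTED/DELAY/IDLE cases above first retire everything up
 * to seq - 1 definitively and then fall through to record the provisional
 * bitmap that follows.
 */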
/*****************************************************************************/
/*
 * record the specified amount of ACKs/NAKs
 */
static int rxrpc_call_record_ACK(struct rxrpc_call *call,
				 struct rxrpc_message *msg,
				 rxrpc_seq_t seq,
				 size_t count)
{
	struct rxrpc_message *dmsg;
	struct list_head *_p;
	rxrpc_seq_t highest;
	unsigned ix;
	size_t chunk;
	char resend, now_complete;
	u8 acks[16];

	_enter("%p{apc=%u ads=%u},%p,%u,%Zu",
	       call, call->acks_pend_cnt, call->acks_dftv_seq,
	       msg, seq, count);

	/* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order
	 * ACKs) */
	if (seq <= call->acks_dftv_seq) {
		unsigned delta = call->acks_dftv_seq - seq;

		if (count <= delta) {
			_leave(" = 0 [all definitively ACK'd]");
			return 0;
		}

		seq += delta;
		count -= delta;
		msg->offset += delta;
	}

	highest = seq + count - 1;
	resend = 0;
	while (count > 0) {
		/* extract up to 16 ACK slots at a time */
		chunk = min(count, sizeof(acks));
		count -= chunk;

		memset(acks, 2, sizeof(acks));

		if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) {
			printk("Rx Received short ACK packet\n");
			_leave(" = -EINVAL");
			return -EINVAL;
		}
		msg->offset += chunk;

		/* check that the ACK set is valid */
		for (ix = 0; ix < chunk; ix++) {
			switch (acks[ix]) {
			case RXRPC_ACK_TYPE_ACK:
				break;
			case RXRPC_ACK_TYPE_NACK:
				resend = 1;
				break;
			default:
				printk("Rx Received unsupported ACK state"
				       " %u\n", acks[ix]);
				_leave(" = -EINVAL");
				return -EINVAL;
			}
		}

		_proto("Rx ACK of packets #%u-#%u "
		       "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
		       seq, (unsigned) (seq + chunk - 1),
		       _acktype[acks[0x0]],
		       _acktype[acks[0x1]],
		       _acktype[acks[0x2]],
		       _acktype[acks[0x3]],
		       _acktype[acks[0x4]],
		       _acktype[acks[0x5]],
		       _acktype[acks[0x6]],
		       _acktype[acks[0x7]],
		       _acktype[acks[0x8]],
		       _acktype[acks[0x9]],
		       _acktype[acks[0xA]],
		       _acktype[acks[0xB]],
		       _acktype[acks[0xC]],
		       _acktype[acks[0xD]],
		       _acktype[acks[0xE]],
		       _acktype[acks[0xF]],
		       call->acks_pend_cnt
		       );

		/* mark the packets in the ACK queue as being provisionally
		 * ACK'd */
		ix = 0;
		spin_lock(&call->lock);

		/* find the first packet ACK'd/NAK'd here */
		list_for_each(_p, &call->acks_pendq) {
			dmsg = list_entry(_p, struct rxrpc_message, link);
			if (dmsg->seq == seq)
				goto found_first;
			_debug("- %u: skipping #%u", ix, dmsg->seq);
		}
		goto bad_queue;

	found_first:
		do {
			_debug("- %u: processing #%u (%c) apc=%u",
			       ix, dmsg->seq, _acktype[acks[ix]],
			       call->acks_pend_cnt);

			if (acks[ix] == RXRPC_ACK_TYPE_ACK) {
				if (dmsg->state == RXRPC_MSG_SENT)
					call->acks_pend_cnt--;
				dmsg->state = RXRPC_MSG_ACKED;
			}
			else {
				if (dmsg->state == RXRPC_MSG_ACKED)
					call->acks_pend_cnt++;
				dmsg->state = RXRPC_MSG_SENT;
			}
			ix++;
			seq++;

			_p = dmsg->link.next;
			dmsg = list_entry(_p, struct rxrpc_message, link);
		} while (ix < chunk &&
			 _p != &call->acks_pendq &&
			 dmsg->seq == seq);

		if (ix < chunk)
			goto bad_queue;

		spin_unlock(&call->lock);
	}

	if (resend)
		rxrpc_call_resend(call, highest);

	/* if all packets are provisionally ACK'd, then wake up anyone who's
	 * waiting for that */
	now_complete = 0;
	spin_lock(&call->lock);
	if (call->acks_pend_cnt == 0) {
		if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
			call->app_call_state = RXRPC_CSTATE_COMPLETE;
			_state(call);
		}
		now_complete = 1;
	}
	spin_unlock(&call->lock);

	if (now_complete) {
		_debug("- wake up waiters");
		del_timer_sync(&call->acks_timeout);
		del_timer_sync(&call->rcv_timeout);
		del_timer_sync(&call->ackr_dfr_timo);
		call->app_attn_func(call);
	}

	_leave(" = 0 (apc=%u)", call->acks_pend_cnt);
	return 0;

 bad_queue:
	panic("%s(): acks_pendq in bad state (packet #%u absent)\n",
	      __FUNCTION__, seq);
} /* end rxrpc_call_record_ACK() */
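/*
 * Example of the chunked walk above (illustrative numbers): an ACK bitmap
 * covering 20 packets starting at seq #8 is consumed as one 16-slot chunk
 * (#8-#23) and one 4-slot chunk (#24-#27).  The unused tail of the 16-byte
 * buffer is pre-filled with the value 2, which indexes the '-' character in
 * _acktype[] = "NA-", so the debug line prints a dash for slots the packet
 * did not actually cover.
 */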
/*****************************************************************************/
/*
 * transfer data from the ready packet queue to the asynchronous read buffer
 * - since this func is the only one going to look at packets queued on
 *   app_readyq, we don't need a lock to modify or access them, only to modify
 *   the queue pointers
 * - called with call->lock held
 * - the buffer must be in kernel space
 * - returns:
 *	0 if buffer filled
 *	-EAGAIN if buffer not filled and more data to come
 *	-EBADMSG if last packet received and insufficient data left
 *	-ECONNABORTED if the call is in an error state
 */
static int __rxrpc_call_read_data(struct rxrpc_call *call)
{
	struct rxrpc_message *msg;
	size_t qty;
	int ret;

	_enter("%p{as=%d buf=%p qty=%Zu/%Zu}",
	       call,
	       call->app_async_read, call->app_read_buf,
	       call->app_ready_qty, call->app_mark);

	/* check the state */
	switch (call->app_call_state) {
	case RXRPC_CSTATE_SRVR_RCV_ARGS:
	case RXRPC_CSTATE_CLNT_RCV_REPLY:
		if (call->app_last_rcv) {
			printk("%s(%p,%p,%Zd):"
			       " Inconsistent call state (%s, last pkt)",
			       __FUNCTION__,
			       call, call->app_read_buf, call->app_mark,
			       rxrpc_call_states[call->app_call_state]);
			BUG();
		}
		break;

	case RXRPC_CSTATE_SRVR_RCV_OPID:
	case RXRPC_CSTATE_SRVR_GOT_ARGS:
	case RXRPC_CSTATE_CLNT_GOT_REPLY:
		break;

	case RXRPC_CSTATE_SRVR_SND_REPLY:
		if (!call->app_last_rcv) {
			printk("%s(%p,%p,%Zd):"
			       " Inconsistent call state (%s, not last pkt)",
			       __FUNCTION__,
			       call, call->app_read_buf, call->app_mark,
			       rxrpc_call_states[call->app_call_state]);
			BUG();
		}
		_debug("Trying to read data from call in SND_REPLY state");
		break;

	case RXRPC_CSTATE_ERROR:
		_leave(" = -ECONNABORTED");
		return -ECONNABORTED;

	default:
		printk("reading in unexpected state [[[ %u ]]]\n",
		       call->app_call_state);
		BUG();
	}

	/* handle the case of not having an async buffer */
	if (!call->app_async_read) {
		if (call->app_mark == RXRPC_APP_MARK_EOF) {
			ret = call->app_last_rcv ? 0 : -EAGAIN;
		}
		else {
			if (call->app_mark >= call->app_ready_qty) {
				call->app_mark = RXRPC_APP_MARK_EOF;
				ret = 0;
			}
			else {
				ret = call->app_last_rcv ? -EBADMSG : -EAGAIN;
			}
		}

		_leave(" = %d [no buf]", ret);
		return ret;
	}

	while (!list_empty(&call->app_readyq) && call->app_mark > 0) {
		msg = list_entry(call->app_readyq.next,
				 struct rxrpc_message, link);

		/* drag as much data as we need out of this packet */
		qty = min(call->app_mark, msg->dsize);

		_debug("reading %Zu from skb=%p off=%lu",
		       qty, msg->pkt, msg->offset);

		if (call->app_read_buf)
			if (skb_copy_bits(msg->pkt, msg->offset,
					  call->app_read_buf, qty) < 0)
				panic("%s: Failed to copy data from packet:"
				      " (%p,%p,%Zd)",
				      __FUNCTION__,
				      call, call->app_read_buf, qty);

		/* if that packet is now empty, discard it */
		call->app_ready_qty -= qty;
		msg->dsize -= qty;

		if (msg->dsize == 0) {
			list_del_init(&msg->link);
			rxrpc_put_message(msg);
		}
		else {
			msg->offset += qty;
		}

		call->app_mark -= qty;
		if (call->app_read_buf)
			call->app_read_buf += qty;
	}

	if (call->app_mark == 0) {
		call->app_async_read = 0;
		call->app_mark = RXRPC_APP_MARK_EOF;
		call->app_read_buf = NULL;

		/* adjust the state if we've used up all the packets */
		if (list_empty(&call->app_readyq) && call->app_last_rcv) {
			switch (call->app_call_state) {
			case RXRPC_CSTATE_SRVR_RCV_OPID:
				call->app_call_state =
					RXRPC_CSTATE_SRVR_SND_REPLY;
				call->app_mark = RXRPC_APP_MARK_EOF;
				_state(call);
				del_timer_sync(&call->rcv_timeout);
				break;
			case RXRPC_CSTATE_SRVR_GOT_ARGS:
				call->app_call_state =
					RXRPC_CSTATE_SRVR_SND_REPLY;
				_state(call);
				del_timer_sync(&call->rcv_timeout);
				break;
			default:
				call->app_call_state = RXRPC_CSTATE_COMPLETE;
				_state(call);
				del_timer_sync(&call->acks_timeout);
				del_timer_sync(&call->ackr_dfr_timo);
				del_timer_sync(&call->rcv_timeout);
				break;
			}
		}

		_leave(" = 0");
		return 0;
	}

	if (call->app_last_rcv) {
		_debug("Insufficient data (%Zu/%Zu)",
		       call->app_ready_qty, call->app_mark);
		call->app_async_read = 0;
		call->app_mark = RXRPC_APP_MARK_EOF;
		call->app_read_buf = NULL;

		_leave(" = -EBADMSG");
		return -EBADMSG;
	}

	_leave(" = -EAGAIN");
	return -EAGAIN;
} /* end __rxrpc_call_read_data() */
/*****************************************************************************/
/*
 * attempt to read the specified amount of data from the call's ready queue
 * into the buffer provided
 * - since this func is the only one going to look at packets queued on
 *   app_readyq, we don't need a lock to modify or access them, only to modify
 *   the queue pointers
 * - if the buffer pointer is NULL, then data is merely drained, not copied
 * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there
 *   is enough data or an error occurs
 * - note that the caller must have added the calling task to the call's wait
 *   queue beforehand
 * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this
 *   function doesn't read all available data
 */
int rxrpc_call_read_data(struct rxrpc_call *call,
			 void *buffer, size_t size, int flags)
{
	int ret;

	_enter("%p{arq=%Zu},%p,%Zd,%x",
	       call, call->app_ready_qty, buffer, size, flags);

	spin_lock(&call->lock);

	if (unlikely(!!call->app_read_buf)) {
		spin_unlock(&call->lock);
		_leave(" = -EBUSY");
		return -EBUSY;
	}

	call->app_mark = size;
	call->app_read_buf = buffer;
	call->app_async_read = 1;
	call->app_read_count++;

	/* read as much data as possible */
	ret = __rxrpc_call_read_data(call);
	switch (ret) {
	case 0:
		/* the request was satisfied in full */
		if (flags & RXRPC_CALL_READ_ALL &&
		    (!call->app_last_rcv || call->app_ready_qty > 0)) {
			_leave(" = -EBADMSG");
			__rxrpc_call_abort(call, -EBADMSG);
			return -EBADMSG;
		}

		spin_unlock(&call->lock);
		call->app_attn_func(call);
		_leave(" = 0");
		return ret;

	case -ECONNABORTED:
		spin_unlock(&call->lock);
		_leave(" = %d [aborted]", ret);
		return ret;

	default:
		__rxrpc_call_abort(call, ret);
		_leave(" = %d", ret);
		return ret;

	case -EAGAIN:
		spin_unlock(&call->lock);

		if (!(flags & RXRPC_CALL_READ_BLOCK)) {
			_leave(" = -EAGAIN");
			return -EAGAIN;
		}

		/* wait for the data to arrive */
		_debug("blocking for data arrival");

		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!call->app_async_read || signal_pending(current))
				break;
			schedule();
		}
		set_current_state(TASK_RUNNING);

		if (signal_pending(current)) {
			_leave(" = -EINTR");
			return -EINTR;
		}

		if (call->app_call_state == RXRPC_CSTATE_ERROR) {
			_leave(" = -ECONNABORTED");
			return -ECONNABORTED;
		}

		_leave(" = 0");
		return 0;
	}
} /* end rxrpc_call_read_data() */
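
/*
 * Illustrative sketch, not part of the original file: a minimal blocking
 * read of a fixed-size operation ID off a call.  The helper name and the
 * byte-order conversion are assumptions; as noted above, the caller must
 * already have added itself to the call's wait queue before blocking.
 */
#if 0 /* example only */
static int rxrpc_example_read_opid(struct rxrpc_call *call, u32 *opid)
{
	int ret;

	/* block until sizeof(*opid) bytes are available, the last packet
	 * arrives short (-EBADMSG) or the call aborts (-ECONNABORTED) */
	ret = rxrpc_call_read_data(call, opid, sizeof(*opid),
				   RXRPC_CALL_READ_BLOCK);
	if (ret == 0)
		*opid = ntohl(*opid);
	return ret;
}
#endif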
/*****************************************************************************/
/*
 * write data to a call
 * - the data may not be sent immediately if it doesn't fill a buffer
 * - if we can't queue all the data for buffering now, siov[] will have been
 *   adjusted to take account of what has been sent
 */
int rxrpc_call_write_data(struct rxrpc_call *call,
			  size_t sioc,
			  struct kvec *siov,
			  u8 rxhdr_flags,
			  int alloc_flags,
			  int dup_data,
			  size_t *size_sent)
{
	struct rxrpc_message *msg;
	struct kvec *sptr;
	size_t space, size, chunk, tmp;
	char *buf;
	int ret;

	_enter("%p,%Zu,%p,%02x,%x,%d,%p",
	       call, sioc, siov, rxhdr_flags, alloc_flags, dup_data,
	       size_sent);

	*size_sent = 0;
	size = 0;
	ret = -EINVAL;

	/* can't send more if we've sent last packet from this end */
	switch (call->app_call_state) {
	case RXRPC_CSTATE_SRVR_SND_REPLY:
	case RXRPC_CSTATE_CLNT_SND_ARGS:
		break;
	case RXRPC_CSTATE_ERROR:
		ret = call->app_errno;
		/* fall through */
	default:
		goto out;
	}

	/* calculate how much data we've been given */
	sptr = siov;
	for (; sioc > 0; sptr++, sioc--) {
		if (!sptr->iov_len)
			continue;

		if (!sptr->iov_base)
			goto out;

		size += sptr->iov_len;
	}

	_debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size);

	do {
		/* make sure there's a message under construction */
		if (!call->snd_nextmsg) {
			/* no - allocate a message with no data yet attached */
			ret = rxrpc_conn_newmsg(call->conn, call,
						RXRPC_PACKET_TYPE_DATA,
						0, NULL, alloc_flags,
						&call->snd_nextmsg);
			if (ret < 0)
				goto out;
			_debug("- allocated new message [ds=%Zu]",
			       call->snd_nextmsg->dsize);
		}

		msg = call->snd_nextmsg;
		msg->hdr.flags |= rxhdr_flags;

		/* deal with zero-length terminal packet */
		if (size == 0) {
			if (rxhdr_flags & RXRPC_LAST_PACKET) {
				ret = rxrpc_call_flush(call);
				if (ret < 0)
					goto out;
			}
			break;
		}

		/* work out how much space current packet has available */
		space = call->conn->mtu_size - msg->dsize;
		chunk = min(space, size);

		_debug("- [before] space=%Zu chunk=%Zu", space, chunk);

		while (!siov->iov_len)
			siov++;

		/* if we are going to have to duplicate the data then coalesce
		 * it too */
		if (dup_data) {
			/* don't allocate more than 1 page at a time */
			if (chunk > PAGE_SIZE)
				chunk = PAGE_SIZE;

			/* allocate a data buffer and attach to the message */
			buf = kmalloc(chunk, alloc_flags);
			if (unlikely(!buf)) {
				if (msg->dsize ==
				    sizeof(struct rxrpc_header)) {
					/* discard an empty msg and wind back
					 * the seq counter */
					rxrpc_put_message(msg);
					call->snd_nextmsg = NULL;
					call->snd_seq_count--;
				}

				ret = -ENOMEM;
				goto out;
			}

			tmp = msg->dcount++;
			set_bit(tmp, &msg->dfree);
			msg->data[tmp].iov_base = buf;
			msg->data[tmp].iov_len = chunk;
			msg->dsize += chunk;
			*size_sent += chunk;
			size -= chunk;

			/* load the buffer with data */
			while (chunk > 0) {
				tmp = min(chunk, siov->iov_len);
				memcpy(buf, siov->iov_base, tmp);
				buf += tmp;
				siov->iov_base += tmp;
				siov->iov_len -= tmp;
				if (!siov->iov_len)
					siov++;
				chunk -= tmp;
			}
		}
		else {
			/* we want to attach the supplied buffers directly */
			while (chunk > 0 &&
			       msg->dcount < RXRPC_MSG_MAX_IOCS) {
				tmp = msg->dcount++;
				msg->data[tmp].iov_base = siov->iov_base;
				msg->data[tmp].iov_len = siov->iov_len;
				msg->dsize += siov->iov_len;
				*size_sent += siov->iov_len;
				size -= siov->iov_len;
				chunk -= siov->iov_len;
				siov++;
			}
		}

		_debug("- [loaded] chunk=%Zu size=%Zu", chunk, size);

		/* dispatch the message when full, final or requesting ACK */
		if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) {
			ret = rxrpc_call_flush(call);
			if (ret < 0)
				goto out;
		}

	} while (size > 0);

	ret = 0;
 out:
	_leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size);
	return ret;
} /* end rxrpc_call_write_data() */
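
/*
 * Illustrative sketch, not part of the original file: queueing a complete
 * reply in one call.  RXRPC_LAST_PACKET marks the final DATA packet and
 * forces a flush inside rxrpc_call_write_data(); dup_data is non-zero so
 * the caller's buffer is copied and need not outlive the call.  A
 * zero-length write with RXRPC_LAST_PACKET would likewise emit an empty
 * terminal packet.  The helper name is an assumption.
 */
#if 0 /* example only */
static int rxrpc_example_send_reply(struct rxrpc_call *call,
				    void *reply, size_t len)
{
	struct kvec iov[1];
	size_t sent;

	iov[0].iov_base = reply;
	iov[0].iov_len = len;

	return rxrpc_call_write_data(call, 1, iov, RXRPC_LAST_PACKET,
				     GFP_KERNEL, 1, &sent);
}
#endif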
/*****************************************************************************/
/*
 * flush outstanding packets to the network
 */
static int rxrpc_call_flush(struct rxrpc_call *call)
{
	struct rxrpc_message *msg;
	int ret = 0;

	_enter("%p", call);

	rxrpc_get_call(call);

	/* if there's a packet under construction, then dispatch it now */
	if (call->snd_nextmsg) {
		msg = call->snd_nextmsg;
		call->snd_nextmsg = NULL;

		if (msg->hdr.flags & RXRPC_LAST_PACKET) {
			msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
			if (call->app_call_state != RXRPC_CSTATE_CLNT_SND_ARGS)
				msg->hdr.flags |= RXRPC_REQUEST_ACK;
		}
		else {
			msg->hdr.flags |= RXRPC_MORE_PACKETS;
		}

		_proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }",
		       msg->dsize, msg->dcount, msg->dfree);

		/* queue and adjust call state */
		spin_lock(&call->lock);
		list_add_tail(&msg->link, &call->acks_pendq);

		/* decide what to do depending on current state and if this is
		 * the last packet */
		ret = -EINVAL;
		switch (call->app_call_state) {
		case RXRPC_CSTATE_SRVR_SND_REPLY:
			if (msg->hdr.flags & RXRPC_LAST_PACKET) {
				call->app_call_state =
					RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
				_state(call);
			}
			break;

		case RXRPC_CSTATE_CLNT_SND_ARGS:
			if (msg->hdr.flags & RXRPC_LAST_PACKET) {
				call->app_call_state =
					RXRPC_CSTATE_CLNT_RCV_REPLY;
				_state(call);
			}
			break;

		case RXRPC_CSTATE_ERROR:
			ret = call->app_errno;
			/* fall through */
		default:
			spin_unlock(&call->lock);
			goto out;
		}

		call->acks_pend_cnt++;

		mod_timer(&call->acks_timeout,
			  __rxrpc_rtt_based_timeout(call,
						    rxrpc_call_acks_timeout));

		spin_unlock(&call->lock);

		ret = rxrpc_conn_sendmsg(call->conn, msg);
		if (ret == 0)
			call->pkt_snd_count++;
	}

 out:
	rxrpc_put_call(call);

	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_call_flush() */
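
/*
 * Note: when the packet dispatched by rxrpc_call_flush() carries
 * RXRPC_LAST_PACKET, the flush also moves the call state on:
 *
 *	SRVR_SND_REPLY -> SRVR_RCV_FINAL_ACK	(whole reply now queued)
 *	CLNT_SND_ARGS  -> CLNT_RCV_REPLY	(whole request now queued)
 *
 * In any other state the function returns -EINVAL (or the call's error code
 * if the call is already in the error state).
 */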
/*****************************************************************************/
/*
 * resend NAK'd or unacknowledged packets up to the highest one specified
 */
static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
{
	struct rxrpc_message *msg;
	struct list_head *_p;
	rxrpc_seq_t seq = 0;

	_enter("%p,%u", call, highest);

	_proto("Rx Resend required");

	/* handle too many resends */
	if (call->snd_resend_cnt >= rxrpc_call_max_resend) {
		_debug("Aborting due to too many resends (rcv=%d)",
		       call->pkt_rcv_count);
		rxrpc_call_abort(call,
				 call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT);
		_leave("");
		return;
	}

	spin_lock(&call->lock);
	call->snd_resend_cnt++;
	for (;;) {
		/* determine which packet to consider resending next */
		if (seq <= call->acks_dftv_seq)
			seq = call->acks_dftv_seq;
		seq++;

		if (seq > highest)
			break;

		/* look for the packet in the pending-ACK queue */
		list_for_each(_p, &call->acks_pendq) {
			msg = list_entry(_p, struct rxrpc_message, link);
			if (msg->seq == seq)
				goto found_msg;
		}

		panic("%s(%p,%d):"
		      " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
		      __FUNCTION__, call, highest,
		      call->acks_dftv_seq, call->snd_seq_count, seq);

	found_msg:
		if (msg->state != RXRPC_MSG_SENT)
			continue; /* only un-ACK'd packets */

		rxrpc_get_message(msg);
		spin_unlock(&call->lock);

		/* send each message again (and ignore any errors we might
		 * incur) */
		_proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }",
		       msg->dsize, msg->dcount, msg->dfree);

		if (rxrpc_conn_sendmsg(call->conn, msg) == 0)
			call->pkt_snd_count++;

		rxrpc_put_message(msg);

		spin_lock(&call->lock);
	}

	/* reset the timeout */
	mod_timer(&call->acks_timeout,
		  __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout));

	spin_unlock(&call->lock);

	_leave("");
} /* end rxrpc_call_resend() */
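
/*
 * Note: rxrpc_call_resend() gives up after rxrpc_call_max_resend attempts,
 * aborting the call with -EIO if we have ever received anything from the
 * peer on this call and with -ETIMEDOUT if we have not.
 */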
/*****************************************************************************/
/*
 * handle an ICMP error being applied to a call
 */
void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
{
	_enter("%p{%u},%d", call, ntohl(call->call_id), errno);

	/* if this call is already aborted, then just wake up any waiters */
	if (call->app_call_state == RXRPC_CSTATE_ERROR) {
		call->app_error_func(call);
	}
	else {
		/* tell the app layer what happened */
		spin_lock(&call->lock);

		call->app_call_state = RXRPC_CSTATE_ERROR;
		_state(call);
		if (local)
			call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR;
		else
			call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR;
		call->app_errno = errno;
		call->app_mark = RXRPC_APP_MARK_EOF;
		call->app_read_buf = NULL;
		call->app_async_read = 0;

		/* map the error */
		call->app_aemap_func(call);

		del_timer_sync(&call->acks_timeout);
		del_timer_sync(&call->rcv_timeout);
		del_timer_sync(&call->ackr_dfr_timo);

		spin_unlock(&call->lock);

		call->app_error_func(call);
	}

	_leave("");
} /* end rxrpc_call_handle_error() */
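
/*
 * Illustrative sketch, not part of the original file: how a transport-level
 * ICMP handler might apply a network error to an affected call.  The helper
 * name is an assumption; local is non-zero because the error was detected
 * on this host rather than reported by the peer.
 */
#if 0 /* example only */
static void rxrpc_example_icmp_unreachable(struct rxrpc_call *call)
{
	rxrpc_call_handle_error(call, 1, -ENETUNREACH);
}
#endif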