/* caif_socket.c */
  1. /*
  2. * Copyright (C) ST-Ericsson AB 2010
  3. * Author: Sjur Brendeland sjur.brandeland@stericsson.com
  4. * Per Sigmond per.sigmond@stericsson.com
  5. * License terms: GNU General Public License (GPL) version 2
  6. */
  7. #include <linux/fs.h>
  8. #include <linux/init.h>
  9. #include <linux/module.h>
  10. #include <linux/sched.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/mutex.h>
  13. #include <linux/list.h>
  14. #include <linux/wait.h>
  15. #include <linux/poll.h>
  16. #include <linux/tcp.h>
  17. #include <linux/uaccess.h>
  18. #include <asm/atomic.h>
  19. #include <linux/caif/caif_socket.h>
  20. #include <net/caif/caif_layer.h>
  21. #include <net/caif/caif_dev.h>
  22. #include <net/caif/cfpkt.h>
  23. MODULE_LICENSE("GPL");
  24. #define CHNL_SKT_READ_QUEUE_HIGH 200
  25. #define CHNL_SKT_READ_QUEUE_LOW 100
  26. static int caif_sockbuf_size = 40000;
  27. static atomic_t caif_nr_socks = ATOMIC_INIT(0);
  28. #define CONN_STATE_OPEN_BIT 1
  29. #define CONN_STATE_PENDING_BIT 2
  30. #define CONN_STATE_PEND_DESTROY_BIT 3
  31. #define CONN_REMOTE_SHUTDOWN_BIT 4
  32. #define TX_FLOW_ON_BIT 1
  33. #define RX_FLOW_ON_BIT 2
  34. #define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\
  35. (void *) &(cf_sk)->conn_state)
  36. #define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\
  37. (void *) &(cf_sk)->conn_state)
  38. #define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\
  39. (void *) &(cf_sk)->conn_state)
  40. #define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\
  41. (void *) &(cf_sk)->conn_state)
  42. #define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\
  43. (void *) &(cf_sk)->conn_state)
  44. #define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\
  45. (void *) &(cf_sk)->conn_state)
  46. #define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\
  47. (void *) &(cf_sk)->conn_state)
  48. #define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\
  49. (void *) &(cf_sk)->conn_state)
  50. #define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\
  51. (void *) &(cf_sk)->conn_state)
  52. #define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\
  53. (void *) &(cf_sk)->conn_state)
  54. #define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\
  55. (void *) &(dev)->conn_state)
  56. #define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\
  57. (void *) &(cf_sk)->flow_state)
  58. #define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\
  59. (void *) &(cf_sk)->flow_state)
  60. #define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\
  61. (void *) &(cf_sk)->flow_state)
  62. #define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\
  63. (void *) &(cf_sk)->flow_state)
  64. #define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\
  65. (void *) &(cf_sk)->flow_state)
  66. #define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\
  67. (void *) &(cf_sk)->flow_state)
  68. #define SKT_READ_FLAG 0x01
  69. #define SKT_WRITE_FLAG 0x02
  70. static struct dentry *debugfsdir;
  71. #include <linux/debugfs.h>
#ifdef CONFIG_DEBUG_FS
/*
 * Cumulative event counters exported through debugfs.  They are bumped
 * from the receive/flow-control callbacks and the sendmsg path; the
 * remaining fields are presumably updated by code outside this chunk —
 * TODO confirm against the rest of the file.
 */
struct debug_fs_counter {
	atomic_t num_open;		/* connect attempts (caif_connect) */
	atomic_t num_close;
	atomic_t num_init;		/* channel registrations sent */
	atomic_t num_init_resp;		/* CAIF_CTRLCMD_INIT_RSP received */
	atomic_t num_init_fail_resp;	/* CAIF_CTRLCMD_INIT_FAIL_RSP received */
	atomic_t num_deinit;
	atomic_t num_deinit_resp;	/* CAIF_CTRLCMD_DEINIT_RSP received */
	atomic_t num_remote_shutdown_ind;
	atomic_t num_tx_flow_off_ind;	/* modem told us to stop sending */
	atomic_t num_tx_flow_on_ind;	/* modem told us to resume sending */
	atomic_t num_rx_flow_off;	/* we throttled the modem (queue high) */
	atomic_t num_rx_flow_on;	/* we unthrottled the modem (queue low) */
	atomic_t skb_in_use;		/* tx skbs alive (alloc - free) */
	atomic_t skb_alloc;
	atomic_t skb_free;
};
static struct debug_fs_counter cnt;
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
/* Debugfs disabled: counter updates compile away to nothing. */
#define dbfs_atomic_inc(v)
#define dbfs_atomic_dec(v)
#endif
/* The AF_CAIF socket */
struct caifsock {
	/* NOTE: sk has to be the first member */
	struct sock sk;
	struct cflayer layer;	/* our layer in the CAIF stack; receive and
				 * ctrlcmd callbacks are wired to it */
	char name[CAIF_LAYER_NAME_SZ];
	u32 conn_state;		/* CONN_STATE_* bits, atomic bitops only */
	u32 flow_state;		/* TX/RX_FLOW_ON_BIT, atomic bitops only */
	struct cfpktq *pktq;	/* receive queue; has its own internal lock */
	int file_mode;		/* SKT_READ_FLAG | SKT_WRITE_FLAG */
	struct caif_connect_request conn_req;	/* parameters gathered via
						 * setsockopt and connect */
	int read_queue_len;	/* packets currently in pktq (for flow ctrl) */
	/* protect updates of read_queue_len */
	spinlock_t read_queue_len_lock;
	struct dentry *debugfs_socket_dir;	/* per-socket debugfs dir */
};

/* Forward declaration: used by the connect path below. */
static void drain_queue(struct caifsock *cf_sk);
  114. /* Packet Receive Callback function called from CAIF Stack */
  115. static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
  116. {
  117. struct caifsock *cf_sk;
  118. int read_queue_high;
  119. cf_sk = container_of(layr, struct caifsock, layer);
  120. if (!STATE_IS_OPEN(cf_sk)) {
  121. /*FIXME: This should be allowed finally!*/
  122. pr_debug("CAIF: %s(): called after close request\n", __func__);
  123. cfpkt_destroy(pkt);
  124. return 0;
  125. }
  126. /* NOTE: This function may be called in Tasklet context! */
  127. /* The queue has its own lock */
  128. cfpkt_queue(cf_sk->pktq, pkt, 0);
  129. spin_lock(&cf_sk->read_queue_len_lock);
  130. cf_sk->read_queue_len++;
  131. read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH);
  132. spin_unlock(&cf_sk->read_queue_len_lock);
  133. if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) {
  134. dbfs_atomic_inc(&cnt.num_rx_flow_off);
  135. SET_RX_FLOW_OFF(cf_sk);
  136. /* Send flow off (NOTE: must not sleep) */
  137. pr_debug("CAIF: %s():"
  138. " sending flow OFF (queue len = %d)\n",
  139. __func__,
  140. cf_sk->read_queue_len);
  141. caif_assert(cf_sk->layer.dn);
  142. caif_assert(cf_sk->layer.dn->ctrlcmd);
  143. (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
  144. CAIF_MODEMCMD_FLOW_OFF_REQ);
  145. }
  146. /* Signal reader that data is available. */
  147. wake_up_interruptible(cf_sk->sk.sk_sleep);
  148. return 0;
  149. }
/*
 * Packet Flow Control Callback function called from CAIF.
 * Translates stack control commands into socket state-bit updates and
 * wakes sleepers blocked in connect/send/recv.  May run in tasklet
 * context: must not sleep.  @phyid is unused here.
 */
static void caif_sktflowctrl_cb(struct cflayer *layr,
				enum caif_ctrlcmd flow,
				int phyid)
{
	struct caifsock *cf_sk;

	/* NOTE: This function may be called in Tasklet context! */
	pr_debug("CAIF: %s(): flowctrl func called: %s.\n",
		 __func__,
		 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
		 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
		 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" :
		 flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" :
		 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" :
		 flow ==
		 CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" :
		 "UKNOWN CTRL COMMAND");
	if (layr == NULL)
		return;
	cf_sk = container_of(layr, struct caifsock, layer);
	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* Modem permits transmission again; wake blocked writers. */
		dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
		SET_TX_FLOW_ON(cf_sk);
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;
	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to stop sending; writers will block. */
		dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
		SET_TX_FLOW_OFF(cf_sk);
		break;
	case CAIF_CTRLCMD_INIT_RSP:
		/* Connect completed: clear PENDING, enable tx, wake the
		 * thread sleeping in caif_connect. */
		dbfs_atomic_inc(&cnt.num_init_resp);
		caif_assert(STATE_IS_OPEN(cf_sk));
		SET_PENDING_OFF(cf_sk);
		SET_TX_FLOW_ON(cf_sk);
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;
	case CAIF_CTRLCMD_DEINIT_RSP:
		/* Teardown acknowledged; drop the reference taken when the
		 * teardown was initiated (sock_put below). */
		dbfs_atomic_inc(&cnt.num_deinit_resp);
		caif_assert(!STATE_IS_OPEN(cf_sk));
		SET_PENDING_OFF(cf_sk);
		if (!STATE_IS_PENDING_DESTROY(cf_sk)) {
			/* sk_sleep may already be gone if the socket is
			 * being destroyed concurrently. */
			if (cf_sk->sk.sk_sleep != NULL)
				wake_up_interruptible(cf_sk->sk.sk_sleep);
		}
		dbfs_atomic_inc(&cnt.num_deinit);
		sock_put(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect refused by lower layers: close and wake waiter. */
		dbfs_atomic_inc(&cnt.num_init_fail_resp);
		caif_assert(STATE_IS_OPEN(cf_sk));
		SET_STATE_CLOSED(cf_sk);
		SET_PENDING_OFF(cf_sk);
		SET_TX_FLOW_OFF(cf_sk);
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;
	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
		SET_REMOTE_SHUTDOWN(cf_sk);
		/* Use sk_shutdown to indicate remote shutdown indication */
		cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN;
		cf_sk->file_mode = 0;
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;
	default:
		pr_debug("CAIF: %s(): Unexpected flow command %d\n",
			 __func__, flow);
	}
}
/*
 * Destructor attached to tx skbs allocated in caif_sendmsg(); only
 * maintains the debugfs skb accounting (no-op when CONFIG_DEBUG_FS
 * is disabled).
 */
static void skb_destructor(struct sk_buff *skb)
{
	dbfs_atomic_inc(&cnt.skb_free);
	dbfs_atomic_dec(&cnt.skb_in_use);
}
/*
 * Receive one CAIF packet into the (single) user iovec.
 *
 * Blocks (unless MSG_DONTWAIT) while the connect is still pending and
 * while the receive queue is empty.  SOCK_SEQPACKET rejects a too-small
 * buffer with -EMSGSIZE; other socket types deliver a partial read and
 * leave the remainder of the packet queued (advanced via skb_pull).
 * Sends flow-ON to the modem when the queue drains below
 * CHNL_SKT_READ_QUEUE_LOW.  Returns bytes copied or a negative errno
 * (-EOPNOTSUPP, -EINVAL, -EBADF, -EPIPE, -EAGAIN, -ERESTARTSYS,
 * -ESHUTDOWN, -EMSGSIZE, -EFAULT).
 */
static int caif_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	struct cfpkt *pkt = NULL;
	size_t len;
	int result;
	struct sk_buff *skb;
	ssize_t ret = -EIO;
	int read_queue_low;

	if (cf_sk == NULL) {
		/* NOTE(review): this runs before lock_sock(), yet jumps to
		 * read_error which calls release_sock() — unbalanced; in
		 * practice container_of() cannot yield NULL here. */
		pr_debug("CAIF: %s(): private_data not set!\n",
			 __func__);
		ret = -EBADFD;
		goto read_error;
	}

	/* Don't do multiple iovec entries yet */
	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(&(cf_sk->sk));

	caif_assert(cf_sk->pktq);

	if (!STATE_IS_OPEN(cf_sk)) {
		/* Socket is closed or closing. */
		if (!STATE_IS_PENDING(cf_sk)) {
			pr_debug("CAIF: %s(): socket is closed (by remote)\n",
				 __func__);
			ret = -EPIPE;
		} else {
			pr_debug("CAIF: %s(): socket is closing..\n", __func__);
			ret = -EBADF;
		}
		goto read_error;
	}
	/* Socket is open or opening. */
	if (STATE_IS_PENDING(cf_sk)) {
		pr_debug("CAIF: %s(): socket is opening...\n", __func__);

		if (flags & MSG_DONTWAIT) {
			/* We can't block. */
			pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n",
				 __func__);
			ret = -EAGAIN;
			goto read_error;
		}

		/*
		 * Blocking mode; state is pending and we need to wait
		 * for its conclusion.
		 */
		release_sock(&cf_sk->sk);

		result =
		    wait_event_interruptible(*cf_sk->sk.sk_sleep,
					     !STATE_IS_PENDING(cf_sk));

		lock_sock(&(cf_sk->sk));

		if (result == -ERESTARTSYS) {
			pr_debug("CAIF: %s(): wait_event_interruptible"
				 " woken by a signal (1)", __func__);
			ret = -ERESTARTSYS;
			goto read_error;
		}
	}

	/* Re-check: the connect may have failed or the peer shut down
	 * while we slept. */
	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
	    !STATE_IS_OPEN(cf_sk) ||
	    STATE_IS_PENDING(cf_sk)) {
		pr_debug("CAIF: %s(): socket closed\n",
			 __func__);
		ret = -ESHUTDOWN;
		goto read_error;
	}

	/*
	 * Block if we don't have any received buffers.
	 * The queue has its own lock.
	 */
	while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) {

		if (flags & MSG_DONTWAIT) {
			pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__);
			ret = -EAGAIN;
			goto read_error;
		}
		trace_printk("CAIF: %s() wait_event\n", __func__);

		/* Let writers in. */
		release_sock(&cf_sk->sk);

		/* Block reader until data arrives or socket is closed. */
		if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
					     cfpkt_qpeek(cf_sk->pktq)
					     || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
					     || !STATE_IS_OPEN(cf_sk)) ==
		    -ERESTARTSYS) {
			pr_debug("CAIF: %s():"
				 " wait_event_interruptible woken by "
				 "a signal, signal_pending(current) = %d\n",
				 __func__,
				 signal_pending(current));
			/* Lock already dropped above, so return directly
			 * rather than via read_error. */
			return -ERESTARTSYS;
		}

		trace_printk("CAIF: %s() awake\n", __func__);
		if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
			pr_debug("CAIF: %s(): "
				 "received remote_shutdown indication\n",
				 __func__);
			ret = -ESHUTDOWN;
			goto read_error_no_unlock;
		}

		/* I want to be alone on cf_sk (except status and queue). */
		lock_sock(&(cf_sk->sk));

		if (!STATE_IS_OPEN(cf_sk)) {
			/* Someone closed the link, report error. */
			pr_debug("CAIF: %s(): remote end shutdown!\n",
				 __func__);
			ret = -EPIPE;
			goto read_error;
		}
	}

	/* The queue has its own lock. */
	len = cfpkt_getlen(pkt);

	/* Check max length that can be copied. */
	if (len <= buf_len)
		pkt = cfpkt_dequeue(cf_sk->pktq);
	else {
		pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n",
			 __func__, (long) len, (long) buf_len);
		if (sock->type == SOCK_SEQPACKET) {
			ret = -EMSGSIZE;
			goto read_error;
		}
		/* Stream case: partial read, packet stays queued. */
		len = buf_len;
	}

	spin_lock(&cf_sk->read_queue_len_lock);
	cf_sk->read_queue_len--;
	read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW);
	spin_unlock(&cf_sk->read_queue_len_lock);
	/* NOTE(review): in the partial-read branch the packet was NOT
	 * dequeued, yet the counter is decremented here — verify this
	 * matches the intended accounting for stream sockets. */

	if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) {

		dbfs_atomic_inc(&cnt.num_rx_flow_on);
		SET_RX_FLOW_ON(cf_sk);

		/* Send flow on. */
		pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n",
			 __func__, cf_sk->read_queue_len);
		caif_assert(cf_sk->layer.dn);
		caif_assert(cf_sk->layer.dn->ctrlcmd);
		(void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
						 CAIF_MODEMCMD_FLOW_ON_REQ);

		caif_assert(cf_sk->read_queue_len >= 0);
	}

	skb = cfpkt_tonative(pkt);
	result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
	skb_pull(skb, len);

	if (result) {
		pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__);
		cfpkt_destroy(pkt);
		ret = -EFAULT;
		goto read_error;
	}

	/* Free packet and remove from queue */
	if (skb->len == 0)
		skb_free_datagram(sk, skb);

	/* Let the others in. */
	release_sock(&cf_sk->sk);
	return len;

read_error:
	release_sock(&cf_sk->sk);
read_error_no_unlock:
	return ret;
}
  390. /* Send a signal as a consequence of sendmsg, sendto or caif_sendmsg. */
  391. static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock,
  392. struct msghdr *msg, size_t len)
  393. {
  394. struct sock *sk = sock->sk;
  395. struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
  396. size_t payload_size = msg->msg_iov->iov_len;
  397. struct cfpkt *pkt = NULL;
  398. struct caif_payload_info info;
  399. unsigned char *txbuf;
  400. ssize_t ret = -EIO;
  401. int result;
  402. struct sk_buff *skb;
  403. caif_assert(msg->msg_iovlen == 1);
  404. if (cf_sk == NULL) {
  405. pr_debug("CAIF: %s(): private_data not set!\n",
  406. __func__);
  407. ret = -EBADFD;
  408. goto write_error_no_unlock;
  409. }
  410. if (unlikely(msg->msg_iov->iov_base == NULL)) {
  411. pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__);
  412. ret = -EINVAL;
  413. goto write_error_no_unlock;
  414. }
  415. if (payload_size > CAIF_MAX_PAYLOAD_SIZE) {
  416. pr_debug("CAIF: %s(): buffer too long\n", __func__);
  417. if (sock->type == SOCK_SEQPACKET) {
  418. ret = -EINVAL;
  419. goto write_error_no_unlock;
  420. }
  421. payload_size = CAIF_MAX_PAYLOAD_SIZE;
  422. }
  423. /* I want to be alone on cf_sk (except status and queue) */
  424. lock_sock(&(cf_sk->sk));
  425. caif_assert(cf_sk->pktq);
  426. if (!STATE_IS_OPEN(cf_sk)) {
  427. /* Socket is closed or closing */
  428. if (!STATE_IS_PENDING(cf_sk)) {
  429. pr_debug("CAIF: %s(): socket is closed (by remote)\n",
  430. __func__);
  431. ret = -EPIPE;
  432. } else {
  433. pr_debug("CAIF: %s(): socket is closing...\n",
  434. __func__);
  435. ret = -EBADF;
  436. }
  437. goto write_error;
  438. }
  439. /* Socket is open or opening */
  440. if (STATE_IS_PENDING(cf_sk)) {
  441. pr_debug("CAIF: %s(): socket is opening...\n", __func__);
  442. if (msg->msg_flags & MSG_DONTWAIT) {
  443. /* We can't block */
  444. trace_printk("CAIF: %s():state pending:"
  445. "state=MSG_DONTWAIT\n", __func__);
  446. ret = -EAGAIN;
  447. goto write_error;
  448. }
  449. /* Let readers in */
  450. release_sock(&cf_sk->sk);
  451. /*
  452. * Blocking mode; state is pending and we need to wait
  453. * for its conclusion.
  454. */
  455. result =
  456. wait_event_interruptible(*cf_sk->sk.sk_sleep,
  457. !STATE_IS_PENDING(cf_sk));
  458. /* I want to be alone on cf_sk (except status and queue) */
  459. lock_sock(&(cf_sk->sk));
  460. if (result == -ERESTARTSYS) {
  461. pr_debug("CAIF: %s(): wait_event_interruptible"
  462. " woken by a signal (1)", __func__);
  463. ret = -ERESTARTSYS;
  464. goto write_error;
  465. }
  466. }
  467. if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
  468. !STATE_IS_OPEN(cf_sk) ||
  469. STATE_IS_PENDING(cf_sk)) {
  470. pr_debug("CAIF: %s(): socket closed\n",
  471. __func__);
  472. ret = -ESHUTDOWN;
  473. goto write_error;
  474. }
  475. if (!TX_FLOW_IS_ON(cf_sk)) {
  476. /* Flow is off. Check non-block flag */
  477. if (msg->msg_flags & MSG_DONTWAIT) {
  478. trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off",
  479. __func__);
  480. ret = -EAGAIN;
  481. goto write_error;
  482. }
  483. /* release lock before waiting */
  484. release_sock(&cf_sk->sk);
  485. /* Wait until flow is on or socket is closed */
  486. if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
  487. TX_FLOW_IS_ON(cf_sk)
  488. || !STATE_IS_OPEN(cf_sk)
  489. || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
  490. ) == -ERESTARTSYS) {
  491. pr_debug("CAIF: %s():"
  492. " wait_event_interruptible woken by a signal",
  493. __func__);
  494. ret = -ERESTARTSYS;
  495. goto write_error_no_unlock;
  496. }
  497. /* I want to be alone on cf_sk (except status and queue) */
  498. lock_sock(&(cf_sk->sk));
  499. if (!STATE_IS_OPEN(cf_sk)) {
  500. /* someone closed the link, report error */
  501. pr_debug("CAIF: %s(): remote end shutdown!\n",
  502. __func__);
  503. ret = -EPIPE;
  504. goto write_error;
  505. }
  506. if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
  507. pr_debug("CAIF: %s(): "
  508. "received remote_shutdown indication\n",
  509. __func__);
  510. ret = -ESHUTDOWN;
  511. goto write_error;
  512. }
  513. }
  514. pkt = cfpkt_create(payload_size);
  515. skb = (struct sk_buff *)pkt;
  516. skb->destructor = skb_destructor;
  517. skb->sk = sk;
  518. dbfs_atomic_inc(&cnt.skb_alloc);
  519. dbfs_atomic_inc(&cnt.skb_in_use);
  520. if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) {
  521. pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__);
  522. cfpkt_destroy(pkt);
  523. ret = -EINVAL;
  524. goto write_error;
  525. }
  526. /* Copy data into buffer. */
  527. if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) {
  528. pr_debug("CAIF: %s(): copy_from_user returned non zero.\n",
  529. __func__);
  530. cfpkt_destroy(pkt);
  531. ret = -EINVAL;
  532. goto write_error;
  533. }
  534. memset(&info, 0, sizeof(info));
  535. /* Send the packet down the stack. */
  536. caif_assert(cf_sk->layer.dn);
  537. caif_assert(cf_sk->layer.dn->transmit);
  538. do {
  539. ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
  540. if (likely((ret >= 0) || (ret != -EAGAIN)))
  541. break;
  542. /* EAGAIN - retry */
  543. if (msg->msg_flags & MSG_DONTWAIT) {
  544. pr_debug("CAIF: %s(): NONBLOCK and transmit failed,"
  545. " error = %ld\n", __func__, (long) ret);
  546. ret = -EAGAIN;
  547. goto write_error;
  548. }
  549. /* Let readers in */
  550. release_sock(&cf_sk->sk);
  551. /* Wait until flow is on or socket is closed */
  552. if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
  553. TX_FLOW_IS_ON(cf_sk)
  554. || !STATE_IS_OPEN(cf_sk)
  555. || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
  556. ) == -ERESTARTSYS) {
  557. pr_debug("CAIF: %s(): wait_event_interruptible"
  558. " woken by a signal", __func__);
  559. ret = -ERESTARTSYS;
  560. goto write_error_no_unlock;
  561. }
  562. /* I want to be alone on cf_sk (except status and queue) */
  563. lock_sock(&(cf_sk->sk));
  564. } while (ret == -EAGAIN);
  565. if (ret < 0) {
  566. cfpkt_destroy(pkt);
  567. pr_debug("CAIF: %s(): transmit failed, error = %ld\n",
  568. __func__, (long) ret);
  569. goto write_error;
  570. }
  571. release_sock(&cf_sk->sk);
  572. return payload_size;
  573. write_error:
  574. release_sock(&cf_sk->sk);
  575. write_error_no_unlock:
  576. return ret;
  577. }
  578. static unsigned int caif_poll(struct file *file, struct socket *sock,
  579. poll_table *wait)
  580. {
  581. struct sock *sk = sock->sk;
  582. struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
  583. u32 mask = 0;
  584. poll_wait(file, sk_sleep(sk), wait);
  585. lock_sock(&(cf_sk->sk));
  586. if (!STATE_IS_OPEN(cf_sk)) {
  587. if (!STATE_IS_PENDING(cf_sk))
  588. mask |= POLLHUP;
  589. } else {
  590. if (cfpkt_qpeek(cf_sk->pktq) != NULL)
  591. mask |= (POLLIN | POLLRDNORM);
  592. if (TX_FLOW_IS_ON(cf_sk))
  593. mask |= (POLLOUT | POLLWRNORM);
  594. }
  595. release_sock(&cf_sk->sk);
  596. trace_printk("CAIF: %s(): poll mask=0x%04x\n",
  597. __func__, mask);
  598. return mask;
  599. }
  600. static void drain_queue(struct caifsock *cf_sk)
  601. {
  602. struct cfpkt *pkt = NULL;
  603. /* Empty the queue */
  604. do {
  605. /* The queue has its own lock */
  606. if (!cf_sk->pktq)
  607. break;
  608. pkt = cfpkt_dequeue(cf_sk->pktq);
  609. if (!pkt)
  610. break;
  611. pr_debug("CAIF: %s(): freeing packet from read queue\n",
  612. __func__);
  613. cfpkt_destroy(pkt);
  614. } while (1);
  615. cf_sk->read_queue_len = 0;
  616. }
  617. static int setsockopt(struct socket *sock,
  618. int lvl, int opt, char __user *ov, unsigned int ol)
  619. {
  620. struct sock *sk = sock->sk;
  621. struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
  622. int prio, linksel;
  623. struct ifreq ifreq;
  624. if (STATE_IS_OPEN(cf_sk)) {
  625. pr_debug("CAIF: %s(): setsockopt "
  626. "cannot be done on a connected socket\n",
  627. __func__);
  628. return -ENOPROTOOPT;
  629. }
  630. switch (opt) {
  631. case CAIFSO_LINK_SELECT:
  632. if (ol < sizeof(int)) {
  633. pr_debug("CAIF: %s(): setsockopt"
  634. " CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
  635. return -EINVAL;
  636. }
  637. if (lvl != SOL_CAIF)
  638. goto bad_sol;
  639. if (copy_from_user(&linksel, ov, sizeof(int)))
  640. return -EINVAL;
  641. lock_sock(&(cf_sk->sk));
  642. cf_sk->conn_req.link_selector = linksel;
  643. release_sock(&cf_sk->sk);
  644. return 0;
  645. case SO_PRIORITY:
  646. if (lvl != SOL_SOCKET)
  647. goto bad_sol;
  648. if (ol < sizeof(int)) {
  649. pr_debug("CAIF: %s(): setsockopt"
  650. " SO_PRIORITY bad size\n", __func__);
  651. return -EINVAL;
  652. }
  653. if (copy_from_user(&prio, ov, sizeof(int)))
  654. return -EINVAL;
  655. lock_sock(&(cf_sk->sk));
  656. cf_sk->conn_req.priority = prio;
  657. pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__,
  658. cf_sk->conn_req.priority);
  659. release_sock(&cf_sk->sk);
  660. return 0;
  661. case SO_BINDTODEVICE:
  662. if (lvl != SOL_SOCKET)
  663. goto bad_sol;
  664. if (ol < sizeof(struct ifreq)) {
  665. pr_debug("CAIF: %s(): setsockopt"
  666. " SO_PRIORITY bad size\n", __func__);
  667. return -EINVAL;
  668. }
  669. if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
  670. return -EFAULT;
  671. lock_sock(&(cf_sk->sk));
  672. strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
  673. sizeof(cf_sk->conn_req.link_name));
  674. cf_sk->conn_req.link_name
  675. [sizeof(cf_sk->conn_req.link_name)-1] = 0;
  676. release_sock(&cf_sk->sk);
  677. return 0;
  678. case CAIFSO_REQ_PARAM:
  679. if (lvl != SOL_CAIF)
  680. goto bad_sol;
  681. if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
  682. return -ENOPROTOOPT;
  683. if (ol > sizeof(cf_sk->conn_req.param.data))
  684. goto req_param_bad_size;
  685. lock_sock(&(cf_sk->sk));
  686. cf_sk->conn_req.param.size = ol;
  687. if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
  688. release_sock(&cf_sk->sk);
  689. req_param_bad_size:
  690. pr_debug("CAIF: %s(): setsockopt"
  691. " CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
  692. return -EINVAL;
  693. }
  694. release_sock(&cf_sk->sk);
  695. return 0;
  696. default:
  697. pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt);
  698. return -EINVAL;
  699. }
  700. return 0;
  701. bad_sol:
  702. pr_debug("CAIF: %s(): setsockopt bad level\n", __func__);
  703. return -ENOPROTOOPT;
  704. }
  705. static int caif_connect(struct socket *sock, struct sockaddr *uservaddr,
  706. int sockaddr_len, int flags)
  707. {
  708. struct caifsock *cf_sk = NULL;
  709. int result = -1;
  710. int mode = 0;
  711. int ret = -EIO;
  712. struct sock *sk = sock->sk;
  713. BUG_ON(sk == NULL);
  714. cf_sk = container_of(sk, struct caifsock, sk);
  715. trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n",
  716. __func__, cf_sk,
  717. STATE_IS_OPEN(cf_sk),
  718. TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk));
  719. if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM)
  720. sock->state = SS_CONNECTING;
  721. else
  722. goto out;
  723. /* I want to be alone on cf_sk (except status and queue) */
  724. lock_sock(&(cf_sk->sk));
  725. if (sockaddr_len != sizeof(struct sockaddr_caif)) {
  726. pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n",
  727. __func__, (long) sockaddr_len,
  728. (long unsigned) sizeof(struct sockaddr_caif));
  729. ret = -EINVAL;
  730. goto open_error;
  731. }
  732. if (uservaddr->sa_family != AF_CAIF) {
  733. pr_debug("CAIF: %s(): Bad address family (%d)\n",
  734. __func__, uservaddr->sa_family);
  735. ret = -EAFNOSUPPORT;
  736. goto open_error;
  737. }
  738. memcpy(&cf_sk->conn_req.sockaddr, uservaddr,
  739. sizeof(struct sockaddr_caif));
  740. dbfs_atomic_inc(&cnt.num_open);
  741. mode = SKT_READ_FLAG | SKT_WRITE_FLAG;
  742. /* If socket is not open, make sure socket is in fully closed state */
  743. if (!STATE_IS_OPEN(cf_sk)) {
  744. /* Has link close response been received (if we ever sent it)?*/
  745. if (STATE_IS_PENDING(cf_sk)) {
  746. /*
  747. * Still waiting for close response from remote.
  748. * If opened non-blocking, report "would block"
  749. */
  750. if (flags & O_NONBLOCK) {
  751. pr_debug("CAIF: %s(): O_NONBLOCK"
  752. " && close pending\n", __func__);
  753. ret = -EAGAIN;
  754. goto open_error;
  755. }
  756. pr_debug("CAIF: %s(): Wait for close response"
  757. " from remote...\n", __func__);
  758. release_sock(&cf_sk->sk);
  759. /*
  760. * Blocking mode; close is pending and we need to wait
  761. * for its conclusion.
  762. */
  763. result =
  764. wait_event_interruptible(*cf_sk->sk.sk_sleep,
  765. !STATE_IS_PENDING(cf_sk));
  766. lock_sock(&(cf_sk->sk));
  767. if (result == -ERESTARTSYS) {
  768. pr_debug("CAIF: %s(): wait_event_interruptible"
  769. "woken by a signal (1)", __func__);
  770. ret = -ERESTARTSYS;
  771. goto open_error;
  772. }
  773. }
  774. }
  775. /* socket is now either closed, pending open or open */
  776. if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
  777. /* Open */
  778. pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)"
  779. " check access f_flags = 0x%x file_mode = 0x%x\n",
  780. __func__, cf_sk, mode, cf_sk->file_mode);
  781. } else {
  782. /* We are closed or pending open.
  783. * If closed: send link setup
  784. * If pending open: link setup already sent (we could have been
  785. * interrupted by a signal last time)
  786. */
  787. if (!STATE_IS_OPEN(cf_sk)) {
  788. /* First opening of file; connect lower layers: */
  789. /* Drain queue (very unlikely) */
  790. drain_queue(cf_sk);
  791. cf_sk->layer.receive = caif_sktrecv_cb;
  792. SET_STATE_OPEN(cf_sk);
  793. SET_PENDING_ON(cf_sk);
  794. /* Register this channel. */
  795. result =
  796. caif_connect_client(&cf_sk->conn_req,
  797. &cf_sk->layer);
  798. if (result < 0) {
  799. pr_debug("CAIF: %s(): can't register channel\n",
  800. __func__);
  801. ret = -EIO;
  802. SET_STATE_CLOSED(cf_sk);
  803. SET_PENDING_OFF(cf_sk);
  804. goto open_error;
  805. }
  806. dbfs_atomic_inc(&cnt.num_init);
  807. }
  808. /* If opened non-blocking, report "success".
  809. */
  810. if (flags & O_NONBLOCK) {
  811. pr_debug("CAIF: %s(): O_NONBLOCK success\n",
  812. __func__);
  813. ret = -EINPROGRESS;
  814. cf_sk->sk.sk_err = -EINPROGRESS;
  815. goto open_error;
  816. }
  817. trace_printk("CAIF: %s(): Wait for connect response\n",
  818. __func__);
  819. /* release lock before waiting */
  820. release_sock(&cf_sk->sk);
  821. result =
  822. wait_event_interruptible(*cf_sk->sk.sk_sleep,
  823. !STATE_IS_PENDING(cf_sk));
  824. lock_sock(&(cf_sk->sk));
  825. if (result == -ERESTARTSYS) {
  826. pr_debug("CAIF: %s(): wait_event_interruptible"
  827. "woken by a signal (2)", __func__);
  828. ret = -ERESTARTSYS;
  829. goto open_error;
  830. }
  831. if (!STATE_IS_OPEN(cf_sk)) {
  832. /* Lower layers said "no" */
  833. pr_debug("CAIF: %s(): Closed received\n", __func__);
  834. ret = -EPIPE;
  835. goto open_error;
  836. }
  837. trace_printk("CAIF: %s(): Connect received\n", __func__);
  838. }
  839. /* Open is ok */
  840. cf_sk->file_mode |= mode;
  841. trace_printk("CAIF: %s(): Connected - file mode = %x\n",
  842. __func__, cf_sk->file_mode);
  843. release_sock(&cf_sk->sk);
  844. return 0;
  845. open_error:
  846. sock->state = SS_UNCONNECTED;
  847. release_sock(&cf_sk->sk);
  848. out:
  849. return ret;
  850. }
/* Shut down both directions of a CAIF socket (only SHUT_RDWR is
 * supported).  Requests link teardown from the lower layers; the wait
 * for the teardown ack (DEINIT_RSP) is left to the caller — see
 * caif_release().  Returns 0, a negative errno, or -EAGAIN for
 * non-blocking sockets whose teardown is still in flight.
 */
static int caif_shutdown(struct socket *sock, int how)
{
	struct caifsock *cf_sk = NULL;
	int result = 0;
	int tx_flow_state_was_on;
	struct sock *sk = sock->sk;

	trace_printk("CAIF: %s(): enter\n", __func__);
	pr_debug("f_flags=%x\n", sock->file->f_flags);

	/* Partial shutdown (SHUT_RD / SHUT_WR) is not implemented. */
	if (how != SHUT_RDWR)
		return -EOPNOTSUPP;

	cf_sk = container_of(sk, struct caifsock, sk);
	if (cf_sk == NULL) {
		pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__);
		return -EBADF;
	}

	/* I want to be alone on cf_sk (except status queue) */
	lock_sock(&(cf_sk->sk));
	sock_hold(&cf_sk->sk);

	/* IS_CLOSED have double meaning:
	 * 1) Spontanous Remote Shutdown Request.
	 * 2) Ack on a channel teardown(disconnect)
	 * Must clear bit in case we previously received
	 * remote shudown request.
	 */
	if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
		SET_STATE_CLOSED(cf_sk);
		SET_PENDING_ON(cf_sk);
		tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk);
		SET_TX_FLOW_OFF(cf_sk);

		/* Hold the socket until DEINIT_RSP is received */
		sock_hold(&cf_sk->sk);
		result = caif_disconnect_client(&cf_sk->layer);

		if (result < 0) {
			pr_debug("CAIF: %s(): "
				 "caif_disconnect_client() failed\n",
				 __func__);
			SET_STATE_CLOSED(cf_sk);
			SET_PENDING_OFF(cf_sk);
			SET_TX_FLOW_OFF(cf_sk);
			release_sock(&cf_sk->sk);
			/* NOTE(review): sock_hold() was called twice above
			 * but only one sock_put() happens on this error
			 * path — looks like a reference leak; confirm.
			 */
			sock_put(&cf_sk->sk);
			return -EIO;
		}
	}
	/* A remote-shutdown indication doubles as the teardown ack (see
	 * comment above), so clear both flags here.
	 */
	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
		SET_PENDING_OFF(cf_sk);
		SET_REMOTE_SHUTDOWN_OFF(cf_sk);
	}

	/*
	 * Socket is no longer in state pending close,
	 * and we can release the reference.
	 */
	dbfs_atomic_inc(&cnt.num_close);
	drain_queue(cf_sk);
	SET_RX_FLOW_ON(cf_sk);
	cf_sk->file_mode = 0;
	sock_put(&cf_sk->sk);
	release_sock(&cf_sk->sk);

	/* Non-blocking callers cannot wait for the teardown ack, so let
	 * them know shutdown is still in progress.
	 */
	if (!result && (sock->file->f_flags & O_NONBLOCK)) {
		pr_debug("nonblocking shutdown returing -EAGAIN\n");
		return -EAGAIN;
	} else
		return result;
}
/* sendpage() is not supported for CAIF sockets; always fails with
 * -EOPNOTSUPP.
 */
static ssize_t caif_sock_no_sendpage(struct socket *sock,
				     struct page *page,
				     int offset, size_t size, int flags)
{
	return -EOPNOTSUPP;
}
/* This function is called as part of close. */
/* Release a CAIF socket: tear down the channel via caif_shutdown(),
 * optionally block until the teardown ack arrives, then orphan the sock
 * and drop this path's reference.  Final cleanup happens in
 * caif_sock_destructor() when the refcount reaches zero.
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = NULL;
	int res;

	caif_assert(sk != NULL);
	cf_sk = container_of(sk, struct caifsock, sk);

	/* Tear down this socket's debugfs directory before the sock can
	 * disappear underneath it.
	 */
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	res = caif_shutdown(sock, SHUT_RDWR);
	/* -EINPROGRESS (non-blocking connect still pending) is not fatal
	 * here; anything else aborts the release.
	 */
	if (res && res != -EINPROGRESS)
		return res;

	/*
	 * FIXME: Shutdown should probably be possible to do async
	 * without flushing queues, allowing reception of frames while
	 * waiting for DEINIT_IND.
	 * Release should always block, to allow secure decoupling of
	 * CAIF stack.
	 */
	if (!(sock->file->f_flags & O_NONBLOCK)) {
		/* Wait (interruptibly) for the teardown ack; on a signal
		 * we log and fall through — cleanup proceeds regardless.
		 */
		res = wait_event_interruptible(*cf_sk->sk.sk_sleep,
					       !STATE_IS_PENDING(cf_sk));
		if (res == -ERESTARTSYS) {
			pr_debug("CAIF: %s(): wait_event_interruptible"
				 "woken by a signal (1)", __func__);
		}
	}
	lock_sock(&(cf_sk->sk));

	sock->sk = NULL;

	/* Detach the socket from its process context by making it orphan. */
	sock_orphan(sk);

	/*
	 * Setting SHUTDOWN_MASK means that both send and receive are shutdown
	 * for the socket.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;

	/*
	 * Set the socket state to closed, the TCP_CLOSE macro is used when
	 * closing any socket.
	 */

	/* Flush out this sockets receive queue. */
	drain_queue(cf_sk);

	/* Finally release the socket. */
	SET_STATE_PENDING_DESTROY(cf_sk);
	release_sock(&cf_sk->sk);
	sock_put(sk);

	/*
	 * The rest of the cleanup will be handled from the
	 * caif_sock_destructor
	 */
	return res;
}
/* proto_ops vector for AF_CAIF sockets.  Operations without CAIF
 * semantics (bind, accept, listen, ...) are wired to the generic
 * sock_no_* stubs.
 */
static const struct proto_ops caif_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = caif_shutdown,
	.setsockopt = setsockopt,	/* file-local setsockopt handler */
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_sendmsg,
	.recvmsg = caif_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = caif_sock_no_sendpage,
};
/* This function is called when a socket is finally destroyed. */
/* sk_destruct callback: sanity-checks the dying sock, flushes and frees
 * its packet queue, and decrements the global socket count.
 */
static void caif_sock_destructor(struct sock *sk)
{
	struct caifsock *cf_sk = NULL;
	cf_sk = container_of(sk, struct caifsock, sk);

	/* Error checks. */
	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
	caif_assert(sk_unhashed(sk));
	caif_assert(!sk->sk_socket);

	/* A sock that was never orphaned must not be destroyed here. */
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("CAIF: %s(): 0x%p", __func__, sk);
		return;
	}

	/* Refuse to free while the channel is still open.
	 * NOTE(review): this early return skips kfree(pktq) and the
	 * atomic_dec below — verify this state is unreachable after
	 * caif_release(), otherwise it leaks.
	 */
	if (STATE_IS_OPEN(cf_sk)) {
		pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)"
			 " file_mode = 0x%x\n", __func__,
			 cf_sk, cf_sk->file_mode);
		return;
	}
	drain_queue(cf_sk);
	kfree(cf_sk->pktq);

	trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n",
		     __func__, cf_sk->name);
	atomic_dec(&caif_nr_socks);
}
/* socket(2) entry point for PF_CAIF: allocates and initializes a new
 * CAIF socket.  Only SOCK_SEQPACKET/SOCK_STREAM in init_net with a
 * protocol below CAIFPROTO_MAX are accepted.  The @kern flag is unused.
 * Returns 0 on success or a negative errno.
 */
static int caif_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk = NULL;
	struct caifsock *cf_sk = NULL;
	int result = 0;
	/* Shared proto descriptor for all CAIF socks; obj_size makes
	 * sk_alloc() allocate the full struct caifsock.
	 */
	static struct proto prot = {.name = "PF_CAIF",
		.owner = THIS_MODULE,
		.obj_size = sizeof(struct caifsock),
	};

	/*
	 * The sock->type specifies the socket type to use.
	 * in SEQPACKET mode packet boundaries are enforced.
	 */
	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (protocol < 0 || protocol >= CAIFPROTO_MAX)
		return -EPROTONOSUPPORT;

	/*
	 * Set the socket state to unconnected.	 The socket state is really
	 * not used at all in the net/core or socket.c but the
	 * initialization makes sure that sock->state is not uninitialized.
	 */
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
	if (!sk)
		return -ENOMEM;

	cf_sk = container_of(sk, struct caifsock, sk);

	/* Store the protocol */
	sk->sk_protocol = (unsigned char) protocol;

	spin_lock_init(&cf_sk->read_queue_len_lock);

	/* Fill in some information concerning the misc socket. */
	snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d",
		 atomic_read(&caif_nr_socks));

	/*
	 * Lock in order to try to stop someone from opening the socket
	 * too early.
	 */
	lock_sock(&(cf_sk->sk));

	/* Initialize the nozero default sock structure data. */
	sock_init_data(sock, sk);
	sock->ops = &caif_ops;
	sk->sk_destruct = caif_sock_destructor;
	sk->sk_sndbuf = caif_sockbuf_size;
	sk->sk_rcvbuf = caif_sockbuf_size;

	cf_sk->pktq = cfpktq_create();
	if (!cf_sk->pktq) {
		pr_err("CAIF: %s(): queue create failed.\n", __func__);
		result = -ENOMEM;
		release_sock(&cf_sk->sk);
		goto err_failed;
	}
	cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb;
	SET_STATE_CLOSED(cf_sk);
	SET_PENDING_OFF(cf_sk);
	SET_TX_FLOW_OFF(cf_sk);
	SET_RX_FLOW_ON(cf_sk);

	/* Set default options on configuration */
	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
	cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
	cf_sk->conn_req.protocol = protocol;
	/* Increase the number of sockets created. */
	atomic_inc(&caif_nr_socks);

	/* NOTE(review): only IS_ERR is checked here; debugfs_create_dir
	 * may also return NULL on failure — confirm NULL parent is
	 * acceptable for the debugfs_create_u32 calls below.
	 */
	if (!IS_ERR(debugfsdir)) {
		cf_sk->debugfs_socket_dir =
			debugfs_create_dir(cf_sk->name, debugfsdir);
		debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir, &cf_sk->conn_state);
		debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
		debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir,
				   (u32 *) &cf_sk->read_queue_len);
		debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir,
				   (u32 *) &cf_sk->layer.id);
	}
	release_sock(&cf_sk->sk);
	return 0;
err_failed:
	sk_free(sk);
	return result;
}
/* Address-family registration record: once passed to sock_register(),
 * socket(AF_CAIF, ...) calls are routed to caif_create().
 */
static struct net_proto_family caif_family_ops = {
	.family = PF_CAIF,
	.create = caif_create,
	.owner = THIS_MODULE,
};
  1109. static int af_caif_init(void)
  1110. {
  1111. int err;
  1112. err = sock_register(&caif_family_ops);
  1113. if (!err)
  1114. return err;
  1115. return 0;
  1116. }
  1117. static int __init caif_sktinit_module(void)
  1118. {
  1119. int stat;
  1120. #ifdef CONFIG_DEBUG_FS
  1121. debugfsdir = debugfs_create_dir("chnl_skt", NULL);
  1122. if (!IS_ERR(debugfsdir)) {
  1123. debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR,
  1124. debugfsdir,
  1125. (u32 *) &cnt.skb_in_use);
  1126. debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR,
  1127. debugfsdir,
  1128. (u32 *) &cnt.skb_alloc);
  1129. debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR,
  1130. debugfsdir,
  1131. (u32 *) &cnt.skb_free);
  1132. debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
  1133. debugfsdir,
  1134. (u32 *) &caif_nr_socks);
  1135. debugfs_create_u32("num_open", S_IRUSR | S_IWUSR,
  1136. debugfsdir,
  1137. (u32 *) &cnt.num_open);
  1138. debugfs_create_u32("num_close", S_IRUSR | S_IWUSR,
  1139. debugfsdir,
  1140. (u32 *) &cnt.num_close);
  1141. debugfs_create_u32("num_init", S_IRUSR | S_IWUSR,
  1142. debugfsdir,
  1143. (u32 *) &cnt.num_init);
  1144. debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR,
  1145. debugfsdir,
  1146. (u32 *) &cnt.num_init_resp);
  1147. debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR,
  1148. debugfsdir,
  1149. (u32 *) &cnt.num_init_fail_resp);
  1150. debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
  1151. debugfsdir,
  1152. (u32 *) &cnt.num_deinit);
  1153. debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
  1154. debugfsdir,
  1155. (u32 *) &cnt.num_deinit_resp);
  1156. debugfs_create_u32("num_remote_shutdown_ind",
  1157. S_IRUSR | S_IWUSR, debugfsdir,
  1158. (u32 *) &cnt.num_remote_shutdown_ind);
  1159. debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
  1160. debugfsdir,
  1161. (u32 *) &cnt.num_tx_flow_off_ind);
  1162. debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
  1163. debugfsdir,
  1164. (u32 *) &cnt.num_tx_flow_on_ind);
  1165. debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
  1166. debugfsdir,
  1167. (u32 *) &cnt.num_rx_flow_off);
  1168. debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
  1169. debugfsdir,
  1170. (u32 *) &cnt.num_rx_flow_on);
  1171. }
  1172. #endif
  1173. stat = af_caif_init();
  1174. if (stat) {
  1175. pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.",
  1176. __func__);
  1177. return stat;
  1178. }
  1179. return 0;
  1180. }
  1181. static void __exit caif_sktexit_module(void)
  1182. {
  1183. sock_unregister(PF_CAIF);
  1184. if (debugfsdir != NULL)
  1185. debugfs_remove_recursive(debugfsdir);
  1186. }
  1187. module_init(caif_sktinit_module);
  1188. module_exit(caif_sktexit_module);