/* (removed: web-scraper artifacts — file-size banner and line-number index) */
/*
 * File: pep.c
 *
 * Phonet pipe protocol end point socket
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
  24. #include <linux/kernel.h>
  25. #include <linux/socket.h>
  26. #include <net/sock.h>
  27. #include <net/tcp_states.h>
  28. #include <asm/ioctls.h>
  29. #include <linux/phonet.h>
  30. #include <net/phonet/phonet.h>
  31. #include <net/phonet/pep.h>
  32. #include <net/phonet/gprs.h>
/* sk_state values:
 * TCP_CLOSE		sock not in use yet
 * TCP_CLOSE_WAIT	disconnected pipe
 * TCP_LISTEN		listening pipe endpoint
 * TCP_SYN_RECV		connected pipe in disabled state
 * TCP_ESTABLISHED	connected pipe in enabled state
 *
 * pep_sock locking:
 *  - sk_state, ackq, hlist: sock lock needed
 *  - listener: read only
 *  - pipe_handle: read only
 */
#define CREDITS_MAX	10	/* maximum RX credits granted to the peer */
#define CREDITS_THR	7	/* regrant threshold (see pipe_grant_credits) */

/* Destination address of the Phonet pipe service. */
static const struct sockaddr_pn pipe_srv = {
	.spn_family = AF_PHONET,
	.spn_resource = 0xD9, /* pipe service */
};

#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
/* Get the next TLV sub-block.
 * Parses the 2-byte {type, length} head at the start of @skb, copies up
 * to *plen payload bytes into @buf, and consumes the whole sub-block
 * from the skb.  On return, *ptype holds the sub-block type and *plen
 * the payload length (which may exceed the bytes actually copied into
 * @buf).  Returns a pointer to the payload, or NULL if the sub-block is
 * truncated or malformed. */
static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
					void *buf)
{
	void *data = NULL;
	struct {
		u8 sb_type;
		u8 sb_len;	/* total length, including this 2-byte head */
	} *ph, h;
	int buflen = *plen;

	ph = skb_header_pointer(skb, 0, 2, &h);
	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
		return NULL;
	ph->sb_len -= 2;	/* convert total length to payload length */
	*ptype = ph->sb_type;
	*plen = ph->sb_len;

	/* Copy no more than the caller's buffer can hold. */
	if (buflen > ph->sb_len)
		buflen = ph->sb_len;
	data = skb_header_pointer(skb, 2, buflen, buf);
	__skb_pull(skb, 2 + ph->sb_len);
	return data;
}
  74. static int pep_reply(struct sock *sk, struct sk_buff *oskb,
  75. u8 code, const void *data, int len, gfp_t priority)
  76. {
  77. const struct pnpipehdr *oph = pnp_hdr(oskb);
  78. struct pnpipehdr *ph;
  79. struct sk_buff *skb;
  80. skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
  81. if (!skb)
  82. return -ENOMEM;
  83. skb_set_owner_w(skb, sk);
  84. skb_reserve(skb, MAX_PNPIPE_HEADER);
  85. __skb_put(skb, len);
  86. skb_copy_to_linear_data(skb, data, len);
  87. __skb_push(skb, sizeof(*ph));
  88. skb_reset_transport_header(skb);
  89. ph = pnp_hdr(skb);
  90. ph->utid = oph->utid;
  91. ph->message_id = oph->message_id + 1; /* REQ -> RESP */
  92. ph->pipe_handle = oph->pipe_handle;
  93. ph->error_code = code;
  94. return pn_skb_send(sk, skb, &pipe_srv);
  95. }
#define PAD 0x00

/* Accept an incoming pipe connection: reply with PN_PIPE_NO_ERROR and
 * advertise the flow control methods we support for both directions,
 * in order of preference.  May sleep (GFP_KERNEL allocation). */
static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
{
	static const u8 data[20] = {
		PAD, PAD, PAD, 2 /* sub-blocks */,
		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
	};

	might_sleep();
	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
				GFP_KERNEL);
}
  116. static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code)
  117. {
  118. static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
  119. WARN_ON(code == PN_PIPE_NO_ERROR);
  120. return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
  121. }
/* Control requests are not sent by the pipe service and have a specific
 * message format.  Send a PNS_PEP_CTRL_RESP carrying error @code back
 * to the source of @oskb (not to the pipe service). */
static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
				gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct sk_buff *skb;
	struct pnpipehdr *ph;
	struct sockaddr_pn dst;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PHONET_HEADER);
	ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4);

	ph->utid = oph->utid;
	ph->message_id = PNS_PEP_CTRL_RESP;
	ph->pipe_handle = oph->pipe_handle;
	ph->data[0] = oph->data[1]; /* CTRL id */
	ph->data[1] = oph->data[0]; /* PEP type */
	ph->data[2] = code; /* error code, at an usual offset */
	ph->data[3] = PAD;
	ph->data[4] = PAD;

	/* Reply straight to the request's source address. */
	pn_skb_get_src_sockaddr(oskb, &dst);
	return pn_skb_send(sk, skb, &dst);
}
/* Send a PNS_PEP_STATUS_IND (common PEP type) with indication @type and
 * value @status to the pipe service.  Used to report our RX flow
 * control state to the peer. */
static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
	__skb_push(skb, sizeof(*ph) + 4);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PEP_STATUS_IND;
	ph->pipe_handle = pn->pipe_handle;
	ph->pep_type = PN_PEP_TYPE_COMMON;
	ph->data[1] = type;
	ph->data[2] = PAD;
	ph->data[3] = PAD;
	ph->data[4] = status;

	return pn_skb_send(sk, skb, &pipe_srv);
}
/* Send our RX flow control information to the sender.
 * Socket must be locked. */
static void pipe_grant_credits(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	switch (pn->rx_fc) {
	case PN_LEGACY_FLOW_CONTROL: /* TODO */
		break;
	case PN_ONE_CREDIT_FLOW_CONTROL:
		/* One message at a time: tell the peer we are ready again. */
		pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
				PEP_IND_READY, GFP_ATOMIC);
		pn->rx_credits = 1;
		break;
	case PN_MULTI_CREDIT_FLOW_CONTROL:
		/* Hysteresis: only regrant once the peer has consumed at
		 * least CREDITS_THR credits. */
		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
			break;
		/* Grant enough credits to top back up to CREDITS_MAX;
		 * only account for them if the message went out. */
		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
					CREDITS_MAX - pn->rx_credits,
					GFP_ATOMIC) == 0)
			pn->rx_credits = CREDITS_MAX;
		break;
	}
}
/* Process a PNS_PEP_STATUS_IND from the peer: update our TX flow
 * control state (tx_credits) per the negotiated method, and wake up
 * writers when transmission becomes possible again.
 * Socket must be locked. */
static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	int wake = 0;

	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
				(unsigned)hdr->data[0]);
		return -EOPNOTSUPP;
	}

	switch (hdr->data[1]) {
	case PN_PEP_IND_FLOW_CONTROL:
		switch (pn->tx_fc) {
		case PN_LEGACY_FLOW_CONTROL:
			/* Binary busy/ready indication. */
			switch (hdr->data[4]) {
			case PEP_IND_BUSY:
				atomic_set(&pn->tx_credits, 0);
				break;
			case PEP_IND_READY:
				atomic_set(&pn->tx_credits, wake = 1);
				break;
			}
			break;
		case PN_ONE_CREDIT_FLOW_CONTROL:
			if (hdr->data[4] == PEP_IND_READY)
				atomic_set(&pn->tx_credits, wake = 1);
			break;
		}
		break;

	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
			break;
		/* Peer granted data[4] more credits. */
		atomic_add(wake = hdr->data[4], &pn->tx_credits);
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
				(unsigned)hdr->data[1]);
		return -EOPNOTSUPP;
	}
	if (wake)
		sk->sk_write_space(sk);
	return 0;
}
/* Process a PNS_PIPE_CREATED_IND (or REDIRECTED_IND): parse the
 * negotiated flow control sub-block, if present, and record the TX/RX
 * flow control methods.  Both default to legacy flow control.
 * Socket must be locked. */
static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	u8 n_sb = hdr->data[0];	/* number of sub-blocks that follow */

	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	__skb_pull(skb, sizeof(*hdr));
	while (n_sb > 0) {
		u8 type, buf[2], len = sizeof(buf);
		u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_NEGOTIATED_FC:
			/* Ignore the sub-block unless both values fit the
			 * known flow control methods (0..3). */
			if (len < 2 || (data[0] | data[1]) > 3)
				break;
			pn->tx_fc = data[0] & 3;
			pn->rx_fc = data[1] & 3;
			break;
		}
		n_sb--;
	}
	return 0;
}
/* Queue an skb to a connected sock.
 * Socket lock must be held. */
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		/* Pipe handle already connected: refuse a second connect. */
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		/* fall through */
	case PNS_PEP_DISABLE_REQ:
		/* Disabled pipe: no transmission allowed until re-enabled. */
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);	/* strip the extra padding byte */
		/* fall through */
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			/* Legacy flow control: no credit accounting. */
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return 0;
			break;
		}

		if (pn->rx_credits == 0) {
			/* Peer sent more data than we granted: drop. */
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		/* fall through */
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		/* fall through */
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			/* Legacy flow control: assume we may transmit. */
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
				hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return err;

queue:
	/* Hand the skb over to the socket's receive or control queue. */
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	err = skb->len;
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, err);
	return 0;
}
  377. /* Destroy connected sock. */
  378. static void pipe_destruct(struct sock *sk)
  379. {
  380. struct pep_sock *pn = pep_sk(sk);
  381. skb_queue_purge(&sk->sk_receive_queue);
  382. skb_queue_purge(&pn->ctrlreq_queue);
  383. }
  384. static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
  385. {
  386. struct sock *newsk;
  387. struct pep_sock *newpn, *pn = pep_sk(sk);
  388. struct pnpipehdr *hdr;
  389. struct sockaddr_pn dst;
  390. u16 peer_type;
  391. u8 pipe_handle, enabled, n_sb;
  392. u8 aligned = 0;
  393. if (!pskb_pull(skb, sizeof(*hdr) + 4))
  394. return -EINVAL;
  395. hdr = pnp_hdr(skb);
  396. pipe_handle = hdr->pipe_handle;
  397. switch (hdr->state_after_connect) {
  398. case PN_PIPE_DISABLE:
  399. enabled = 0;
  400. break;
  401. case PN_PIPE_ENABLE:
  402. enabled = 1;
  403. break;
  404. default:
  405. pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
  406. return -EINVAL;
  407. }
  408. peer_type = hdr->other_pep_type << 8;
  409. if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
  410. pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
  411. return -ENOBUFS;
  412. }
  413. /* Parse sub-blocks (options) */
  414. n_sb = hdr->data[4];
  415. while (n_sb > 0) {
  416. u8 type, buf[1], len = sizeof(buf);
  417. const u8 *data = pep_get_sb(skb, &type, &len, buf);
  418. if (data == NULL)
  419. return -EINVAL;
  420. switch (type) {
  421. case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
  422. if (len < 1)
  423. return -EINVAL;
  424. peer_type = (peer_type & 0xff00) | data[0];
  425. break;
  426. case PN_PIPE_SB_ALIGNED_DATA:
  427. aligned = data[0] != 0;
  428. break;
  429. }
  430. n_sb--;
  431. }
  432. skb = skb_clone(skb, GFP_ATOMIC);
  433. if (!skb)
  434. return -ENOMEM;
  435. /* Create a new to-be-accepted sock */
  436. newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
  437. if (!newsk) {
  438. kfree_skb(skb);
  439. return -ENOMEM;
  440. }
  441. sock_init_data(NULL, newsk);
  442. newsk->sk_state = TCP_SYN_RECV;
  443. newsk->sk_backlog_rcv = pipe_do_rcv;
  444. newsk->sk_protocol = sk->sk_protocol;
  445. newsk->sk_destruct = pipe_destruct;
  446. newpn = pep_sk(newsk);
  447. pn_skb_get_dst_sockaddr(skb, &dst);
  448. newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
  449. newpn->pn_sk.resource = pn->pn_sk.resource;
  450. skb_queue_head_init(&newpn->ctrlreq_queue);
  451. newpn->pipe_handle = pipe_handle;
  452. atomic_set(&newpn->tx_credits, 0);
  453. newpn->peer_type = peer_type;
  454. newpn->rx_credits = 0;
  455. newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
  456. newpn->init_enable = enabled;
  457. newpn->aligned = aligned;
  458. BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
  459. skb_queue_head(&newsk->sk_receive_queue, skb);
  460. if (!sock_flag(sk, SOCK_DEAD))
  461. sk->sk_data_ready(sk, 0);
  462. sk_acceptq_added(sk);
  463. sk_add_node(newsk, &pn->ackq);
  464. return 0;
  465. }
  466. /* Listening sock must be locked */
  467. static struct sock *pep_find_pipe(const struct hlist_head *hlist,
  468. const struct sockaddr_pn *dst,
  469. u8 pipe_handle)
  470. {
  471. struct hlist_node *node;
  472. struct sock *sknode;
  473. u16 dobj = pn_sockaddr_get_object(dst);
  474. sk_for_each(sknode, node, hlist) {
  475. struct pep_sock *pnnode = pep_sk(sknode);
  476. /* Ports match, but addresses might not: */
  477. if (pnnode->pn_sk.sobject != dobj)
  478. continue;
  479. if (pnnode->pipe_handle != pipe_handle)
  480. continue;
  481. if (sknode->sk_state == TCP_CLOSE_WAIT)
  482. continue;
  483. sock_hold(sknode);
  484. return sknode;
  485. }
  486. return NULL;
  487. }
  488. /*
  489. * Deliver an skb to a listening sock.
  490. * Socket lock must be held.
  491. * We then queue the skb to the right connected sock (if any).
  492. */
  493. static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
  494. {
  495. struct pep_sock *pn = pep_sk(sk);
  496. struct sock *sknode;
  497. struct pnpipehdr *hdr;
  498. struct sockaddr_pn dst;
  499. int err = NET_RX_SUCCESS;
  500. u8 pipe_handle;
  501. if (!pskb_may_pull(skb, sizeof(*hdr)))
  502. goto drop;
  503. hdr = pnp_hdr(skb);
  504. pipe_handle = hdr->pipe_handle;
  505. if (pipe_handle == PN_PIPE_INVALID_HANDLE)
  506. goto drop;
  507. pn_skb_get_dst_sockaddr(skb, &dst);
  508. /* Look for an existing pipe handle */
  509. sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
  510. if (sknode)
  511. return sk_receive_skb(sknode, skb, 1);
  512. /* Look for a pipe handle pending accept */
  513. sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle);
  514. if (sknode) {
  515. sock_put(sknode);
  516. if (net_ratelimit())
  517. printk(KERN_WARNING"Phonet unconnected PEP ignored");
  518. err = NET_RX_DROP;
  519. goto drop;
  520. }
  521. switch (hdr->message_id) {
  522. case PNS_PEP_CONNECT_REQ:
  523. err = pep_connreq_rcv(sk, skb);
  524. break;
  525. case PNS_PEP_DISCONNECT_REQ:
  526. pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
  527. break;
  528. case PNS_PEP_CTRL_REQ:
  529. pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
  530. break;
  531. case PNS_PEP_RESET_REQ:
  532. case PNS_PEP_ENABLE_REQ:
  533. case PNS_PEP_DISABLE_REQ:
  534. /* invalid handle is not even allowed here! */
  535. default:
  536. err = NET_RX_DROP;
  537. }
  538. drop:
  539. kfree_skb(skb);
  540. return err;
  541. }
/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
{
	struct pep_sock *pn = pep_sk(sk);
	int ifindex = 0;

	sk_common_release(sk);

	lock_sock(sk);
	if (sk->sk_state == TCP_LISTEN) {
		/* Destroy the listen queue */
		struct sock *sknode;
		struct hlist_node *p, *n;

		sk_for_each_safe(sknode, p, n, &pn->ackq)
			sk_del_node_init(sknode);
		sk->sk_state = TCP_CLOSE;
	}
	ifindex = pn->ifindex;
	pn->ifindex = 0;
	release_sock(sk);

	/* NOTE(review): gprs_detach() is deliberately called without the
	 * socket lock held (same as pep_setsockopt) — confirm it may not
	 * run under lock_sock(). */
	if (ifindex)
		gprs_detach(sk);
}
/* Wait for a connection request on a listening socket.
 * Called with the socket locked; drops and re-acquires the lock while
 * sleeping.  Returns 0 once the pending-accept queue (ackq) is
 * non-empty, -EINVAL if the socket stopped listening, -EWOULDBLOCK for
 * non-blocking calls, or a signal-related error. */
static int pep_wait_connreq(struct sock *sk, int noblock)
{
	struct task_struct *tsk = current;
	struct pep_sock *pn = pep_sk(sk);
	long timeo = sock_rcvtimeo(sk, noblock);

	for (;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_state != TCP_LISTEN)
			return -EINVAL;
		if (!hlist_empty(&pn->ackq))
			break;
		if (!timeo)
			return -EWOULDBLOCK;
		if (signal_pending(tsk))
			return sock_intr_errno(timeo);

		/* Exclusive wait: wake only one accept()er per request. */
		prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(&sk->sk_socket->wait, &wait);
	}

	return 0;
}
/* Accept one pending pipe connection: send the accept reply, move the
 * new sock from the listener's ackq to its hlist, and return it.
 * Returns NULL with *errp set on failure. */
static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *newsk = NULL;
	struct sk_buff *oskb;
	int err;

	lock_sock(sk);
	err = pep_wait_connreq(sk, flags & O_NONBLOCK);
	if (err)
		goto out;

	newsk = __sk_head(&pn->ackq);

	/* The connect request skb was left on the new sock's receive
	 * queue by pep_connreq_rcv(). */
	oskb = skb_dequeue(&newsk->sk_receive_queue);
	err = pep_accept_conn(newsk, oskb);
	if (err) {
		/* Put the request back so accept() can be retried. */
		skb_queue_head(&newsk->sk_receive_queue, oskb);
		newsk = NULL;
		goto out;
	}

	sock_hold(sk);	/* reference kept via newsk's listener pointer */
	pep_sk(newsk)->listener = sk;

	sock_hold(newsk);
	sk_del_node_init(newsk);	/* remove from ackq */
	sk_acceptq_removed(sk);
	sk_add_node(newsk, &pn->hlist);	/* now a connected pipe */
	__sock_put(newsk);

out:
	release_sock(sk);
	*errp = err;
	return newsk;
}
  617. static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
  618. {
  619. struct pep_sock *pn = pep_sk(sk);
  620. int answ;
  621. switch (cmd) {
  622. case SIOCINQ:
  623. if (sk->sk_state == TCP_LISTEN)
  624. return -EINVAL;
  625. lock_sock(sk);
  626. if (sock_flag(sk, SOCK_URGINLINE) &&
  627. !skb_queue_empty(&pn->ctrlreq_queue))
  628. answ = skb_peek(&pn->ctrlreq_queue)->len;
  629. else if (!skb_queue_empty(&sk->sk_receive_queue))
  630. answ = skb_peek(&sk->sk_receive_queue)->len;
  631. else
  632. answ = 0;
  633. release_sock(sk);
  634. return put_user(answ, (int __user *)arg);
  635. }
  636. return -ENOIOCTLCMD;
  637. }
  638. static int pep_init(struct sock *sk)
  639. {
  640. struct pep_sock *pn = pep_sk(sk);
  641. INIT_HLIST_HEAD(&pn->ackq);
  642. INIT_HLIST_HEAD(&pn->hlist);
  643. skb_queue_head_init(&pn->ctrlreq_queue);
  644. pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
  645. return 0;
  646. }
/* Set a SOL_PNPIPE socket option.  Only PNPIPE_ENCAP is handled: a
 * value of PNPIPE_ENCAP_IP attaches a GPRS network interface to the
 * pipe, 0 detaches it.  gprs_attach()/gprs_detach() are called with
 * the socket lock released (note the goto out_norel path, which skips
 * the final release_sock()). */
static int pep_setsockopt(struct sock *sk, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int val = 0, err = 0;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (optlen >= sizeof(int)) {
		if (get_user(val, (int __user *) optval))
			return -EFAULT;
	}

	lock_sock(sk);
	switch (optname) {
	case PNPIPE_ENCAP:
		if (val && val != PNPIPE_ENCAP_IP) {
			err = -EINVAL;
			break;
		}
		if (!pn->ifindex == !val)
			break; /* Nothing to do! */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (val) {
			release_sock(sk);
			err = gprs_attach(sk);
			if (err > 0) {
				/* positive return is the new ifindex */
				pn->ifindex = err;
				err = 0;
			}
		} else {
			pn->ifindex = 0;
			release_sock(sk);
			gprs_detach(sk);
			err = 0;
		}
		goto out_norel;	/* lock already released above */
	default:
		err = -ENOPROTOOPT;
	}
	release_sock(sk);

out_norel:
	return err;
}
  692. static int pep_getsockopt(struct sock *sk, int level, int optname,
  693. char __user *optval, int __user *optlen)
  694. {
  695. struct pep_sock *pn = pep_sk(sk);
  696. int len, val;
  697. if (level != SOL_PNPIPE)
  698. return -ENOPROTOOPT;
  699. if (get_user(len, optlen))
  700. return -EFAULT;
  701. switch (optname) {
  702. case PNPIPE_ENCAP:
  703. val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
  704. break;
  705. case PNPIPE_IFINDEX:
  706. val = pn->ifindex;
  707. break;
  708. default:
  709. return -ENOPROTOOPT;
  710. }
  711. len = min_t(unsigned int, sizeof(int), len);
  712. if (put_user(len, optlen))
  713. return -EFAULT;
  714. if (put_user(val, (int __user *) optval))
  715. return -EFAULT;
  716. return 0;
  717. }
/* Transmit one data skb on the pipe, consuming a TX credit when
 * credit-based flow control is in effect.  The skb is consumed in all
 * cases.  Returns -ENOBUFS when no TX credit is available. */
static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;

	if (pn_flow_safe(pn->tx_fc) &&
	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	/* 3-byte pipe data header, plus one padding byte in aligned mode */
	skb_push(skb, 3 + pn->aligned);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	if (pn->aligned) {
		ph->message_id = PNS_PIPE_ALIGNED_DATA;
		ph->data[0] = 0; /* padding */
	} else
		ph->message_id = PNS_PIPE_DATA;
	ph->pipe_handle = pn->pipe_handle;

	return pn_skb_send(sk, skb, &pipe_srv);
}
  739. static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
  740. struct msghdr *msg, size_t len)
  741. {
  742. struct pep_sock *pn = pep_sk(sk);
  743. struct sk_buff *skb;
  744. long timeo;
  745. int flags = msg->msg_flags;
  746. int err, done;
  747. if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
  748. MSG_CMSG_COMPAT)) ||
  749. !(msg->msg_flags & MSG_EOR))
  750. return -EOPNOTSUPP;
  751. skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
  752. flags & MSG_DONTWAIT, &err);
  753. if (!skb)
  754. return -ENOBUFS;
  755. skb_reserve(skb, MAX_PHONET_HEADER + 3);
  756. err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
  757. if (err < 0)
  758. goto outfree;
  759. lock_sock(sk);
  760. timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  761. if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
  762. err = -ENOTCONN;
  763. goto out;
  764. }
  765. if (sk->sk_state != TCP_ESTABLISHED) {
  766. /* Wait until the pipe gets to enabled state */
  767. disabled:
  768. err = sk_stream_wait_connect(sk, &timeo);
  769. if (err)
  770. goto out;
  771. if (sk->sk_state == TCP_CLOSE_WAIT) {
  772. err = -ECONNRESET;
  773. goto out;
  774. }
  775. }
  776. BUG_ON(sk->sk_state != TCP_ESTABLISHED);
  777. /* Wait until flow control allows TX */
  778. done = atomic_read(&pn->tx_credits);
  779. while (!done) {
  780. DEFINE_WAIT(wait);
  781. if (!timeo) {
  782. err = -EAGAIN;
  783. goto out;
  784. }
  785. if (signal_pending(current)) {
  786. err = sock_intr_errno(timeo);
  787. goto out;
  788. }
  789. prepare_to_wait(&sk->sk_socket->wait, &wait,
  790. TASK_INTERRUPTIBLE);
  791. done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
  792. finish_wait(&sk->sk_socket->wait, &wait);
  793. if (sk->sk_state != TCP_ESTABLISHED)
  794. goto disabled;
  795. }
  796. err = pipe_skb_send(sk, skb);
  797. if (err >= 0)
  798. err = len; /* success! */
  799. skb = NULL;
  800. out:
  801. release_sock(sk);
  802. outfree:
  803. kfree_skb(skb);
  804. return err;
  805. }
  806. int pep_writeable(struct sock *sk)
  807. {
  808. struct pep_sock *pn = pep_sk(sk);
  809. return atomic_read(&pn->tx_credits);
  810. }
/* Transmit an externally-built skb on the pipe.  In aligned mode the
 * skb is sent directly; otherwise a fresh header skb is allocated and
 * @skb is attached to it as a fragment (so that header bytes can be
 * pushed without touching @skb's headroom).  Consumes @skb. */
int pep_write(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *rskb, *fs;
	int flen = 0;

	if (pep_sk(sk)->aligned)
		return pipe_skb_send(sk, skb);

	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
	if (!rskb) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	/* Attach @skb as the sole fragment of the new header skb and
	 * account for its length. */
	skb_shinfo(rskb)->frag_list = skb;
	rskb->len += skb->len;
	rskb->data_len += rskb->len;
	rskb->truesize += rskb->len;

	/* Avoid nested fragments */
	skb_walk_frags(skb, fs)
		flen += fs->len;
	skb->next = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);
	skb->len -= flen;
	skb->data_len -= flen;
	skb->truesize -= flen;

	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
	return pipe_skb_send(sk, rskb);
}
  837. struct sk_buff *pep_read(struct sock *sk)
  838. {
  839. struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
  840. if (sk->sk_state == TCP_ESTABLISHED)
  841. pipe_grant_credits(sk);
  842. return skb;
  843. }
/* Receive one message.  With MSG_OOB (or SOCK_URGINLINE set), pending
 * control requests are returned first and acknowledged with
 * PN_PIPE_NO_ERROR; otherwise a data message is dequeued.  Sets
 * MSG_EOR on every message and MSG_TRUNC when @len was too small. */
static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len, int noblock,
			int flags, int *addr_len)
{
	struct sk_buff *skb;
	int err;

	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
		return -ENOTCONN;

	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
		/* Dequeue and acknowledge control request */
		struct pep_sock *pn = pep_sk(sk);

		if (flags & MSG_PEEK)
			return -EOPNOTSUPP;
		skb = skb_dequeue(&pn->ctrlreq_queue);
		if (skb) {
			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
						GFP_KERNEL);
			msg->msg_flags |= MSG_OOB;
			goto copy;
		}
		if (flags & MSG_OOB)
			return -EINVAL;
		/* No control request pending: fall back to data below. */
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	lock_sock(sk);
	if (skb == NULL) {
		/* Map the generic disconnect error to a pipe reset. */
		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
			err = -ECONNRESET;
		release_sock(sk);
		return err;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk);
	release_sock(sk);
copy:
	msg->msg_flags |= MSG_EOR;
	if (skb->len > len)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = skb->len;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	if (!err)
		err = (flags & MSG_TRUNC) ? skb->len : len;

	skb_free_datagram(sk, skb);
	return err;
}
/* Remove the socket from the Phonet hash.  A connected pipe is first
 * detached from its listener's hlist; the listener itself is only
 * unhashed once it is closed and all of its connected pipes are gone. */
static void pep_sock_unhash(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *skparent = NULL;

	lock_sock(sk);
	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
		/* Connected pipe: detach from the listener, then continue
		 * the check on the listener itself. */
		skparent = pn->listener;
		sk_del_node_init(sk);
		release_sock(sk);

		sk = skparent;
		pn = pep_sk(skparent);
		lock_sock(sk);
	}

	/* Unhash a listening sock only when it is closed
	 * and all of its active connected pipes are closed. */
	if (hlist_empty(&pn->hlist))
		pn_sock_unhash(&pn->pn_sk.sk);
	release_sock(sk);

	if (skparent)
		sock_put(skparent);	/* drop the ref taken at accept time */
}
/* Protocol operations for SOCK_SEQPACKET Phonet pipe sockets. */
static struct proto pep_proto = {
	.close		= pep_sock_close,
	.accept		= pep_sock_accept,
	.ioctl		= pep_ioctl,
	.init		= pep_init,
	.setsockopt	= pep_setsockopt,
	.getsockopt	= pep_getsockopt,
	.sendmsg	= pep_sendmsg,
	.recvmsg	= pep_recvmsg,
	.backlog_rcv	= pep_do_rcv,
	.hash		= pn_sock_hash,
	.unhash		= pep_sock_unhash,
	.get_port	= pn_sock_get_port,
	.obj_size	= sizeof(struct pep_sock),
	.owner		= THIS_MODULE,
	.name		= "PNPIPE",
};
/* Registration record binding PN_PROTO_PIPE to the stream socket ops. */
static struct phonet_protocol pep_pn_proto = {
	.ops		= &phonet_stream_ops,
	.prot		= &pep_proto,
	.sock_type	= SOCK_SEQPACKET,
};
/* Module init: register the pipe protocol with the Phonet family. */
static int __init pep_register(void)
{
	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
}

/* Module exit: unregister the pipe protocol. */
static void __exit pep_unregister(void)
{
	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
}

module_init(pep_register);
module_exit(pep_unregister);
MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
MODULE_DESCRIPTION("Phonet pipe protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);