raw.c

/*
 * raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

#define CAN_RAW_VERSION CAN_VERSION
static __initdata const char banner[] =
	KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define MASK_ALL 0
/*
 * A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter.  If the filter list is empty,
 * no CAN frames will be received by the socket.  The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item.  This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
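
/*
 * Illustrative userspace sketch (not part of the original file): one way an
 * application typically exercises the filter list described above.  The
 * interface name "can0" and the CAN ID 0x123 are arbitrary example values;
 * the constants come from <linux/can.h> and <linux/can/raw.h>, and
 * if_nametoindex() from <net/if.h>.
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct can_filter rfilter = {
 *		.can_id   = 0x123,
 *		.can_mask = CAN_SFF_MASK,
 *	};
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 * After this, only frames whose ID matches 0x123 under CAN_SFF_MASK are
 * queued to the socket; setting an empty filter list disables reception.
 */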
struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	int loopback;
	int recv_own_msgs;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
};

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}

static void raw_rcv(struct sk_buff *skb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && skb->sk == sk)
		return;

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/*
	 * Put the datagram to the queue so that raw_recvmsg() can
	 * get it from there.  We need to pass the interface index to
	 * raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
static int raw_enable_filters(struct net_device *dev, struct sock *sk,
			      struct can_filter *filter, int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw");
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
				can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw");

	return err;
}

static void raw_disable_filters(struct net_device *dev, struct sock *sk,
				struct can_filter *filter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
				  raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)
{
	if (err_mask)
		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(dev, sk, ro->filter, ro->count);
	}

	return err;
}
static int raw_notifier(struct notifier_block *nb,
			unsigned long msg, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (ro->ifindex != dev->ifindex)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;
	}

	return NOTIFY_DONE;
}
static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;

	/* set notifier */
	ro->notifier.notifier_call = raw_notifier;

	register_netdevice_notifier(&ro->notifier);

	return 0;
}
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	unregister_netdevice_notifier(&ro->notifier);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev, sk);
				dev_put(dev);
			}
		} else
			raw_disable_allfilters(NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < sizeof(*addr))
		return -EINVAL;

	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev, sk);
					dev_put(dev);
				}
			} else
				raw_disable_allfilters(NULL, sk);
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	return err;
}
static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int *len, int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	*len = sizeof(*addr);

	return 0;
}
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter;        /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {

	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_user(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(&init_net, ro->ifindex);

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(dev, sk, &sfilter, 1);
			else
				err = raw_enable_filters(dev, sk, filter,
							 count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(dev, sk, ro->filter, ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_user(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(&init_net, ro->ifindex);

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(dev, sk, err_mask);
			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(dev, sk, ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_user(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
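
/*
 * Hedged userspace sketch (not part of the original file): how the remaining
 * socket options handled above are commonly set, assuming a CAN_RAW socket s
 * as in the sketch near the top of this file.  The chosen error classes are
 * arbitrary examples; the flag names come from <linux/can/error.h>.
 *
 *	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;
 *	int recv_own = 1;
 *	int loopback = 0;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *		   &err_mask, sizeof(err_mask));
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
 *		   &recv_own, sizeof(recv_own));
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
 *		   &loopback, sizeof(loopback));
 */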
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {

	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);
			if (len > fsize)
				len = fsize;
			if (copy_to_user(optval, ro->filter, len))
				err = -EFAULT;
		} else
			len = 0;
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	if (size != sizeof(struct can_frame))
		return -EINVAL;

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
				  &err);
	if (!skb)
		goto put_dev;

	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
	if (err < 0)
		goto free_skb;
	err = sock_tx_timestamp(msg, sk, skb_tx(skb));
	if (err < 0)
		goto free_skb;

	/* to be able to check the received tx sock reference in raw_rcv() */
	skb_tx(skb)->prevent_sk_orphan = 1;

	skb->dev = dev;
	skb->sk = sk;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
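
/*
 * Hedged userspace sketch (not part of the original file): sending a single
 * struct can_frame through raw_sendmsg() above.  A bound CAN_RAW socket s
 * (see the sketch near the top of this file) is assumed; the ID and payload
 * are arbitrary example values.
 *
 *	struct can_frame frame = {
 *		.can_id  = 0x123,
 *		.can_dlc = 2,
 *		.data    = { 0xde, 0xad },
 *	};
 *
 *	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
 *		perror("write");
 *
 * Note that raw_sendmsg() rejects any size other than
 * sizeof(struct can_frame), so partial writes cannot occur.
 */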
static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;
	int noblock;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}
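
/*
 * Hedged userspace sketch (not part of the original file): reading frames
 * back out through raw_recvmsg() above.  Using recvfrom() also returns the
 * sockaddr_can that raw_rcv() stored in skb->cb, i.e. the index of the
 * interface the frame arrived on.  A bound CAN_RAW socket s is assumed.
 *
 *	struct can_frame frame;
 *	struct sockaddr_can addr;
 *	socklen_t len = sizeof(addr);
 *
 *	ssize_t nbytes = recvfrom(s, &frame, sizeof(frame), 0,
 *				  (struct sockaddr *)&addr, &len);
 *	if (nbytes == sizeof(frame))
 *		printf("ID 0x%x from ifindex %d\n",
 *		       frame.can_id, addr.can_ifindex);
 */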
static struct proto_ops raw_ops __read_mostly = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = NULL,		/* use can_ioctl() from af_can.c */
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};

static struct can_proto raw_can_proto __read_mostly = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};

static __init int raw_module_init(void)
{
	int err;

	printk(banner);

	err = can_proto_register(&raw_can_proto);
	if (err < 0)
		printk(KERN_ERR "can: registration of raw protocol failed\n");

	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
}

module_init(raw_module_init);
module_exit(raw_module_exit);