bcm.c

  1. /*
  2. * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
  3. *
  4. * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the name of Volkswagen nor the names of its contributors
  16. * may be used to endorse or promote products derived from this software
  17. * without specific prior written permission.
  18. *
  19. * Alternatively, provided that this notice is retained in full, this
  20. * software may be distributed under the terms of the GNU General
  21. * Public License ("GPL") version 2, in which case the provisions of the
  22. * GPL apply INSTEAD OF those given above.
  23. *
  24. * The provided data structures and external interfaces from this code
  25. * are not restricted to be used by modules with a GPL compatible license.
  26. *
  27. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  38. * DAMAGE.
  39. *
  40. * Send feedback to <socketcan-users@lists.berlios.de>
  41. *
  42. */
  43. #include <linux/module.h>
  44. #include <linux/init.h>
  45. #include <linux/hrtimer.h>
  46. #include <linux/list.h>
  47. #include <linux/proc_fs.h>
  48. #include <linux/seq_file.h>
  49. #include <linux/uio.h>
  50. #include <linux/net.h>
  51. #include <linux/netdevice.h>
  52. #include <linux/socket.h>
  53. #include <linux/if_arp.h>
  54. #include <linux/skbuff.h>
  55. #include <linux/can.h>
  56. #include <linux/can/core.h>
  57. #include <linux/can/bcm.h>
  58. #include <net/sock.h>
  59. #include <net/net_namespace.h>
  60. /* use of last_frames[index].can_dlc */
  61. #define RX_RECV 0x40 /* received data for this element */
  62. #define RX_THR 0x80 /* element has not been sent due to throttle feature */
  63. #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
  64. /* get best masking value for can_rx_register() for a given single can_id */
  65. #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
  66. (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
  67. (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
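/*
 * For illustration: REGMASK() builds the mask handed to can_rx_register().
 * A standard id is compared over the 11 bits of CAN_SFF_MASK, an extended
 * id (CAN_EFF_FLAG set) over the 29 bits of CAN_EFF_MASK; in both cases the
 * EFF and RTR flag bits are part of the mask, so the received frame's flags
 * must match those of op->can_id exactly.
 */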
  68. #define CAN_BCM_VERSION CAN_VERSION
  69. static __initdata const char banner[] = KERN_INFO
  70. "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
  71. MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
  72. MODULE_LICENSE("Dual BSD/GPL");
  73. MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
  74. MODULE_ALIAS("can-proto-2");
  75. /* easy access to can_frame payload */
  76. static inline u64 GET_U64(const struct can_frame *cp)
  77. {
  78. return *(u64 *)cp->data;
  79. }
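/*
 * Note: GET_U64() maps the 8 byte data[] section of a can_frame onto a
 * single u64, so the "relevant bits" comparisons in bcm_rx_cmp_to_index()
 * and the MUX mask match in bcm_rx_handler() can be done with one
 * AND/compare instead of a per-byte loop.
 */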
  80. struct bcm_op {
  81. struct list_head list;
  82. int ifindex;
  83. canid_t can_id;
  84. int flags;
  85. unsigned long frames_abs, frames_filtered;
  86. struct timeval ival1, ival2;
  87. struct hrtimer timer, thrtimer;
  88. struct tasklet_struct tsklet, thrtsklet;
  89. ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
  90. int rx_ifindex;
  91. int count;
  92. int nframes;
  93. int currframe;
  94. struct can_frame *frames;
  95. struct can_frame *last_frames;
  96. struct can_frame sframe;
  97. struct can_frame last_sframe;
  98. struct sock *sk;
  99. struct net_device *rx_reg_dev;
  100. };
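/*
 * Rough meaning of the frame pointers above: for tx ops, frames[] holds the
 * (cyclically sent) CAN frames; for rx ops, frames[] holds the content
 * filter masks (frames[0] doubles as the MUX mask when nframes > 1) and
 * last_frames[] caches the last received content per filter index, with the
 * RX_RECV/RX_THR flags kept in the upper bits of can_dlc. sframe and
 * last_sframe provide inline storage for the common nframes == 1 case, so
 * no extra allocation is needed there.
 */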
  101. static struct proc_dir_entry *proc_dir;
  102. struct bcm_sock {
  103. struct sock sk;
  104. int bound;
  105. int ifindex;
  106. struct notifier_block notifier;
  107. struct list_head rx_ops;
  108. struct list_head tx_ops;
  109. unsigned long dropped_usr_msgs;
  110. struct proc_dir_entry *bcm_proc_read;
  111. char procname [9]; /* pointer printed in ASCII with \0 */
  112. };
  113. static inline struct bcm_sock *bcm_sk(const struct sock *sk)
  114. {
  115. return (struct bcm_sock *)sk;
  116. }
  117. #define CFSIZ sizeof(struct can_frame)
  118. #define OPSIZ sizeof(struct bcm_op)
  119. #define MHSIZ sizeof(struct bcm_msg_head)
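/*
 * Every message exchanged on a BCM socket, in both directions, consists of
 * a struct bcm_msg_head optionally followed by msg_head.nframes struct
 * can_frame elements, i.e. nominally MHSIZ + nframes * CFSIZ bytes;
 * bcm_sendmsg() below enforces this layout via (size - MHSIZ) % CFSIZ.
 */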
  120. /*
  121. * procfs functions
  122. */
  123. static char *bcm_proc_getifname(char *result, int ifindex)
  124. {
  125. struct net_device *dev;
  126. if (!ifindex)
  127. return "any";
  128. read_lock(&dev_base_lock);
  129. dev = __dev_get_by_index(&init_net, ifindex);
  130. if (dev)
  131. strcpy(result, dev->name);
  132. else
  133. strcpy(result, "???");
  134. read_unlock(&dev_base_lock);
  135. return result;
  136. }
  137. static int bcm_proc_show(struct seq_file *m, void *v)
  138. {
  139. char ifname[IFNAMSIZ];
  140. struct sock *sk = (struct sock *)m->private;
  141. struct bcm_sock *bo = bcm_sk(sk);
  142. struct bcm_op *op;
  143. seq_printf(m, ">>> socket %p", sk->sk_socket);
  144. seq_printf(m, " / sk %p", sk);
  145. seq_printf(m, " / bo %p", bo);
  146. seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
  147. seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
  148. seq_printf(m, " <<<\n");
  149. list_for_each_entry(op, &bo->rx_ops, list) {
  150. unsigned long reduction;
  151. /* print only active entries & prevent division by zero */
  152. if (!op->frames_abs)
  153. continue;
  154. seq_printf(m, "rx_op: %03X %-5s ",
  155. op->can_id, bcm_proc_getifname(ifname, op->ifindex));
  156. seq_printf(m, "[%d]%c ", op->nframes,
  157. (op->flags & RX_CHECK_DLC)?'d':' ');
  158. if (op->kt_ival1.tv64)
  159. seq_printf(m, "timeo=%lld ",
  160. (long long)
  161. ktime_to_us(op->kt_ival1));
  162. if (op->kt_ival2.tv64)
  163. seq_printf(m, "thr=%lld ",
  164. (long long)
  165. ktime_to_us(op->kt_ival2));
  166. seq_printf(m, "# recv %ld (%ld) => reduction: ",
  167. op->frames_filtered, op->frames_abs);
  168. reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
  169. seq_printf(m, "%s%ld%%\n",
  170. (reduction == 100)?"near ":"", reduction);
  171. }
  172. list_for_each_entry(op, &bo->tx_ops, list) {
  173. seq_printf(m, "tx_op: %03X %s [%d] ",
  174. op->can_id,
  175. bcm_proc_getifname(ifname, op->ifindex),
  176. op->nframes);
  177. if (op->kt_ival1.tv64)
  178. seq_printf(m, "t1=%lld ",
  179. (long long) ktime_to_us(op->kt_ival1));
  180. if (op->kt_ival2.tv64)
  181. seq_printf(m, "t2=%lld ",
  182. (long long) ktime_to_us(op->kt_ival2));
  183. seq_printf(m, "# sent %ld\n", op->frames_abs);
  184. }
  185. seq_putc(m, '\n');
  186. return 0;
  187. }
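/*
 * Example of the resulting /proc/net/can-bcm/<procname> content (device
 * name and numbers purely illustrative); the timeo/thr values are printed
 * in microseconds by ktime_to_us() above:
 *
 *   rx_op: 123 vcan0 [2]d timeo=5000000 thr=100000 # recv 42 (1000) => reduction: 96%
 *   tx_op: 456 vcan0 [1] t1=1000000 # sent 17
 */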
  188. static int bcm_proc_open(struct inode *inode, struct file *file)
  189. {
  190. return single_open(file, bcm_proc_show, PDE(inode)->data);
  191. }
  192. static const struct file_operations bcm_proc_fops = {
  193. .owner = THIS_MODULE,
  194. .open = bcm_proc_open,
  195. .read = seq_read,
  196. .llseek = seq_lseek,
  197. .release = single_release,
  198. };
  199. /*
  200. * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
  201. * of the given bcm tx op
  202. */
  203. static void bcm_can_tx(struct bcm_op *op)
  204. {
  205. struct sk_buff *skb;
  206. struct net_device *dev;
  207. struct can_frame *cf = &op->frames[op->currframe];
  208. /* no target device? => exit */
  209. if (!op->ifindex)
  210. return;
  211. dev = dev_get_by_index(&init_net, op->ifindex);
  212. if (!dev) {
  213. /* RFC: should this bcm_op remove itself here? */
  214. return;
  215. }
  216. skb = alloc_skb(CFSIZ, gfp_any());
  217. if (!skb)
  218. goto out;
  219. memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
  220. /* send with loopback */
  221. skb->dev = dev;
  222. skb->sk = op->sk;
  223. can_send(skb, 1);
  224. /* update statistics */
  225. op->currframe++;
  226. op->frames_abs++;
  227. /* reached last frame? */
  228. if (op->currframe >= op->nframes)
  229. op->currframe = 0;
  230. out:
  231. dev_put(dev);
  232. }
  233. /*
  234. * bcm_send_to_user - send a BCM message to the userspace
  235. * (consisting of bcm_msg_head + x CAN frames)
  236. */
  237. static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
  238. struct can_frame *frames, int has_timestamp)
  239. {
  240. struct sk_buff *skb;
  241. struct can_frame *firstframe;
  242. struct sockaddr_can *addr;
  243. struct sock *sk = op->sk;
  244. int datalen = head->nframes * CFSIZ;
  245. int err;
  246. skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
  247. if (!skb)
  248. return;
  249. memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
  250. if (head->nframes) {
  251. /* can_frames starting here */
  252. firstframe = (struct can_frame *)skb_tail_pointer(skb);
  253. memcpy(skb_put(skb, datalen), frames, datalen);
  254. /*
  255. * the BCM uses the can_dlc-element of the can_frame
  256. * structure for internal purposes. This is only
  257. * relevant for updates that are generated by the
  258. * BCM, where nframes is 1
  259. */
  260. if (head->nframes == 1)
  261. firstframe->can_dlc &= BCM_CAN_DLC_MASK;
  262. }
  263. if (has_timestamp) {
  264. /* restore rx timestamp */
  265. skb->tstamp = op->rx_stamp;
  266. }
  267. /*
  268. * Put the datagram to the queue so that bcm_recvmsg() can
  269. * get it from there. We need to pass the interface index to
  270. * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
  271. * containing the interface index.
  272. */
  273. BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
  274. addr = (struct sockaddr_can *)skb->cb;
  275. memset(addr, 0, sizeof(*addr));
  276. addr->can_family = AF_CAN;
  277. addr->can_ifindex = op->rx_ifindex;
  278. err = sock_queue_rcv_skb(sk, skb);
  279. if (err < 0) {
  280. struct bcm_sock *bo = bcm_sk(sk);
  281. kfree_skb(skb);
  282. /* don't care about overflows in this statistic */
  283. bo->dropped_usr_msgs++;
  284. }
  285. }
  286. static void bcm_tx_timeout_tsklet(unsigned long data)
  287. {
  288. struct bcm_op *op = (struct bcm_op *)data;
  289. struct bcm_msg_head msg_head;
  290. if (op->kt_ival1.tv64 && (op->count > 0)) {
  291. op->count--;
  292. if (!op->count && (op->flags & TX_COUNTEVT)) {
  293. /* create notification to user */
  294. msg_head.opcode = TX_EXPIRED;
  295. msg_head.flags = op->flags;
  296. msg_head.count = op->count;
  297. msg_head.ival1 = op->ival1;
  298. msg_head.ival2 = op->ival2;
  299. msg_head.can_id = op->can_id;
  300. msg_head.nframes = 0;
  301. bcm_send_to_user(op, &msg_head, NULL, 0);
  302. }
  303. }
  304. if (op->kt_ival1.tv64 && (op->count > 0)) {
  305. /* send (next) frame */
  306. bcm_can_tx(op);
  307. hrtimer_start(&op->timer,
  308. ktime_add(ktime_get(), op->kt_ival1),
  309. HRTIMER_MODE_ABS);
  310. } else {
  311. if (op->kt_ival2.tv64) {
  312. /* send (next) frame */
  313. bcm_can_tx(op);
  314. hrtimer_start(&op->timer,
  315. ktime_add(ktime_get(), op->kt_ival2),
  316. HRTIMER_MODE_ABS);
  317. }
  318. }
  319. }
  320. /*
  321. * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
  322. */
  323. static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
  324. {
  325. struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
  326. tasklet_schedule(&op->tsklet);
  327. return HRTIMER_NORESTART;
  328. }
  329. /*
  330. * bcm_rx_changed - create a RX_CHANGED notification due to changed content
  331. */
  332. static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
  333. {
  334. struct bcm_msg_head head;
  335. /* update statistics */
  336. op->frames_filtered++;
  337. /* prevent statistics overflow */
  338. if (op->frames_filtered > ULONG_MAX/100)
  339. op->frames_filtered = op->frames_abs = 0;
  340. /* this element is not throttled anymore */
  341. data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
  342. head.opcode = RX_CHANGED;
  343. head.flags = op->flags;
  344. head.count = op->count;
  345. head.ival1 = op->ival1;
  346. head.ival2 = op->ival2;
  347. head.can_id = op->can_id;
  348. head.nframes = 1;
  349. bcm_send_to_user(op, &head, data, 1);
  350. }
  351. /*
  352. * bcm_rx_update_and_send - process a detected relevant receive content change
  353. * 1. update the last received data
  354. * 2. send a notification to the user (if possible)
  355. */
  356. static void bcm_rx_update_and_send(struct bcm_op *op,
  357. struct can_frame *lastdata,
  358. const struct can_frame *rxdata)
  359. {
  360. memcpy(lastdata, rxdata, CFSIZ);
  361. /* mark as used and throttled by default */
  362. lastdata->can_dlc |= (RX_RECV|RX_THR);
  363. /* throttling mode inactive? */
  364. if (!op->kt_ival2.tv64) {
  365. /* send RX_CHANGED to the user immediately */
  366. bcm_rx_changed(op, lastdata);
  367. return;
  368. }
  369. /* with active throttling timer we are just done here */
  370. if (hrtimer_active(&op->thrtimer))
  371. return;
  372. /* first reception with throttling mode enabled */
  373. if (!op->kt_lastmsg.tv64)
  374. goto rx_changed_settime;
  375. /* got a second frame inside a potential throttle period? */
  376. if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
  377. ktime_to_us(op->kt_ival2)) {
  378. /* do not send the saved data - only start throttle timer */
  379. hrtimer_start(&op->thrtimer,
  380. ktime_add(op->kt_lastmsg, op->kt_ival2),
  381. HRTIMER_MODE_ABS);
  382. return;
  383. }
  384. /* the gap was big enough that no throttling was needed here */
  385. rx_changed_settime:
  386. bcm_rx_changed(op, lastdata);
  387. op->kt_lastmsg = ktime_get();
  388. }
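/*
 * In other words: with kt_ival2 set, at most one RX_CHANGED per ival2
 * window reaches the user. E.g. with ival2 = 100ms and content changes
 * arriving every 10ms, the first change is delivered immediately; further
 * ones only mark the element RX_THR and are flushed later by the thrtimer
 * via bcm_rx_thr_handler()/bcm_rx_thr_tsklet().
 */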
  389. /*
  390. * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
  391. * received data stored in op->last_frames[]
  392. */
  393. static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
  394. const struct can_frame *rxdata)
  395. {
  396. /*
  397. * no one uses the MSBs of can_dlc for comparison,
  398. * so we use them here to detect the first reception
  399. */
  400. if (!(op->last_frames[index].can_dlc & RX_RECV)) {
  401. /* received data for the first time => send update to user */
  402. bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
  403. return;
  404. }
  405. /* do a real check in can_frame data section */
  406. if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
  407. (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
  408. bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
  409. return;
  410. }
  411. if (op->flags & RX_CHECK_DLC) {
  412. /* do a real check in can_frame dlc */
  413. if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
  414. BCM_CAN_DLC_MASK)) {
  415. bcm_rx_update_and_send(op, &op->last_frames[index],
  416. rxdata);
  417. return;
  418. }
  419. }
  420. }
  421. /*
  422. * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
  423. */
  424. static void bcm_rx_starttimer(struct bcm_op *op)
  425. {
  426. if (op->flags & RX_NO_AUTOTIMER)
  427. return;
  428. if (op->kt_ival1.tv64)
  429. hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
  430. }
  431. static void bcm_rx_timeout_tsklet(unsigned long data)
  432. {
  433. struct bcm_op *op = (struct bcm_op *)data;
  434. struct bcm_msg_head msg_head;
  435. /* create notification to user */
  436. msg_head.opcode = RX_TIMEOUT;
  437. msg_head.flags = op->flags;
  438. msg_head.count = op->count;
  439. msg_head.ival1 = op->ival1;
  440. msg_head.ival2 = op->ival2;
  441. msg_head.can_id = op->can_id;
  442. msg_head.nframes = 0;
  443. bcm_send_to_user(op, &msg_head, NULL, 0);
  444. }
  445. /*
  446. * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
  447. */
  448. static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
  449. {
  450. struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
  451. /* schedule before NET_RX_SOFTIRQ */
  452. tasklet_hi_schedule(&op->tsklet);
  453. /* no restart of the timer is done here! */
  454. /* if the user wants to be informed when cyclic CAN messages come back */
  455. if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
  456. /* clear received can_frames to indicate 'nothing received' */
  457. memset(op->last_frames, 0, op->nframes * CFSIZ);
  458. }
  459. return HRTIMER_NORESTART;
  460. }
  461. /*
  462. * bcm_rx_do_flush - helper for bcm_rx_thr_flush
  463. */
  464. static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
  465. {
  466. if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
  467. if (update)
  468. bcm_rx_changed(op, &op->last_frames[index]);
  469. return 1;
  470. }
  471. return 0;
  472. }
  473. /*
  474. * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
  475. *
  476. * update == 0 : just check if throttled data is available (any irq context)
  477. * update == 1 : check and send throttled data to userspace (soft_irq context)
  478. */
  479. static int bcm_rx_thr_flush(struct bcm_op *op, int update)
  480. {
  481. int updated = 0;
  482. if (op->nframes > 1) {
  483. int i;
  484. /* for MUX filter we start at index 1 */
  485. for (i = 1; i < op->nframes; i++)
  486. updated += bcm_rx_do_flush(op, update, i);
  487. } else {
  488. /* for RX_FILTER_ID and simple filter */
  489. updated += bcm_rx_do_flush(op, update, 0);
  490. }
  491. return updated;
  492. }
  493. static void bcm_rx_thr_tsklet(unsigned long data)
  494. {
  495. struct bcm_op *op = (struct bcm_op *)data;
  496. /* push the changed data to the userspace */
  497. bcm_rx_thr_flush(op, 1);
  498. }
  499. /*
  500. * bcm_rx_thr_handler - the time for blocked content updates is over now:
  501. * Check for throttled data and send it to the userspace
  502. */
  503. static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
  504. {
  505. struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
  506. tasklet_schedule(&op->thrtsklet);
  507. if (bcm_rx_thr_flush(op, 0)) {
  508. hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
  509. return HRTIMER_RESTART;
  510. } else {
  511. /* rearm throttle handling */
  512. op->kt_lastmsg = ktime_set(0, 0);
  513. return HRTIMER_NORESTART;
  514. }
  515. }
  516. /*
  517. * bcm_rx_handler - handle a CAN frame reception
  518. */
  519. static void bcm_rx_handler(struct sk_buff *skb, void *data)
  520. {
  521. struct bcm_op *op = (struct bcm_op *)data;
  522. const struct can_frame *rxframe = (struct can_frame *)skb->data;
  523. int i;
  524. /* disable timeout */
  525. hrtimer_cancel(&op->timer);
  526. if (op->can_id != rxframe->can_id)
  527. return;
  528. /* save rx timestamp */
  529. op->rx_stamp = skb->tstamp;
  530. /* save originator for recvfrom() */
  531. op->rx_ifindex = skb->dev->ifindex;
  532. /* update statistics */
  533. op->frames_abs++;
  534. if (op->flags & RX_RTR_FRAME) {
  535. /* send reply for RTR-request (placed in op->frames[0]) */
  536. bcm_can_tx(op);
  537. return;
  538. }
  539. if (op->flags & RX_FILTER_ID) {
  540. /* the easiest case */
  541. bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
  542. goto rx_starttimer;
  543. }
  544. if (op->nframes == 1) {
  545. /* simple compare with index 0 */
  546. bcm_rx_cmp_to_index(op, 0, rxframe);
  547. goto rx_starttimer;
  548. }
  549. if (op->nframes > 1) {
  550. /*
  551. * multiplex compare
  552. *
  553. * find the first multiplex mask that fits.
  554. * Remark: The MUX-mask is stored in index 0
  555. */
  556. for (i = 1; i < op->nframes; i++) {
  557. if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
  558. (GET_U64(&op->frames[0]) &
  559. GET_U64(&op->frames[i]))) {
  560. bcm_rx_cmp_to_index(op, i, rxframe);
  561. break;
  562. }
  563. }
  564. }
  565. rx_starttimer:
  566. bcm_rx_starttimer(op);
  567. }
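/*
 * MUX filter example (values purely illustrative): with nframes = 3 and
 * frames[0].data[0] = 0xff as the MUX mask, a received frame is matched
 * against the frames[i] whose data[0] equals the received data[0]; e.g.
 * frames[1].data = { 0x01, 0xff, 0x00, ... } selects MUX value 0x01 and,
 * via bcm_rx_cmp_to_index(), watches byte 1 for content changes.
 */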
  568. /*
  569. * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
  570. */
  571. static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
  572. int ifindex)
  573. {
  574. struct bcm_op *op;
  575. list_for_each_entry(op, ops, list) {
  576. if ((op->can_id == can_id) && (op->ifindex == ifindex))
  577. return op;
  578. }
  579. return NULL;
  580. }
  581. static void bcm_remove_op(struct bcm_op *op)
  582. {
  583. hrtimer_cancel(&op->timer);
  584. hrtimer_cancel(&op->thrtimer);
  585. if (op->tsklet.func)
  586. tasklet_kill(&op->tsklet);
  587. if (op->thrtsklet.func)
  588. tasklet_kill(&op->thrtsklet);
  589. if ((op->frames) && (op->frames != &op->sframe))
  590. kfree(op->frames);
  591. if ((op->last_frames) && (op->last_frames != &op->last_sframe))
  592. kfree(op->last_frames);
  593. kfree(op);
  594. return;
  595. }
  596. static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
  597. {
  598. if (op->rx_reg_dev == dev) {
  599. can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
  600. bcm_rx_handler, op);
  601. /* mark the subscription as removed */
  602. op->rx_reg_dev = NULL;
  603. } else
  604. printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
  605. "mismatch %p %p\n", op->rx_reg_dev, dev);
  606. }
  607. /*
  608. * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
  609. */
  610. static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
  611. {
  612. struct bcm_op *op, *n;
  613. list_for_each_entry_safe(op, n, ops, list) {
  614. if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
  615. /*
  616. * Don't care if we're bound or not (due to netdev
  617. * problems); can_rx_unregister() is always a safe
  618. * thing to do here.
  619. */
  620. if (op->ifindex) {
  621. /*
  622. * Only remove subscriptions that had not
  623. * been removed due to NETDEV_UNREGISTER
  624. * in bcm_notifier()
  625. */
  626. if (op->rx_reg_dev) {
  627. struct net_device *dev;
  628. dev = dev_get_by_index(&init_net,
  629. op->ifindex);
  630. if (dev) {
  631. bcm_rx_unreg(dev, op);
  632. dev_put(dev);
  633. }
  634. }
  635. } else
  636. can_rx_unregister(NULL, op->can_id,
  637. REGMASK(op->can_id),
  638. bcm_rx_handler, op);
  639. list_del(&op->list);
  640. bcm_remove_op(op);
  641. return 1; /* done */
  642. }
  643. }
  644. return 0; /* not found */
  645. }
  646. /*
  647. * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
  648. */
  649. static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
  650. {
  651. struct bcm_op *op, *n;
  652. list_for_each_entry_safe(op, n, ops, list) {
  653. if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
  654. list_del(&op->list);
  655. bcm_remove_op(op);
  656. return 1; /* done */
  657. }
  658. }
  659. return 0; /* not found */
  660. }
  661. /*
  662. * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
  663. */
  664. static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
  665. int ifindex)
  666. {
  667. struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
  668. if (!op)
  669. return -EINVAL;
  670. /* put current values into msg_head */
  671. msg_head->flags = op->flags;
  672. msg_head->count = op->count;
  673. msg_head->ival1 = op->ival1;
  674. msg_head->ival2 = op->ival2;
  675. msg_head->nframes = op->nframes;
  676. bcm_send_to_user(op, msg_head, op->frames, 0);
  677. return MHSIZ;
  678. }
  679. /*
  680. * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
  681. */
  682. static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
  683. int ifindex, struct sock *sk)
  684. {
  685. struct bcm_sock *bo = bcm_sk(sk);
  686. struct bcm_op *op;
  687. int i, err;
  688. /* we need a real device to send frames */
  689. if (!ifindex)
  690. return -ENODEV;
  691. /* we need at least one can_frame */
  692. if (msg_head->nframes < 1)
  693. return -EINVAL;
  694. /* check the given can_id */
  695. op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
  696. if (op) {
  697. /* update existing BCM operation */
  698. /*
  699. * Do we need more space for the can_frames than currently
  700. * allocated? -> This is a _really_ unusual use-case and
  701. * therefore (complexity / locking) it is not supported.
  702. */
  703. if (msg_head->nframes > op->nframes)
  704. return -E2BIG;
  705. /* update can_frames content */
  706. for (i = 0; i < msg_head->nframes; i++) {
  707. err = memcpy_fromiovec((u8 *)&op->frames[i],
  708. msg->msg_iov, CFSIZ);
  709. if (op->frames[i].can_dlc > 8)
  710. err = -EINVAL;
  711. if (err < 0)
  712. return err;
  713. if (msg_head->flags & TX_CP_CAN_ID) {
  714. /* copy can_id into frame */
  715. op->frames[i].can_id = msg_head->can_id;
  716. }
  717. }
  718. } else {
  719. /* insert new BCM operation for the given can_id */
  720. op = kzalloc(OPSIZ, GFP_KERNEL);
  721. if (!op)
  722. return -ENOMEM;
  723. op->can_id = msg_head->can_id;
  724. /* create array for can_frames and copy the data */
  725. if (msg_head->nframes > 1) {
  726. op->frames = kmalloc(msg_head->nframes * CFSIZ,
  727. GFP_KERNEL);
  728. if (!op->frames) {
  729. kfree(op);
  730. return -ENOMEM;
  731. }
  732. } else
  733. op->frames = &op->sframe;
  734. for (i = 0; i < msg_head->nframes; i++) {
  735. err = memcpy_fromiovec((u8 *)&op->frames[i],
  736. msg->msg_iov, CFSIZ);
  737. if (op->frames[i].can_dlc > 8)
  738. err = -EINVAL;
  739. if (err < 0) {
  740. if (op->frames != &op->sframe)
  741. kfree(op->frames);
  742. kfree(op);
  743. return err;
  744. }
  745. if (msg_head->flags & TX_CP_CAN_ID) {
  746. /* copy can_id into frame */
  747. op->frames[i].can_id = msg_head->can_id;
  748. }
  749. }
  750. /* tx_ops never compare with previously received messages */
  751. op->last_frames = NULL;
  752. /* bcm_can_tx / bcm_tx_timeout_handler needs this */
  753. op->sk = sk;
  754. op->ifindex = ifindex;
  755. /* initialize uninitialized (kzalloc) structure */
  756. hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  757. op->timer.function = bcm_tx_timeout_handler;
  758. /* initialize tasklet for tx countevent notification */
  759. tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
  760. (unsigned long) op);
  761. /* currently unused in tx_ops */
  762. hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  763. /* add this bcm_op to the list of the tx_ops */
  764. list_add(&op->list, &bo->tx_ops);
  765. } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
  766. if (op->nframes != msg_head->nframes) {
  767. op->nframes = msg_head->nframes;
  768. /* start multiple frame transmission with index 0 */
  769. op->currframe = 0;
  770. }
  771. /* check flags */
  772. op->flags = msg_head->flags;
  773. if (op->flags & TX_RESET_MULTI_IDX) {
  774. /* start multiple frame transmission with index 0 */
  775. op->currframe = 0;
  776. }
  777. if (op->flags & SETTIMER) {
  778. /* set timer values */
  779. op->count = msg_head->count;
  780. op->ival1 = msg_head->ival1;
  781. op->ival2 = msg_head->ival2;
  782. op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
  783. op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
  784. /* disable an active timer due to zero values? */
  785. if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
  786. hrtimer_cancel(&op->timer);
  787. }
  788. if ((op->flags & STARTTIMER) &&
  789. ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
  790. /* spec: send can_frame when starting timer */
  791. op->flags |= TX_ANNOUNCE;
  792. if (op->kt_ival1.tv64 && (op->count > 0)) {
  793. /* op->count-- is done in bcm_tx_timeout_handler */
  794. hrtimer_start(&op->timer, op->kt_ival1,
  795. HRTIMER_MODE_REL);
  796. } else
  797. hrtimer_start(&op->timer, op->kt_ival2,
  798. HRTIMER_MODE_REL);
  799. }
  800. if (op->flags & TX_ANNOUNCE)
  801. bcm_can_tx(op);
  802. return msg_head->nframes * CFSIZ + MHSIZ;
  803. }
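/*
 * A minimal userspace sketch of TX_SETUP (assumes the exported
 * <linux/can.h> / <linux/can/bcm.h> headers; the interface name "can0" is
 * purely illustrative and error handling is omitted): send frame 0x123
 * once per second, ival2 being the cycle time. The first frame goes out
 * immediately due to the TX_ANNOUNCE behaviour of STARTTIMER above.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx;
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.msg_head.opcode       = TX_SETUP;
 *	tx.msg_head.can_id       = 0x123;
 *	tx.msg_head.flags        = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx.msg_head.ival2.tv_sec = 1;
 *	tx.msg_head.nframes      = 1;
 *	tx.frame.can_dlc         = 2;
 *	tx.frame.data[0]         = 0x11;
 *	tx.frame.data[1]         = 0x22;
 *	write(s, &tx, sizeof(tx));
 */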
  804. /*
  805. * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
  806. */
  807. static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
  808. int ifindex, struct sock *sk)
  809. {
  810. struct bcm_sock *bo = bcm_sk(sk);
  811. struct bcm_op *op;
  812. int do_rx_register;
  813. int err = 0;
  814. if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
  815. /* be robust against wrong usage ... */
  816. msg_head->flags |= RX_FILTER_ID;
  817. /* ignore trailing garbage */
  818. msg_head->nframes = 0;
  819. }
  820. if ((msg_head->flags & RX_RTR_FRAME) &&
  821. ((msg_head->nframes != 1) ||
  822. (!(msg_head->can_id & CAN_RTR_FLAG))))
  823. return -EINVAL;
  824. /* check the given can_id */
  825. op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
  826. if (op) {
  827. /* update existing BCM operation */
  828. /*
  829. * Do we need more space for the can_frames than currently
  830. * allocated? -> This is a _really_ unusual use-case and
  831. * therefore (complexity / locking) it is not supported.
  832. */
  833. if (msg_head->nframes > op->nframes)
  834. return -E2BIG;
  835. if (msg_head->nframes) {
  836. /* update can_frames content */
  837. err = memcpy_fromiovec((u8 *)op->frames,
  838. msg->msg_iov,
  839. msg_head->nframes * CFSIZ);
  840. if (err < 0)
  841. return err;
  842. /* clear last_frames to indicate 'nothing received' */
  843. memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
  844. }
  845. op->nframes = msg_head->nframes;
  846. /* Only an update -> do not call can_rx_register() */
  847. do_rx_register = 0;
  848. } else {
  849. /* insert new BCM operation for the given can_id */
  850. op = kzalloc(OPSIZ, GFP_KERNEL);
  851. if (!op)
  852. return -ENOMEM;
  853. op->can_id = msg_head->can_id;
  854. op->nframes = msg_head->nframes;
  855. if (msg_head->nframes > 1) {
  856. /* create array for can_frames and copy the data */
  857. op->frames = kmalloc(msg_head->nframes * CFSIZ,
  858. GFP_KERNEL);
  859. if (!op->frames) {
  860. kfree(op);
  861. return -ENOMEM;
  862. }
  863. /* create and init array for received can_frames */
  864. op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
  865. GFP_KERNEL);
  866. if (!op->last_frames) {
  867. kfree(op->frames);
  868. kfree(op);
  869. return -ENOMEM;
  870. }
  871. } else {
  872. op->frames = &op->sframe;
  873. op->last_frames = &op->last_sframe;
  874. }
  875. if (msg_head->nframes) {
  876. err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
  877. msg_head->nframes * CFSIZ);
  878. if (err < 0) {
  879. if (op->frames != &op->sframe)
  880. kfree(op->frames);
  881. if (op->last_frames != &op->last_sframe)
  882. kfree(op->last_frames);
  883. kfree(op);
  884. return err;
  885. }
  886. }
  887. /* bcm_can_tx / bcm_tx_timeout_handler needs this */
  888. op->sk = sk;
  889. op->ifindex = ifindex;
  890. /* initialize uninitialized (kzalloc) structure */
  891. hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  892. op->timer.function = bcm_rx_timeout_handler;
  893. /* initialize tasklet for rx timeout notification */
  894. tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
  895. (unsigned long) op);
  896. hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  897. op->thrtimer.function = bcm_rx_thr_handler;
  898. /* initialize tasklet for rx throttle handling */
  899. tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
  900. (unsigned long) op);
  901. /* add this bcm_op to the list of the rx_ops */
  902. list_add(&op->list, &bo->rx_ops);
  903. /* call can_rx_register() */
  904. do_rx_register = 1;
  905. } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
  906. /* check flags */
  907. op->flags = msg_head->flags;
  908. if (op->flags & RX_RTR_FRAME) {
  909. /* no timers in RTR-mode */
  910. hrtimer_cancel(&op->thrtimer);
  911. hrtimer_cancel(&op->timer);
  912. /*
  913. * funny feature in RX(!)_SETUP only for RTR-mode:
  914. * copy can_id into frame BUT without RTR-flag to
  915. * prevent a full-load-loopback-test ... ;-]
  916. */
  917. if ((op->flags & TX_CP_CAN_ID) ||
  918. (op->frames[0].can_id == op->can_id))
  919. op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
  920. } else {
  921. if (op->flags & SETTIMER) {
  922. /* set timer value */
  923. op->ival1 = msg_head->ival1;
  924. op->ival2 = msg_head->ival2;
  925. op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
  926. op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
  927. /* disable an active timer due to zero value? */
  928. if (!op->kt_ival1.tv64)
  929. hrtimer_cancel(&op->timer);
  930. /*
  931. * In any case cancel the throttle timer, flush
  932. * potentially blocked msgs and reset throttle handling
  933. */
  934. op->kt_lastmsg = ktime_set(0, 0);
  935. hrtimer_cancel(&op->thrtimer);
  936. bcm_rx_thr_flush(op, 1);
  937. }
  938. if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
  939. hrtimer_start(&op->timer, op->kt_ival1,
  940. HRTIMER_MODE_REL);
  941. }
  942. /* now we can register for can_ids, if we added a new bcm_op */
  943. if (do_rx_register) {
  944. if (ifindex) {
  945. struct net_device *dev;
  946. dev = dev_get_by_index(&init_net, ifindex);
  947. if (dev) {
  948. err = can_rx_register(dev, op->can_id,
  949. REGMASK(op->can_id),
  950. bcm_rx_handler, op,
  951. "bcm");
  952. op->rx_reg_dev = dev;
  953. dev_put(dev);
  954. }
  955. } else
  956. err = can_rx_register(NULL, op->can_id,
  957. REGMASK(op->can_id),
  958. bcm_rx_handler, op, "bcm");
  959. if (err) {
  960. /* this bcm rx op is broken -> remove it */
  961. list_del(&op->list);
  962. bcm_remove_op(op);
  963. return err;
  964. }
  965. }
  966. return msg_head->nframes * CFSIZ + MHSIZ;
  967. }
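/*
 * Corresponding RX_SETUP sketch (same illustrative socket "s" as above,
 * error handling omitted): deliver every received frame with can_id 0x123
 * (RX_FILTER_ID, i.e. no content filtering), throttled to at most one
 * RX_CHANGED per 100 ms, and raise RX_TIMEOUT after 5 s of silence.
 *
 *	struct bcm_msg_head rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.opcode        = RX_SETUP;
 *	rx.can_id        = 0x123;
 *	rx.flags         = RX_FILTER_ID | SETTIMER | STARTTIMER;
 *	rx.ival1.tv_sec  = 5;
 *	rx.ival2.tv_usec = 100000;
 *	write(s, &rx, sizeof(rx));
 */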
  968. /*
  969. * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
  970. */
  971. static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
  972. {
  973. struct sk_buff *skb;
  974. struct net_device *dev;
  975. int err;
  976. /* we need a real device to send frames */
  977. if (!ifindex)
  978. return -ENODEV;
  979. skb = alloc_skb(CFSIZ, GFP_KERNEL);
  980. if (!skb)
  981. return -ENOMEM;
  982. err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
  983. if (err < 0) {
  984. kfree_skb(skb);
  985. return err;
  986. }
  987. dev = dev_get_by_index(&init_net, ifindex);
  988. if (!dev) {
  989. kfree_skb(skb);
  990. return -ENODEV;
  991. }
  992. skb->dev = dev;
  993. skb->sk = sk;
  994. err = can_send(skb, 1); /* send with loopback */
  995. dev_put(dev);
  996. if (err)
  997. return err;
  998. return CFSIZ + MHSIZ;
  999. }
  1000. /*
  1001. * bcm_sendmsg - process BCM commands (opcodes) from the userspace
  1002. */
  1003. static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
  1004. struct msghdr *msg, size_t size)
  1005. {
  1006. struct sock *sk = sock->sk;
  1007. struct bcm_sock *bo = bcm_sk(sk);
  1008. int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
  1009. struct bcm_msg_head msg_head;
  1010. int ret; /* read bytes or error codes as return value */
  1011. if (!bo->bound)
  1012. return -ENOTCONN;
  1013. /* check for valid message length from userspace */
  1014. if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
  1015. return -EINVAL;
  1016. /* check for alternative ifindex for this bcm_op */
  1017. if (!ifindex && msg->msg_name) {
  1018. /* no bound device as default => check msg_name */
  1019. struct sockaddr_can *addr =
  1020. (struct sockaddr_can *)msg->msg_name;
  1021. if (addr->can_family != AF_CAN)
  1022. return -EINVAL;
  1023. /* ifindex from sendto() */
  1024. ifindex = addr->can_ifindex;
  1025. if (ifindex) {
  1026. struct net_device *dev;
  1027. dev = dev_get_by_index(&init_net, ifindex);
  1028. if (!dev)
  1029. return -ENODEV;
  1030. if (dev->type != ARPHRD_CAN) {
  1031. dev_put(dev);
  1032. return -ENODEV;
  1033. }
  1034. dev_put(dev);
  1035. }
  1036. }
  1037. /* read message head information */
  1038. ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
  1039. if (ret < 0)
  1040. return ret;
  1041. lock_sock(sk);
  1042. switch (msg_head.opcode) {
  1043. case TX_SETUP:
  1044. ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
  1045. break;
  1046. case RX_SETUP:
  1047. ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
  1048. break;
  1049. case TX_DELETE:
  1050. if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
  1051. ret = MHSIZ;
  1052. else
  1053. ret = -EINVAL;
  1054. break;
  1055. case RX_DELETE:
  1056. if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
  1057. ret = MHSIZ;
  1058. else
  1059. ret = -EINVAL;
  1060. break;
  1061. case TX_READ:
  1062. /* reuse msg_head for the reply to TX_READ */
  1063. msg_head.opcode = TX_STATUS;
  1064. ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
  1065. break;
  1066. case RX_READ:
  1067. /* reuse msg_head for the reply to RX_READ */
  1068. msg_head.opcode = RX_STATUS;
  1069. ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
  1070. break;
  1071. case TX_SEND:
  1072. /* we need exactly one can_frame behind the msg head */
  1073. if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
  1074. ret = -EINVAL;
  1075. else
  1076. ret = bcm_tx_send(msg, ifindex, sk);
  1077. break;
  1078. default:
  1079. ret = -EINVAL;
  1080. break;
  1081. }
  1082. release_sock(sk);
  1083. return ret;
  1084. }
  1085. /*
  1086. * notification handler for netdevice status changes
  1087. */
  1088. static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
  1089. void *data)
  1090. {
  1091. struct net_device *dev = (struct net_device *)data;
  1092. struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
  1093. struct sock *sk = &bo->sk;
  1094. struct bcm_op *op;
  1095. int notify_enodev = 0;
  1096. if (!net_eq(dev_net(dev), &init_net))
  1097. return NOTIFY_DONE;
  1098. if (dev->type != ARPHRD_CAN)
  1099. return NOTIFY_DONE;
  1100. switch (msg) {
  1101. case NETDEV_UNREGISTER:
  1102. lock_sock(sk);
  1103. /* remove device specific receive entries */
  1104. list_for_each_entry(op, &bo->rx_ops, list)
  1105. if (op->rx_reg_dev == dev)
  1106. bcm_rx_unreg(dev, op);
  1107. /* remove device reference, if this is our bound device */
  1108. if (bo->bound && bo->ifindex == dev->ifindex) {
  1109. bo->bound = 0;
  1110. bo->ifindex = 0;
  1111. notify_enodev = 1;
  1112. }
  1113. release_sock(sk);
  1114. if (notify_enodev) {
  1115. sk->sk_err = ENODEV;
  1116. if (!sock_flag(sk, SOCK_DEAD))
  1117. sk->sk_error_report(sk);
  1118. }
  1119. break;
  1120. case NETDEV_DOWN:
  1121. if (bo->bound && bo->ifindex == dev->ifindex) {
  1122. sk->sk_err = ENETDOWN;
  1123. if (!sock_flag(sk, SOCK_DEAD))
  1124. sk->sk_error_report(sk);
  1125. }
  1126. }
  1127. return NOTIFY_DONE;
  1128. }
  1129. /*
  1130. * initial settings for all BCM sockets to be set at socket creation time
  1131. */
  1132. static int bcm_init(struct sock *sk)
  1133. {
  1134. struct bcm_sock *bo = bcm_sk(sk);
  1135. bo->bound = 0;
  1136. bo->ifindex = 0;
  1137. bo->dropped_usr_msgs = 0;
  1138. bo->bcm_proc_read = NULL;
  1139. INIT_LIST_HEAD(&bo->tx_ops);
  1140. INIT_LIST_HEAD(&bo->rx_ops);
  1141. /* set notifier */
  1142. bo->notifier.notifier_call = bcm_notifier;
  1143. register_netdevice_notifier(&bo->notifier);
  1144. return 0;
  1145. }
  1146. /*
  1147. * standard socket functions
  1148. */
  1149. static int bcm_release(struct socket *sock)
  1150. {
  1151. struct sock *sk = sock->sk;
  1152. struct bcm_sock *bo = bcm_sk(sk);
  1153. struct bcm_op *op, *next;
  1154. /* remove bcm_ops, timer, rx_unregister(), etc. */
  1155. unregister_netdevice_notifier(&bo->notifier);
  1156. lock_sock(sk);
  1157. list_for_each_entry_safe(op, next, &bo->tx_ops, list)
  1158. bcm_remove_op(op);
  1159. list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
  1160. /*
  1161. * Don't care if we're bound or not (due to netdev problems);
  1162. * can_rx_unregister() is always a safe thing to do here.
  1163. */
  1164. if (op->ifindex) {
  1165. /*
  1166. * Only remove subscriptions that had not
  1167. * been removed due to NETDEV_UNREGISTER
  1168. * in bcm_notifier()
  1169. */
  1170. if (op->rx_reg_dev) {
  1171. struct net_device *dev;
  1172. dev = dev_get_by_index(&init_net, op->ifindex);
  1173. if (dev) {
  1174. bcm_rx_unreg(dev, op);
  1175. dev_put(dev);
  1176. }
  1177. }
  1178. } else
  1179. can_rx_unregister(NULL, op->can_id,
  1180. REGMASK(op->can_id),
  1181. bcm_rx_handler, op);
  1182. bcm_remove_op(op);
  1183. }
  1184. /* remove procfs entry */
  1185. if (proc_dir && bo->bcm_proc_read)
  1186. remove_proc_entry(bo->procname, proc_dir);
  1187. /* remove device reference */
  1188. if (bo->bound) {
  1189. bo->bound = 0;
  1190. bo->ifindex = 0;
  1191. }
  1192. sock_orphan(sk);
  1193. sock->sk = NULL;
  1194. release_sock(sk);
  1195. sock_put(sk);
  1196. return 0;
  1197. }
  1198. static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
  1199. int flags)
  1200. {
  1201. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  1202. struct sock *sk = sock->sk;
  1203. struct bcm_sock *bo = bcm_sk(sk);
  1204. if (bo->bound)
  1205. return -EISCONN;
  1206. /* bind a device to this socket */
  1207. if (addr->can_ifindex) {
  1208. struct net_device *dev;
  1209. dev = dev_get_by_index(&init_net, addr->can_ifindex);
  1210. if (!dev)
  1211. return -ENODEV;
  1212. if (dev->type != ARPHRD_CAN) {
  1213. dev_put(dev);
  1214. return -ENODEV;
  1215. }
  1216. bo->ifindex = dev->ifindex;
  1217. dev_put(dev);
  1218. } else {
  1219. /* no interface reference for ifindex = 0 ('any' CAN device) */
  1220. bo->ifindex = 0;
  1221. }
  1222. bo->bound = 1;
  1223. if (proc_dir) {
  1224. /* unique socket address as filename */
  1225. sprintf(bo->procname, "%p", sock);
  1226. bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
  1227. proc_dir,
  1228. &bcm_proc_fops, sk);
  1229. }
  1230. return 0;
  1231. }
  1232. static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
  1233. struct msghdr *msg, size_t size, int flags)
  1234. {
  1235. struct sock *sk = sock->sk;
  1236. struct sk_buff *skb;
  1237. int error = 0;
  1238. int noblock;
  1239. int err;
  1240. noblock = flags & MSG_DONTWAIT;
  1241. flags &= ~MSG_DONTWAIT;
  1242. skb = skb_recv_datagram(sk, flags, noblock, &error);
  1243. if (!skb)
  1244. return error;
  1245. if (skb->len < size)
  1246. size = skb->len;
  1247. err = memcpy_toiovec(msg->msg_iov, skb->data, size);
  1248. if (err < 0) {
  1249. skb_free_datagram(sk, skb);
  1250. return err;
  1251. }
  1252. sock_recv_timestamp(msg, sk, skb);
  1253. if (msg->msg_name) {
  1254. msg->msg_namelen = sizeof(struct sockaddr_can);
  1255. memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
  1256. }
  1257. skb_free_datagram(sk, skb);
  1258. return size;
  1259. }
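/*
 * Read side sketch (same illustrative socket as above; handle_frame() is a
 * placeholder for user code): every message delivered here starts with a
 * struct bcm_msg_head whose opcode identifies the event (RX_CHANGED,
 * RX_TIMEOUT, TX_EXPIRED, TX_STATUS, RX_STATUS), followed by
 * msg_head.nframes CAN frames.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} rxmsg;
 *
 *	while (read(s, &rxmsg, sizeof(rxmsg)) >= (int)sizeof(rxmsg.msg_head)) {
 *		if (rxmsg.msg_head.opcode == RX_CHANGED && rxmsg.msg_head.nframes)
 *			handle_frame(&rxmsg.frame);
 *	}
 */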
  1260. static struct proto_ops bcm_ops __read_mostly = {
  1261. .family = PF_CAN,
  1262. .release = bcm_release,
  1263. .bind = sock_no_bind,
  1264. .connect = bcm_connect,
  1265. .socketpair = sock_no_socketpair,
  1266. .accept = sock_no_accept,
  1267. .getname = sock_no_getname,
  1268. .poll = datagram_poll,
  1269. .ioctl = NULL, /* use can_ioctl() from af_can.c */
  1270. .listen = sock_no_listen,
  1271. .shutdown = sock_no_shutdown,
  1272. .setsockopt = sock_no_setsockopt,
  1273. .getsockopt = sock_no_getsockopt,
  1274. .sendmsg = bcm_sendmsg,
  1275. .recvmsg = bcm_recvmsg,
  1276. .mmap = sock_no_mmap,
  1277. .sendpage = sock_no_sendpage,
  1278. };
  1279. static struct proto bcm_proto __read_mostly = {
  1280. .name = "CAN_BCM",
  1281. .owner = THIS_MODULE,
  1282. .obj_size = sizeof(struct bcm_sock),
  1283. .init = bcm_init,
  1284. };
  1285. static struct can_proto bcm_can_proto __read_mostly = {
  1286. .type = SOCK_DGRAM,
  1287. .protocol = CAN_BCM,
  1288. .capability = -1,
  1289. .ops = &bcm_ops,
  1290. .prot = &bcm_proto,
  1291. };
  1292. static int __init bcm_module_init(void)
  1293. {
  1294. int err;
  1295. printk(banner);
  1296. err = can_proto_register(&bcm_can_proto);
  1297. if (err < 0) {
  1298. printk(KERN_ERR "can: registration of bcm protocol failed\n");
  1299. return err;
  1300. }
  1301. /* create /proc/net/can-bcm directory */
  1302. proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
  1303. return 0;
  1304. }
  1305. static void __exit bcm_module_exit(void)
  1306. {
  1307. can_proto_unregister(&bcm_can_proto);
  1308. if (proc_dir)
  1309. proc_net_remove(&init_net, "can-bcm");
  1310. }
  1311. module_init(bcm_module_init);
  1312. module_exit(bcm_module_exit);