/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

#define CAN_BCM_VERSION "20170425"

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

/*
 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
 * 64 bit aligned so the offset has to be a multiple of 8 which is ensured
 * by the only callers in bcm_rx_cmp_to_index() and bcm_rx_handler().
 */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	return *(u64 *)(cp->data + offset);
}
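
/*
 * bcm_op - one TX task resp. RX filter job of a BCM socket:
 * timer/tsklet drive the cyclic transmission resp. the RX timeout,
 * thrtimer/thrtsklet implement the RX_CHANGED throttling, frames[]
 * holds the user-given CAN frame(s) and last_frames[] the content
 * received last (RX side only).
 */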
struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int cfsiz;
	u32 count;
	u32 nframes;
	u32 currframe;
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;
	struct canfd_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
	if ((msg_head->ival1.tv_sec < 0) ||
	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival1.tv_usec < 0) ||
	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
	    (msg_head->ival2.tv_sec < 0) ||
	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival2.tv_usec < 0) ||
	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
		return true;

	return false;
}

#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
#if IS_ENABLED(CONFIG_PROC_FS)
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {
		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {
		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, bcm_proc_show);
}

static const struct file_operations bcm_proc_fops = {
	.owner = THIS_MODULE,
	.open = bcm_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_PROC_FS */

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}
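
/*
 * bcm_tx_start_timer - (re)arm the cyclic TX timer: use the kt_ival1
 * period while the initial frame counter (op->count) is still running,
 * otherwise fall back to the kt_ival2 period.
 */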
static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (op->kt_ival1 && op->count)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);
	else if (op->kt_ival2)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival2),
			      HRTIMER_MODE_ABS);
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1 && (op->count > 0)) {
		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {
			/* create notification to user */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2)
		bcm_can_tx(op);

	bcm_tx_start_timer(op);
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);

	memset(&head, 0, sizeof(head));
	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 * 1. update the last received data
 * 2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was that big, that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 * received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use it here to detect the first time of reception
	 */
	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata);
		return;
	}

	/* do a real check in CAN frame data section */
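	/*
	 * the user-given frame content at this index acts as a bit mask:
	 * only payload bits that are set in op->frames[index] are relevant
	 * for the content change detection below
	 */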
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
				  unsigned int index)
{
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;

	if ((op->last_frames) && (lcf->flags & RX_THR)) {
		if (update)
			bcm_rx_changed(op, lcf);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 * Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);
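
	/*
	 * the tasklet above pushes the throttled data to the user; here we
	 * only check (update == 0) whether throttled data is still pending
	 * to decide if the throttle timer has to fire again
	 */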
	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = 0;
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;

	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (skb->len != op->cfsiz)
		return;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */
		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops,
				  struct bcm_msg_head *mh, int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	if (op->tsklet.func) {
		while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
		       test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
		       hrtimer_active(&op->timer)) {
			hrtimer_cancel(&op->timer);
			tasklet_kill(&op->tsklet);
		}
	}

	if (op->thrtsklet.func) {
		while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
		       test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
		       hrtimer_active(&op->thrtimer)) {
			hrtimer_cancel(&op->thrtimer);
			tasklet_kill(&op->thrtsklet);
		}
	}

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			synchronize_rcu();
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * op->cfsiz,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * op->cfsiz + MHSIZ;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update CAN frames content */
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
		}

		op->nframes = msg_head->nframes;
		op->flags = msg_head->flags;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		if (msg_head->nframes > 1) {
			/* create array for CAN frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * op->cfsiz,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received CAN frames */
			op->last_frames = kzalloc(msg_head->nframes * op->cfsiz,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */

	if (op->flags & RX_RTR_FRAME) {
		struct canfd_frame *frame0 = op->frames;

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (frame0->can_id == op->can_id))
			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {
			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = 0;
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (dev) {
				err = can_rx_register(sock_net(sk), dev,
						      op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm", sk);

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(sock_net(sk), NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm", sk);
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * op->cfsiz + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
		       int cfsiz)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return cfsiz + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}
}

static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&bcm_notifier_lock);
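	/*
	 * the lock is dropped while each socket is notified;
	 * bcm_busy_notifier marks the entry we are working on so that
	 * bcm_release() can wait until its socket has been handled
	 */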
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	spin_lock(&bcm_notifier_lock);
	list_add_tail(&bo->notifier, &bcm_notifier_list);
	spin_unlock(&bcm_notifier_lock);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	spin_lock(&bcm_notifier_lock);
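	/*
	 * wait until the notifier callback is no longer working on this
	 * socket before tearing it down
	 */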
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);
	}

	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < sizeof(*addr))
		return -EINVAL;

	lock_sock(sk);

	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);
	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     net->can.bcmproc_dir,
						     &bcm_proc_fops, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops bcm_ops = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}

static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};

static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}
	register_pernet_subsys(&canbcm_pernet_ops);
	register_netdevice_notifier(&canbcm_notifier);

	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);