cfcnfg.c

/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfctrl.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/caif_dev.h>

#define container_obj(layr) container_of(layr, struct cfcnfg, layer)

/* Information about CAIF physical interfaces held by the Config Module
 * in order to manage them.
 */
struct cfcnfg_phyinfo {
	struct list_head node;
	bool up;

	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface */
	enum cfcnfg_phy_preference pref;

	/* Information about the physical device */
	struct dev_info dev_info;

	/* Interface index */
	int ifindex;

	/* Use Start of frame extension */
	bool use_stx;

	/* Use Start of frame checksum */
	bool use_fcs;
};

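/* Top-level CAIF configuration object: owns the control layer (ctrl),
 * the multiplexer (mux), the list of registered physical interfaces
 * (phys) and the lock that serializes changes to that list.
 */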
struct cfcnfg {
	struct cflayer layer;
	struct cflayer *ctrl;
	struct cflayer *mux;
	struct list_head phys;
	struct mutex lock;
};

static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
			      enum cfctrl_srv serv, u8 phyid,
			      struct cflayer *adapt_layer);
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer);
static void cfctrl_resp_func(void);
static void cfctrl_enum_resp(void);

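/* Allocate the configuration object and build the core of the CAIF
 * stack: a multiplexer (cfmuxl) with the control layer (cfctrl)
 * attached on channel 0, and the control response callbacks wired up
 * to the handlers in this file.
 */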
struct cfcnfg *cfcnfg_create(void)
{
	struct cfcnfg *this;
	struct cfctrl_rsp *resp;

	might_sleep();

	/* Initiate this layer */
	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
	if (!this) {
		pr_warn("Out of memory\n");
		return NULL;
	}
	this->mux = cfmuxl_create();
	if (!this->mux)
		goto out_of_mem;
	this->ctrl = cfctrl_create();
	if (!this->ctrl)
		goto out_of_mem;
	/* Initiate response functions */
	resp = cfctrl_get_respfuncs(this->ctrl);
	resp->enum_rsp = cfctrl_enum_resp;
	resp->linkerror_ind = cfctrl_resp_func;
	resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
	resp->sleep_rsp = cfctrl_resp_func;
	resp->wake_rsp = cfctrl_resp_func;
	resp->restart_rsp = cfctrl_resp_func;
	resp->radioset_rsp = cfctrl_resp_func;
	resp->linksetup_rsp = cfcnfg_linkup_rsp;
	resp->reject_rsp = cfcnfg_reject_rsp;

	INIT_LIST_HEAD(&this->phys);

	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
	layer_set_dn(this->ctrl, this->mux);
	layer_set_up(this->ctrl, this);
	mutex_init(&this->lock);

	return this;
out_of_mem:
	pr_warn("Out of memory\n");
	synchronize_rcu();
	kfree(this->mux);
	kfree(this->ctrl);
	kfree(this);
	return NULL;
}
EXPORT_SYMBOL(cfcnfg_create);

void cfcnfg_remove(struct cfcnfg *cfg)
{
	might_sleep();
	if (cfg) {
		synchronize_rcu();

		kfree(cfg->mux);
		kfree(cfg->ctrl);
		kfree(cfg);
	}
}

static void cfctrl_resp_func(void)
{
}

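/* Look up a physical interface by phy id. The caller must hold
 * rcu_read_lock() or cnfg->lock while traversing the phys list.
 */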
static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
						     u8 phyid)
{
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->id == phyid)
			return phy;
	return NULL;
}

static void cfctrl_enum_resp(void)
{
}

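/* Pick a device for a new connection: prefer an interface that is up
 * and matches the requested preference; otherwise fall back to any
 * interface that is up.
 */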
struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
				  enum cfcnfg_phy_preference phy_pref)
{
	/* Try to match with specified preference */
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node) {
		if (phy->up && phy->pref == phy_pref &&
		    phy->frm_layer != NULL)
			return &phy->dev_info;
	}

	/* Otherwise just return something */
	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->up)
			return &phy->dev_info;
	return NULL;
}

int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->ifindex == ifi && phy->up)
			return phy->id;
	return -ENODEV;
}

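/* Tear down a connected channel: remove the service layer from the
 * MUX, send a link-down request to the remote end, cancel any pending
 * control requests and, after an RCU grace period, signal
 * CAIF_CTRLCMD_DEINIT_RSP to the adaptation layer.
 */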
int cfcnfg_disconn_adapt_layer(struct cfcnfg *cfg, struct cflayer *adap_layer)
{
	u8 channel_id = 0;
	int ret = 0;
	struct cflayer *servl = NULL;

	caif_assert(adap_layer != NULL);

	channel_id = adap_layer->id;
	if (adap_layer->dn == NULL || channel_id == 0) {
		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
		ret = -ENOTCONN;
		goto end;
	}

	servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
	if (servl == NULL) {
		pr_err("PROTOCOL ERROR - "
		       "Error removing service_layer Channel_Id(%d)",
		       channel_id);
		ret = -EINVAL;
		goto end;
	}

	ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);

end:
	cfctrl_cancel_req(cfg->ctrl, adap_layer);

	/* Do RCU sync before initiating cleanup */
	synchronize_rcu();
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return ret;
}
EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);

void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
{
	if (adap_layer->dn)
		cfsrvl_put(adap_layer->dn);
}
EXPORT_SYMBOL(cfcnfg_release_adap_layer);

static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}

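/* Protocol header size, in bytes, added by each service type. Together
 * with the optional start-of-frame extension byte this determines the
 * headroom reported back from cfcnfg_add_adaptation_layer().
 */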
static const int protohead[CFCTRL_SRV_MASK] = {
	[CFCTRL_SRV_VEI] = 4,
	[CFCTRL_SRV_DATAGRAM] = 7,
	[CFCTRL_SRV_UTIL] = 4,
	[CFCTRL_SRV_RFM] = 3,
	[CFCTRL_SRV_DBG] = 3,
};

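/* Request a new CAIF channel on behalf of a client. Validates the
 * adaptation layer and the requested physical interface, reports the
 * interface index and the required header/tail room back to the
 * caller, and then issues an enumeration and a link-setup request via
 * the control layer. The channel is not usable until
 * cfcnfg_linkup_rsp() has run.
 *
 * A rough usage sketch (illustrative only; real callers elsewhere in
 * the CAIF stack fill in more of struct cfctrl_link_param, and
 * 'chosen_phyid' / 'adap_layer' are assumed to come from the caller):
 *
 *	struct cfctrl_link_param param;
 *	int ifindex, headroom, tailroom;
 *
 *	param.linktype = CFCTRL_SRV_DATAGRAM;
 *	param.phyid = chosen_phyid;
 *	err = cfcnfg_add_adaptation_layer(cnfg, &param, adap_layer,
 *					  &ifindex, &headroom, &tailroom);
 */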
int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
				struct cfctrl_link_param *param,
				struct cflayer *adap_layer,
				int *ifindex,
				int *proto_head,
				int *proto_tail)
{
	struct cflayer *frml;
	struct cfcnfg_phyinfo *phy;
	int err;

	rcu_read_lock();
	phy = cfcnfg_get_phyinfo_rcu(cnfg, param->phyid);
	if (!phy) {
		err = -ENODEV;
		goto unlock;
	}
	err = -EINVAL;

	if (adap_layer == NULL) {
		pr_err("adap_layer is NULL\n");
		goto unlock;
	}
	if (adap_layer->receive == NULL) {
		pr_err("adap_layer->receive is NULL\n");
		goto unlock;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("adap_layer->ctrlcmd == NULL\n");
		goto unlock;
	}

	err = -ENODEV;
	frml = phy->frm_layer;
	if (frml == NULL) {
		pr_err("Specified PHY type does not exist!\n");
		goto unlock;
	}
	caif_assert(param->phyid == phy->id);
	caif_assert(phy->frm_layer->id == param->phyid);
	caif_assert(phy->phy_layer->id == param->phyid);

	*ifindex = phy->ifindex;
	*proto_tail = 2;
	*proto_head = protohead[param->linktype] + (phy->use_stx ? 1 : 0);

	rcu_read_unlock();

	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cnfg->ctrl, param->phyid);
	return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);

unlock:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);

static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer)
{
	if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
		adapt_layer->ctrlcmd(adapt_layer,
				     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
}

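/* Control-layer callback for a successful link setup. Creates the
 * service layer matching the negotiated service type, splices it in
 * between the MUX and the client's adaptation layer, and signals
 * CAIF_CTRLCMD_INIT_RSP upwards.
 */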
static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	struct net_device *netdev;

	rcu_read_lock();

	if (adapt_layer == NULL) {
		pr_debug("link setup response but no client exists, "
			 "send linkdown back\n");
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		goto unlock;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);

	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
	if (phyinfo == NULL) {
		pr_err("ERROR: Link Layer Device disappeared "
		       "while connecting\n");
		goto unlock;
	}

	caif_assert(phyinfo != NULL);
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	adapt_layer->id = channel_id;

	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		netdev = phyinfo->dev_info.dev;
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
					 netdev->mtu);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("Protocol error. Link setup response "
		       "- unknown channel type\n");
		goto unlock;
	}
	if (!servicel) {
		pr_warn("Out of memory\n");
		goto unlock;
	}
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);

	rcu_read_unlock();

	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
	return;
unlock:
	rcu_read_unlock();
}

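/* Register a new physical (link-layer) interface with the CAIF stack.
 * Picks a free phy id, optionally inserts a serialization layer
 * (cfserl) for CFPHYTYPE_FRAG devices, creates the framing layer
 * (cffrml) and adds the interface to the phys list. The interface is
 * not used for traffic until cfcnfg_set_phy_state() marks it up.
 *
 * A rough registration sketch from a link-layer driver's point of view
 * (illustrative only; 'cnfg', 'dev', 'phy_layer' and 'pref' are
 * assumed to be provided by the caller):
 *
 *	u16 phyid;
 *
 *	cfcnfg_add_phy_layer(cnfg, CFPHYTYPE_FRAG, dev, phy_layer,
 *			     &phyid, pref, true, true);
 *	cfcnfg_set_phy_state(cnfg, phy_layer, true);
 */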
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
		     struct net_device *dev, struct cflayer *phy_layer,
		     u16 *phy_id, enum cfcnfg_phy_preference pref,
		     bool fcs, bool stx)
{
	struct cflayer *frml;
	struct cflayer *phy_driver = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	int i;
	u8 phyid;

	mutex_lock(&cnfg->lock);

	/* The CAIF protocol allows at most 6 link layers */
	for (i = 0; i < 7; i++) {
		phyid = (dev->ifindex + i) & 0x7;
		if (phyid == 0)
			continue;
		if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
			goto got_phyid;
	}
	pr_warn("Too many CAIF Link Layers (max 6)\n");
	goto out;

got_phyid:
	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
	if (!phyinfo) {
		pr_warn("Out of memory\n");
		goto out;
	}

	switch (phy_type) {
	case CFPHYTYPE_FRAG:
		phy_driver =
			cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
		if (!phy_driver) {
			pr_warn("Out of memory\n");
			kfree(phyinfo);
			goto out;
		}
		break;
	case CFPHYTYPE_CAIF:
		phy_driver = NULL;
		break;
	default:
		kfree(phyinfo);
		goto out;
	}
	phy_layer->id = phyid;
	phyinfo->pref = pref;
	phyinfo->id = phyid;
	phyinfo->dev_info.id = phyid;
	phyinfo->dev_info.dev = dev;
	phyinfo->phy_layer = phy_layer;
	phyinfo->ifindex = dev->ifindex;
	phyinfo->use_stx = stx;
	phyinfo->use_fcs = fcs;

	phy_layer->type = phy_type;
	frml = cffrml_create(phyid, fcs);

	if (!frml) {
		pr_warn("Out of memory\n");
		kfree(phyinfo);
		goto out;
	}
	phyinfo->frm_layer = frml;
	layer_set_up(frml, cnfg->mux);

	if (phy_driver != NULL) {
		phy_driver->id = phyid;
		layer_set_dn(frml, phy_driver);
		layer_set_up(phy_driver, frml);
		layer_set_dn(phy_driver, phy_layer);
		layer_set_up(phy_layer, phy_driver);
	} else {
		layer_set_dn(frml, phy_layer);
		layer_set_up(phy_layer, frml);
	}

	list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
	mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);

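/* Mark a registered physical interface up or down. Bringing it up
 * takes a reference on the framing layer and attaches it to the MUX;
 * taking it down detaches it and drops the reference.
 */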
int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
			 bool up)
{
	struct cfcnfg_phyinfo *phyinfo;

	rcu_read_lock();
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
	if (phyinfo == NULL) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (phyinfo->up == up) {
		rcu_read_unlock();
		return 0;
	}
	phyinfo->up = up;

	if (up) {
		cffrml_hold(phyinfo->frm_layer);
		cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
				   phy_layer->id);
	} else {
		cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
		cffrml_put(phyinfo->frm_layer);
	}

	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(cfcnfg_set_phy_state);

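/* Unregister a physical interface. Fails with -EAGAIN while the
 * framing layer still has users; otherwise the interface is unlinked
 * from the phys list and its layers are freed after an RCU grace
 * period.
 */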
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	struct cfcnfg_phyinfo *phyinfo;

	might_sleep();

	mutex_lock(&cnfg->lock);

	phyid = phy_layer->id;
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);

	if (phyinfo == NULL) {
		mutex_unlock(&cnfg->lock);
		return 0;
	}
	caif_assert(phyid == phyinfo->id);
	caif_assert(phy_layer == phyinfo->phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(phyinfo->frm_layer->id == phyid);

	/* Fail if reference count is not zero */
	if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
		pr_info("Wait for device inuse\n");
		mutex_unlock(&cnfg->lock);
		return -EAGAIN;
	}

	list_del_rcu(&phyinfo->node);
	synchronize_rcu();

	frml = phyinfo->frm_layer;
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);

	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
	}
	layer_set_up(phy_layer, NULL);

	if (phyinfo->phy_layer != frml_dn)
		kfree(frml_dn);

	cffrml_free(frml);
	kfree(phyinfo);
	mutex_unlock(&cnfg->lock);

	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);