cfcnfg.c

/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfctrl.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/caif_dev.h>

#define container_obj(layr) container_of(layr, struct cfcnfg, layer)

/* Information about CAIF physical interfaces held by Config Module in order
 * to manage physical interfaces
 */
struct cfcnfg_phyinfo {
	struct list_head node;
	bool up;

	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface */
	enum cfcnfg_phy_preference pref;

	/* Information about the physical device */
	struct dev_info dev_info;

	/* Interface index */
	int ifindex;

	/* Use Start of frame extension */
	bool use_stx;

	/* Use Start of frame checksum */
	bool use_fcs;
};

struct cfcnfg {
	struct cflayer layer;
	struct cflayer *ctrl;
	struct cflayer *mux;
	struct list_head phys;
	struct mutex lock;
};
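
/*
 * Illustrative overview (not part of the original source): the stack this
 * configuration module wires together, as implied by the layer_set_up()/
 * layer_set_dn() calls in this file. Per-channel service layers sit on top
 * of the shared MUX; each physical interface contributes a framing layer
 * and, for CFPHYTYPE_FRAG, a cfserl layer below the MUX:
 *
 *	adaptation layer (client, e.g. a socket)
 *	service layer (cfvei/cfdgml/cfrfml/...)  - created in cfcnfg_linkup_rsp()
 *	cfmuxl (cnfg->mux)                       - shared multiplexer
 *	cffrml (phyinfo->frm_layer)              - per-interface framing
 *	[cfserl, only for CFPHYTYPE_FRAG]
 *	phy_layer                                - link-layer device
 */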
static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
			      enum cfctrl_srv serv, u8 phyid,
			      struct cflayer *adapt_layer);
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer);
static void cfctrl_resp_func(void);
static void cfctrl_enum_resp(void);

struct cfcnfg *cfcnfg_create(void)
{
	struct cfcnfg *this;
	struct cfctrl_rsp *resp;

	might_sleep();

	/* Initiate this layer */
	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
	if (!this) {
		pr_warn("Out of memory\n");
		return NULL;
	}
	this->mux = cfmuxl_create();
	if (!this->mux)
		goto out_of_mem;
	this->ctrl = cfctrl_create();
	if (!this->ctrl)
		goto out_of_mem;

	/* Initiate response functions */
	resp = cfctrl_get_respfuncs(this->ctrl);
	resp->enum_rsp = cfctrl_enum_resp;
	resp->linkerror_ind = cfctrl_resp_func;
	resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
	resp->sleep_rsp = cfctrl_resp_func;
	resp->wake_rsp = cfctrl_resp_func;
	resp->restart_rsp = cfctrl_resp_func;
	resp->radioset_rsp = cfctrl_resp_func;
	resp->linksetup_rsp = cfcnfg_linkup_rsp;
	resp->reject_rsp = cfcnfg_reject_rsp;
	INIT_LIST_HEAD(&this->phys);

	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
	layer_set_dn(this->ctrl, this->mux);
	layer_set_up(this->ctrl, this);
	mutex_init(&this->lock);

	return this;
out_of_mem:
	pr_warn("Out of memory\n");

	synchronize_rcu();

	kfree(this->mux);
	kfree(this->ctrl);
	kfree(this);
	return NULL;
}
void cfcnfg_remove(struct cfcnfg *cfg)
{
	might_sleep();
	if (cfg) {
		synchronize_rcu();

		kfree(cfg->mux);
		cfctrl_remove(cfg->ctrl);
		kfree(cfg);
	}
}

static void cfctrl_resp_func(void)
{
}

static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
						     u8 phyid)
{
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->id == phyid)
			return phy;
	return NULL;
}

static void cfctrl_enum_resp(void)
{
}

static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
					 enum cfcnfg_phy_preference phy_pref)
{
	/* Try to match with specified preference */
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node) {
		if (phy->up && phy->pref == phy_pref &&
				phy->frm_layer != NULL)
			return &phy->dev_info;
	}

	/* Otherwise just return something */
	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->up)
			return &phy->dev_info;
	return NULL;
}

static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->ifindex == ifi && phy->up)
			return phy->id;
	return -ENODEV;
}
int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
{
	u8 channel_id = 0;
	int ret = 0;
	struct cflayer *servl = NULL;
	struct cfcnfg *cfg = get_cfcnfg(net);

	caif_assert(adap_layer != NULL);

	channel_id = adap_layer->id;
	if (adap_layer->dn == NULL || channel_id == 0) {
		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
		ret = -ENOTCONN;
		goto end;
	}

	servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
	if (servl == NULL) {
		pr_err("PROTOCOL ERROR - "
		       "Error removing service_layer Channel_Id(%d)",
		       channel_id);
		ret = -EINVAL;
		goto end;
	}

	ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);

end:
	cfctrl_cancel_req(cfg->ctrl, adap_layer);

	/* Do RCU sync before initiating cleanup */
	synchronize_rcu();
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return ret;
}
EXPORT_SYMBOL(caif_disconnect_client);

static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}

static const int protohead[CFCTRL_SRV_MASK] = {
	[CFCTRL_SRV_VEI] = 4,
	[CFCTRL_SRV_DATAGRAM] = 7,
	[CFCTRL_SRV_UTIL] = 4,
	[CFCTRL_SRV_RFM] = 3,
	[CFCTRL_SRV_DBG] = 3,
};
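
/*
 * Note (added for clarity): protohead[] holds the per-service protocol
 * header size in bytes. caif_connect_client() below adds this value, plus
 * one extra byte when the interface uses the start-of-frame extension
 * (use_stx), to report the headroom (*proto_head) a client must reserve in
 * front of its payload; *proto_tail is the trailing room.
 */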
static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
					  struct caif_connect_request *s,
					  struct cfctrl_link_param *l)
{
	struct dev_info *dev_info;
	enum cfcnfg_phy_preference pref;
	int res;

	memset(l, 0, sizeof(*l));
	/* In caif protocol low value is high priority */
	l->priority = CAIF_PRIO_MAX - s->priority + 1;

	if (s->ifindex != 0) {
		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
		if (res < 0)
			return res;
		l->phyid = res;
	} else {
		switch (s->link_selector) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			return -EINVAL;
		}
		dev_info = cfcnfg_get_phyid(cnfg, pref);
		if (dev_info == NULL)
			return -ENODEV;
		l->phyid = dev_info->id;
	}
	switch (s->protocol) {
	case CAIFPROTO_AT:
		l->linktype = CFCTRL_SRV_VEI;
		l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3;
		l->chtype = s->sockaddr.u.at.type & 0x3;
		break;
	case CAIFPROTO_DATAGRAM:
		l->linktype = CFCTRL_SRV_DATAGRAM;
		l->chtype = 0x00;
		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
		break;
	case CAIFPROTO_DATAGRAM_LOOP:
		l->linktype = CFCTRL_SRV_DATAGRAM;
		l->chtype = 0x03;
		l->endpoint = 0x00;
		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
		break;
	case CAIFPROTO_RFM:
		l->linktype = CFCTRL_SRV_RFM;
		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
			sizeof(l->u.rfm.volume) - 1);
		l->u.rfm.volume[sizeof(l->u.rfm.volume) - 1] = 0;
		break;
	case CAIFPROTO_UTIL:
		l->linktype = CFCTRL_SRV_UTIL;
		l->endpoint = 0x00;
		l->chtype = 0x00;
		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
			sizeof(l->u.utility.name) - 1);
		l->u.utility.name[sizeof(l->u.utility.name) - 1] = 0;
		caif_assert(sizeof(l->u.utility.name) > 10);
		l->u.utility.paramlen = s->param.size;
		if (l->u.utility.paramlen > sizeof(l->u.utility.params))
			l->u.utility.paramlen = sizeof(l->u.utility.params);

		memcpy(l->u.utility.params, s->param.data,
		       l->u.utility.paramlen);

		break;
	case CAIFPROTO_DEBUG:
		l->linktype = CFCTRL_SRV_DBG;
		l->endpoint = s->sockaddr.u.dbg.service;
		l->chtype = s->sockaddr.u.dbg.type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
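
/*
 * Example (illustrative only, the values are hypothetical): a connect
 * request for a CAIF datagram channel with connection id 1 on a specific
 * interface would be filled in by the client roughly as
 *
 *	struct caif_connect_request req = {
 *		.protocol = CAIFPROTO_DATAGRAM,
 *		.ifindex = dev->ifindex,
 *		.priority = CAIF_PRIO_NORMAL,
 *		.sockaddr.u.dgm.connection_id = 1,
 *	};
 *
 * and caif_connect_req_to_link_param() above would translate it into a
 * cfctrl_link_param with linktype CFCTRL_SRV_DATAGRAM, the phyid resolved
 * from the ifindex, and the priority inverted (low value means high
 * priority on the wire).
 */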
int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
			struct cflayer *adap_layer, int *ifindex,
			int *proto_head, int *proto_tail)
{
	struct cflayer *frml;
	struct cfcnfg_phyinfo *phy;
	int err;
	struct cfctrl_link_param param;
	struct cfcnfg *cfg = get_cfcnfg(net);

	caif_assert(cfg != NULL);

	rcu_read_lock();
	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
	if (err)
		goto unlock;

	phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid);
	if (!phy) {
		err = -ENODEV;
		goto unlock;
	}
	err = -EINVAL;

	if (adap_layer == NULL) {
		pr_err("adap_layer is zero\n");
		goto unlock;
	}
	if (adap_layer->receive == NULL) {
		pr_err("adap_layer->receive is NULL\n");
		goto unlock;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("adap_layer->ctrlcmd == NULL\n");
		goto unlock;
	}

	err = -ENODEV;
	frml = phy->frm_layer;
	if (frml == NULL) {
		pr_err("Specified PHY type does not exist!\n");
		goto unlock;
	}
	caif_assert(param.phyid == phy->id);
	caif_assert(phy->frm_layer->id == param.phyid);
	caif_assert(phy->phy_layer->id == param.phyid);

	*ifindex = phy->ifindex;
	*proto_tail = 2;
	*proto_head = protohead[param.linktype] + (phy->use_stx ? 1 : 0);

	rcu_read_unlock();

	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cfg->ctrl, param.phyid);
	return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer);

unlock:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(caif_connect_client);
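
/*
 * Usage sketch (added for illustration, not part of the original source):
 * a client such as the CAIF socket layer is expected to call
 * caif_connect_client() with an adaptation layer whose receive() and
 * ctrlcmd() callbacks are set, and to reserve the returned head/tail room
 * in every packet it sends:
 *
 *	int ifindex, headroom, tailroom;
 *	int err = caif_connect_client(net, &req, &adapt_layer,
 *				      &ifindex, &headroom, &tailroom);
 *
 * The connection completes asynchronously: adapt_layer->ctrlcmd() is later
 * called with CAIF_CTRLCMD_INIT_RSP (see cfcnfg_linkup_rsp() below) or
 * CAIF_CTRLCMD_INIT_FAIL_RSP (see cfcnfg_reject_rsp()).
 */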
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer)
{
	if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
		adapt_layer->ctrlcmd(adapt_layer,
				     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
}

static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	struct net_device *netdev;

	rcu_read_lock();
	if (adapt_layer == NULL) {
		pr_debug("link setup response but no client exists, "
			 "send linkdown back\n");
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		goto unlock;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);

	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
	if (phyinfo == NULL) {
		pr_err("ERROR: Link Layer Device disappeared "
		       "while connecting\n");
		goto unlock;
	}

	caif_assert(phyinfo != NULL);
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	adapt_layer->id = channel_id;

	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id,
					 &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		netdev = phyinfo->dev_info.dev;
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
					 netdev->mtu);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("Protocol error. Link setup response "
		       "- unknown channel type\n");
		goto unlock;
	}
	if (!servicel) {
		pr_warn("Out of memory\n");
		goto unlock;
	}
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);

	rcu_read_unlock();

	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
	return;
unlock:
	rcu_read_unlock();
}
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
		     struct net_device *dev, struct cflayer *phy_layer,
		     enum cfcnfg_phy_preference pref,
		     bool fcs, bool stx)
{
	struct cflayer *frml;
	struct cflayer *phy_driver = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	int i;
	u8 phyid;

	mutex_lock(&cnfg->lock);

	/* CAIF protocol allows at most 6 link layers */
	for (i = 0; i < 7; i++) {
		phyid = (dev->ifindex + i) & 0x7;
		if (phyid == 0)
			continue;
		if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
			goto got_phyid;
	}
	pr_warn("Too many CAIF Link Layers (max 6)\n");
	goto out;

got_phyid:
	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
	if (!phyinfo) {
		pr_warn("Out of memory\n");
		goto out;
	}

	switch (phy_type) {
	case CFPHYTYPE_FRAG:
		phy_driver =
			cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
		if (!phy_driver) {
			pr_warn("Out of memory\n");
			kfree(phyinfo);
			goto out;
		}
		break;
	case CFPHYTYPE_CAIF:
		phy_driver = NULL;
		break;
	default:
		kfree(phyinfo);
		goto out;
	}
	phy_layer->id = phyid;
	phyinfo->pref = pref;
	phyinfo->id = phyid;
	phyinfo->dev_info.id = phyid;
	phyinfo->dev_info.dev = dev;
	phyinfo->phy_layer = phy_layer;
	phyinfo->ifindex = dev->ifindex;
	phyinfo->use_stx = stx;
	phyinfo->use_fcs = fcs;

	phy_layer->type = phy_type;
	frml = cffrml_create(phyid, fcs);
	if (!frml) {
		pr_warn("Out of memory\n");
		kfree(phyinfo);
		goto out;
	}
	phyinfo->frm_layer = frml;
	layer_set_up(frml, cnfg->mux);

	if (phy_driver != NULL) {
		phy_driver->id = phyid;
		layer_set_dn(frml, phy_driver);
		layer_set_up(phy_driver, frml);
		layer_set_dn(phy_driver, phy_layer);
		layer_set_up(phy_layer, phy_driver);
	} else {
		layer_set_dn(frml, phy_layer);
		layer_set_up(phy_layer, frml);
	}

	list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
	mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
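
/*
 * Registration sketch (illustrative, assuming a serial-style link layer):
 * a CAIF link-layer driver registers its lowest layer with the config
 * module roughly like this; CFPHYTYPE_FRAG additionally inserts a cfserl
 * fragmentation layer between the framing layer and the device:
 *
 *	cfcnfg_add_phy_layer(cfg, CFPHYTYPE_FRAG, netdev, &my_phy_layer,
 *			     CFPHYPREF_LOW_LAT, true, true);
 *
 * Here "cfg", "netdev" and "my_phy_layer" stand in for the caller's own
 * cfcnfg instance, net_device and struct cflayer; the last two arguments
 * enable the frame checksum (fcs) and start-of-frame extension (stx).
 */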
int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
			 bool up)
{
	struct cfcnfg_phyinfo *phyinfo;

	rcu_read_lock();
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
	if (phyinfo == NULL) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (phyinfo->up == up) {
		rcu_read_unlock();
		return 0;
	}
	phyinfo->up = up;

	if (up) {
		cffrml_hold(phyinfo->frm_layer);
		cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
				   phy_layer->id);
	} else {
		cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
		cffrml_put(phyinfo->frm_layer);
	}

	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(cfcnfg_set_phy_state);
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	struct cfcnfg_phyinfo *phyinfo;

	might_sleep();

	mutex_lock(&cnfg->lock);

	phyid = phy_layer->id;
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);

	if (phyinfo == NULL) {
		mutex_unlock(&cnfg->lock);
		return 0;
	}
	caif_assert(phyid == phyinfo->id);
	caif_assert(phy_layer == phyinfo->phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(phyinfo->frm_layer->id == phyid);

	list_del_rcu(&phyinfo->node);
	synchronize_rcu();

	/* Fail if reference count is not zero */
	if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
		pr_info("Wait for device inuse\n");
		list_add_rcu(&phyinfo->node, &cnfg->phys);
		mutex_unlock(&cnfg->lock);
		return -EAGAIN;
	}

	frml = phyinfo->frm_layer;
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);

	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
	}
	layer_set_up(phy_layer, NULL);

	if (phyinfo->phy_layer != frml_dn)
		kfree(frml_dn);

	cffrml_free(frml);
	kfree(phyinfo);
	mutex_unlock(&cnfg->lock);

	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);
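
/*
 * Note (added for clarity): cfcnfg_del_phy_layer() returns -EAGAIN while the
 * framing layer still holds a non-zero reference count; in that case the
 * entry is put back on the phys list and the caller may retry later, e.g.:
 *
 *	while (cfcnfg_del_phy_layer(cfg, &my_phy_layer) == -EAGAIN)
 *		msleep(100);
 *
 * The retry loop is only a sketch of one possible caller policy, with "cfg"
 * and "my_phy_layer" standing in for the caller's own objects.
 */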