cfcnfg.c

/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfctrl.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/caif_dev.h>

#define container_obj(layr) container_of(layr, struct cfcnfg, layer)

/* Information about CAIF physical interfaces held by the Config Module in
 * order to manage physical interfaces.
 */
struct cfcnfg_phyinfo {
	struct list_head node;
	bool up;

	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface */
	enum cfcnfg_phy_preference pref;
	/* Information about the physical device */
	struct dev_info dev_info;
	/* Interface index */
	int ifindex;
	/* Use Start of frame extension */
	bool use_stx;
	/* Use Start of frame checksum */
	bool use_fcs;
};
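
/*
 * Configuration object for one CAIF stack instance: owns the control
 * layer and the multiplexer and holds the list of registered physical
 * interfaces. The list is traversed under RCU and modified under 'lock'.
 */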
struct cfcnfg {
	struct cflayer layer;
	struct cflayer *ctrl;
	struct cflayer *mux;
	struct list_head phys;
	struct mutex lock;
};

static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
			      enum cfctrl_srv serv, u8 phyid,
			      struct cflayer *adapt_layer);
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer);
static void cfctrl_resp_func(void);
static void cfctrl_enum_resp(void);
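
/*
 * Create the configuration object: set up the multiplexer and the
 * control layer, register the control response callbacks and attach
 * the control layer as channel 0 on the multiplexer.
 */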
struct cfcnfg *cfcnfg_create(void)
{
	struct cfcnfg *this;
	struct cfctrl_rsp *resp;

	might_sleep();

	/* Initiate this layer */
	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
	if (!this) {
		pr_warn("Out of memory\n");
		return NULL;
	}
	this->mux = cfmuxl_create();
	if (!this->mux)
		goto out_of_mem;
	this->ctrl = cfctrl_create();
	if (!this->ctrl)
		goto out_of_mem;

	/* Initiate response functions */
	resp = cfctrl_get_respfuncs(this->ctrl);
	resp->enum_rsp = cfctrl_enum_resp;
	resp->linkerror_ind = cfctrl_resp_func;
	resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
	resp->sleep_rsp = cfctrl_resp_func;
	resp->wake_rsp = cfctrl_resp_func;
	resp->restart_rsp = cfctrl_resp_func;
	resp->radioset_rsp = cfctrl_resp_func;
	resp->linksetup_rsp = cfcnfg_linkup_rsp;
	resp->reject_rsp = cfcnfg_reject_rsp;

	INIT_LIST_HEAD(&this->phys);

	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
	layer_set_dn(this->ctrl, this->mux);
	layer_set_up(this->ctrl, this);
	mutex_init(&this->lock);

	return this;
out_of_mem:
	pr_warn("Out of memory\n");

	synchronize_rcu();

	kfree(this->mux);
	kfree(this->ctrl);
	kfree(this);
	return NULL;
}

void cfcnfg_remove(struct cfcnfg *cfg)
{
	might_sleep();
	if (cfg) {
		synchronize_rcu();

		kfree(cfg->mux);
		cfctrl_remove(cfg->ctrl);
		kfree(cfg);
	}
}

static void cfctrl_resp_func(void)
{
}

static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
						     u8 phyid)
{
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->id == phyid)
			return phy;
	return NULL;
}

static void cfctrl_enum_resp(void)
{
}

static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
					 enum cfcnfg_phy_preference phy_pref)
{
	/* Try to match with specified preference */
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node) {
		if (phy->up && phy->pref == phy_pref &&
		    phy->frm_layer != NULL)
			return &phy->dev_info;
	}

	/* Otherwise just return something */
	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->up)
			return &phy->dev_info;
	return NULL;
}

static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
	struct cfcnfg_phyinfo *phy;

	list_for_each_entry_rcu(phy, &cnfg->phys, node)
		if (phy->ifindex == ifi && phy->up)
			return phy->id;
	return -ENODEV;
}
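
/*
 * Disconnect a client (adaptation layer): cancel any pending link-setup
 * request, detach the service layer from the multiplexer, request
 * link-down from the remote end and, after an RCU grace period, signal
 * CAIF_CTRLCMD_DEINIT_RSP to the adaptation layer.
 */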
int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
{
	u8 channel_id;
	struct cfcnfg *cfg = get_cfcnfg(net);

	caif_assert(adap_layer != NULL);
	cfctrl_cancel_req(cfg->ctrl, adap_layer);
	channel_id = adap_layer->id;
	if (channel_id != 0) {
		struct cflayer *servl;
		servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
		if (servl != NULL)
			layer_set_up(servl, NULL);
	} else
		pr_debug("nothing to disconnect\n");
	cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);

	/* Do RCU sync before initiating cleanup */
	synchronize_rcu();
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return 0;
}
EXPORT_SYMBOL(caif_disconnect_client);

static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
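
/*
 * Per-service-type protocol header size in bytes, used to report the
 * required headroom back to the caller of caif_connect_client().
 */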
static const int protohead[CFCTRL_SRV_MASK] = {
	[CFCTRL_SRV_VEI] = 4,
	[CFCTRL_SRV_DATAGRAM] = 7,
	[CFCTRL_SRV_UTIL] = 4,
	[CFCTRL_SRV_RFM] = 3,
	[CFCTRL_SRV_DBG] = 3,
};
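
/*
 * Translate a socket connect request into the link-setup parameters
 * used by the CAIF control protocol, including selection of the
 * physical interface (by ifindex or by link-selector preference).
 */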
static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
					  struct caif_connect_request *s,
					  struct cfctrl_link_param *l)
{
	struct dev_info *dev_info;
	enum cfcnfg_phy_preference pref;
	int res;

	memset(l, 0, sizeof(*l));
	/* In the CAIF protocol a low value means high priority */
	l->priority = CAIF_PRIO_MAX - s->priority + 1;

	if (s->ifindex != 0) {
		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
		if (res < 0)
			return res;
		l->phyid = res;
	} else {
		switch (s->link_selector) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			return -EINVAL;
		}
		dev_info = cfcnfg_get_phyid(cnfg, pref);
		if (dev_info == NULL)
			return -ENODEV;
		l->phyid = dev_info->id;
	}
	switch (s->protocol) {
	case CAIFPROTO_AT:
		l->linktype = CFCTRL_SRV_VEI;
		l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3;
		l->chtype = s->sockaddr.u.at.type & 0x3;
		break;
	case CAIFPROTO_DATAGRAM:
		l->linktype = CFCTRL_SRV_DATAGRAM;
		l->chtype = 0x00;
		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
		break;
	case CAIFPROTO_DATAGRAM_LOOP:
		l->linktype = CFCTRL_SRV_DATAGRAM;
		l->chtype = 0x03;
		l->endpoint = 0x00;
		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
		break;
	case CAIFPROTO_RFM:
		l->linktype = CFCTRL_SRV_RFM;
		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
			sizeof(l->u.rfm.volume) - 1);
		l->u.rfm.volume[sizeof(l->u.rfm.volume) - 1] = 0;
		break;
	case CAIFPROTO_UTIL:
		l->linktype = CFCTRL_SRV_UTIL;
		l->endpoint = 0x00;
		l->chtype = 0x00;
		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
			sizeof(l->u.utility.name) - 1);
		l->u.utility.name[sizeof(l->u.utility.name) - 1] = 0;
		caif_assert(sizeof(l->u.utility.name) > 10);
		l->u.utility.paramlen = s->param.size;
		if (l->u.utility.paramlen > sizeof(l->u.utility.params))
			l->u.utility.paramlen = sizeof(l->u.utility.params);
		memcpy(l->u.utility.params, s->param.data,
		       l->u.utility.paramlen);
		break;
	case CAIFPROTO_DEBUG:
		l->linktype = CFCTRL_SRV_DBG;
		l->endpoint = s->sockaddr.u.dbg.service;
		l->chtype = s->sockaddr.u.dbg.type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
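
/*
 * Set up a new CAIF channel on behalf of a client: validate the
 * adaptation layer, resolve the physical interface, report the required
 * header/tail room and issue the link-setup request.
 */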
int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
			struct cflayer *adap_layer, int *ifindex,
			int *proto_head, int *proto_tail)
{
	struct cflayer *frml;
	struct cfcnfg_phyinfo *phy;
	int err;
	struct cfctrl_link_param param;
	struct cfcnfg *cfg = get_cfcnfg(net);

	caif_assert(cfg != NULL);

	rcu_read_lock();
	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
	if (err)
		goto unlock;

	phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid);
	if (!phy) {
		err = -ENODEV;
		goto unlock;
	}
	err = -EINVAL;

	if (adap_layer == NULL) {
		pr_err("adap_layer is zero\n");
		goto unlock;
	}
	if (adap_layer->receive == NULL) {
		pr_err("adap_layer->receive is NULL\n");
		goto unlock;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("adap_layer->ctrlcmd == NULL\n");
		goto unlock;
	}

	err = -ENODEV;
	frml = phy->frm_layer;
	if (frml == NULL) {
		pr_err("Specified PHY type does not exist!\n");
		goto unlock;
	}
	caif_assert(param.phyid == phy->id);
	caif_assert(phy->frm_layer->id == param.phyid);
	caif_assert(phy->phy_layer->id == param.phyid);

	*ifindex = phy->ifindex;
	*proto_tail = 2;
	*proto_head = protohead[param.linktype] + (phy->use_stx ? 1 : 0);

	rcu_read_unlock();

	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cfg->ctrl, param.phyid);
	return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer);

unlock:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(caif_connect_client);

static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer)
{
	if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
		adapt_layer->ctrlcmd(adapt_layer,
				     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
}
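
/*
 * Handle a link-setup response from the modem: create the service layer
 * matching the negotiated service type and splice it in between the
 * multiplexer and the client's adaptation layer.
 */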
static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	struct net_device *netdev;

	if (channel_id == 0) {
		pr_warn("received channel_id zero\n");
		if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
			adapt_layer->ctrlcmd(adapt_layer,
					     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
		return;
	}

	rcu_read_lock();

	if (adapt_layer == NULL) {
		pr_debug("link setup response but no client exists, "
			 "send linkdown back\n");
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		goto unlock;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);

	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
	if (phyinfo == NULL) {
		pr_err("ERROR: Link Layer Device disappeared "
		       "while connecting\n");
		goto unlock;
	}

	caif_assert(phyinfo != NULL);
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	adapt_layer->id = channel_id;

	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		netdev = phyinfo->dev_info.dev;
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
					 netdev->mtu);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("Protocol error. Link setup response "
		       "- unknown channel type\n");
		goto unlock;
	}
	if (!servicel) {
		pr_warn("Out of memory\n");
		goto unlock;
	}
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);

	rcu_read_unlock();

	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
	return;
unlock:
	rcu_read_unlock();
}
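
/*
 * Register a new physical interface: pick a free physical id, optionally
 * insert a serialization layer for fragmenting link layers, create the
 * framing layer and publish the interface on the RCU-protected list.
 */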
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
		     struct net_device *dev, struct cflayer *phy_layer,
		     enum cfcnfg_phy_preference pref,
		     bool fcs, bool stx)
{
	struct cflayer *frml;
	struct cflayer *phy_driver = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	int i;
	u8 phyid;

	mutex_lock(&cnfg->lock);

	/* CAIF protocol allows a maximum of 6 link layers */
	for (i = 0; i < 7; i++) {
		phyid = (dev->ifindex + i) & 0x7;
		if (phyid == 0)
			continue;
		if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
			goto got_phyid;
	}
	pr_warn("Too many CAIF Link Layers (max 6)\n");
	goto out;

got_phyid:
	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
	if (!phyinfo) {
		pr_warn("Out of memory\n");
		goto out;
	}

	switch (phy_type) {
	case CFPHYTYPE_FRAG:
		phy_driver =
		    cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
		if (!phy_driver) {
			pr_warn("Out of memory\n");
			kfree(phyinfo);
			goto out;
		}
		break;
	case CFPHYTYPE_CAIF:
		phy_driver = NULL;
		break;
	default:
		kfree(phyinfo);
		goto out;
	}
	phy_layer->id = phyid;
	phyinfo->pref = pref;
	phyinfo->id = phyid;
	phyinfo->dev_info.id = phyid;
	phyinfo->dev_info.dev = dev;
	phyinfo->phy_layer = phy_layer;
	phyinfo->ifindex = dev->ifindex;
	phyinfo->use_stx = stx;
	phyinfo->use_fcs = fcs;

	frml = cffrml_create(phyid, fcs);
	if (!frml) {
		pr_warn("Out of memory\n");
		kfree(phyinfo);
		goto out;
	}
	phyinfo->frm_layer = frml;
	layer_set_up(frml, cnfg->mux);

	if (phy_driver != NULL) {
		phy_driver->id = phyid;
		layer_set_dn(frml, phy_driver);
		layer_set_up(phy_driver, frml);
		layer_set_dn(phy_driver, phy_layer);
		layer_set_up(phy_layer, phy_driver);
	} else {
		layer_set_dn(frml, phy_layer);
		layer_set_up(phy_layer, frml);
	}
	list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
	mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
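
/*
 * Bring a registered physical interface up or down by adding or removing
 * its framing layer from the multiplexer.
 */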
int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
			 bool up)
{
	struct cfcnfg_phyinfo *phyinfo;

	rcu_read_lock();
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
	if (phyinfo == NULL) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (phyinfo->up == up) {
		rcu_read_unlock();
		return 0;
	}
	phyinfo->up = up;

	if (up) {
		cffrml_hold(phyinfo->frm_layer);
		cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
				   phy_layer->id);
	} else {
		cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
		cffrml_put(phyinfo->frm_layer);
	}

	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(cfcnfg_set_phy_state);
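
/*
 * Unregister a physical interface. Returns -EAGAIN if the framing layer
 * is still referenced, in which case the interface is put back on the
 * list so the caller can retry later.
 */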
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	struct cfcnfg_phyinfo *phyinfo;

	might_sleep();

	mutex_lock(&cnfg->lock);

	phyid = phy_layer->id;
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);

	if (phyinfo == NULL) {
		mutex_unlock(&cnfg->lock);
		return 0;
	}
	caif_assert(phyid == phyinfo->id);
	caif_assert(phy_layer == phyinfo->phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(phyinfo->frm_layer->id == phyid);

	list_del_rcu(&phyinfo->node);
	synchronize_rcu();

	/* Fail if reference count is not zero */
	if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
		pr_info("Wait for device inuse\n");
		list_add_rcu(&phyinfo->node, &cnfg->phys);
		mutex_unlock(&cnfg->lock);
		return -EAGAIN;
	}

	frml = phyinfo->frm_layer;
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);

	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
	}
	layer_set_up(phy_layer, NULL);

	if (phyinfo->phy_layer != frml_dn)
		kfree(frml_dn);

	cffrml_free(frml);
	kfree(phyinfo);
	mutex_unlock(&cnfg->lock);

	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);