/* ds.c: Domain Services driver for Logical Domains
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/cpu.h>

#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/power.h>
#include <asm/mdesc.h>
#include <asm/head.h>

#define DRV_MODULE_NAME "ds"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Jul 11, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
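
/* Every domain-services message starts with a ds_msg_tag header:
 * 'type' selects the message class and 'len' gives the number of
 * payload bytes that follow the tag.
 */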
struct ds_msg_tag {
	__u32 type;
#define DS_INIT_REQ	0x00
#define DS_INIT_ACK	0x01
#define DS_INIT_NACK	0x02
#define DS_REG_REQ	0x03
#define DS_REG_ACK	0x04
#define DS_REG_NACK	0x05
#define DS_UNREG_REQ	0x06
#define DS_UNREG_ACK	0x07
#define DS_UNREG_NACK	0x08
#define DS_DATA		0x09
#define DS_NACK		0x0a

	__u32 len;
};

/* Result codes */
#define DS_OK		0x00
#define DS_REG_VER_NACK	0x01
#define DS_REG_DUP	0x02
#define DS_INV_HDL	0x03
#define DS_TYPE_UNKNOWN	0x04

struct ds_version {
	__u16 major;
	__u16 minor;
};

struct ds_ver_req {
	struct ds_msg_tag tag;
	struct ds_version ver;
};

struct ds_ver_ack {
	struct ds_msg_tag tag;
	__u16 minor;
};

struct ds_ver_nack {
	struct ds_msg_tag tag;
	__u16 major;
};

struct ds_reg_req {
	struct ds_msg_tag tag;
	__u64 handle;
	__u16 major;
	__u16 minor;
	char svc_id[0];
};

struct ds_reg_ack {
	struct ds_msg_tag tag;
	__u64 handle;
	__u16 minor;
};

struct ds_reg_nack {
	struct ds_msg_tag tag;
	__u64 handle;
	__u16 major;
};

struct ds_unreg_req {
	struct ds_msg_tag tag;
	__u64 handle;
};

struct ds_unreg_ack {
	struct ds_msg_tag tag;
	__u64 handle;
};

struct ds_unreg_nack {
	struct ds_msg_tag tag;
	__u64 handle;
};

struct ds_data {
	struct ds_msg_tag tag;
	__u64 handle;
};

struct ds_data_nack {
	struct ds_msg_tag tag;
	__u64 handle;
	__u64 result;
};

struct ds_cap_state {
	__u64 handle;

	void (*data)(struct ldc_channel *lp,
		     struct ds_cap_state *cp,
		     void *buf, int len);

	const char *service_id;

	u8 state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};

static void md_update_data(struct ldc_channel *lp, struct ds_cap_state *cp,
			   void *buf, int len);
static void domain_shutdown_data(struct ldc_channel *lp,
				 struct ds_cap_state *cp,
				 void *buf, int len);
static void domain_panic_data(struct ldc_channel *lp,
			      struct ds_cap_state *cp,
			      void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ldc_channel *lp,
			struct ds_cap_state *cp,
			void *buf, int len);
#endif
static void ds_pri_data(struct ldc_channel *lp,
			struct ds_cap_state *cp,
			void *buf, int len);
static void ds_var_data(struct ldc_channel *lp,
			struct ds_cap_state *cp,
			void *buf, int len);

struct ds_cap_state ds_states[] = {
	{
		.service_id = "md-update",
		.data = md_update_data,
	},
	{
		.service_id = "domain-shutdown",
		.data = domain_shutdown_data,
	},
	{
		.service_id = "domain-panic",
		.data = domain_panic_data,
	},
#ifdef CONFIG_HOTPLUG_CPU
	{
		.service_id = "dr-cpu",
		.data = dr_cpu_data,
	},
#endif
	{
		.service_id = "pri",
		.data = ds_pri_data,
	},
	{
		.service_id = "var-config",
		.data = ds_var_data,
	},
	{
		.service_id = "var-config-backup",
		.data = ds_var_data,
	},
};

static DEFINE_SPINLOCK(ds_lock);

struct ds_info {
	struct ldc_channel *lp;
	u8 hs_state;
#define DS_HS_START	0x01
#define DS_HS_DONE	0x02

	void *rcv_buf;
	int rcv_buf_len;
};

static struct ds_info *ds_info;
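
/* Service handles are built as (index << 32) | nonce, where 'index' is
 * the service's slot in ds_states[] (see ds_init() and
 * register_services()), so find_cap() can recover the slot from the
 * upper 32 bits alone.
 */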
static struct ds_cap_state *find_cap(u64 handle)
{
	unsigned int index = handle >> 32;

	if (index >= ARRAY_SIZE(ds_states))
		return NULL;
	return &ds_states[index];
}

static struct ds_cap_state *find_cap_by_string(const char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ds_states); i++) {
		if (strcmp(ds_states[i].service_id, name))
			continue;

		return &ds_states[i];
	}
	return NULL;
}
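
/* Push a fully formed message out over the LDC channel, retrying
 * briefly (up to 1000 attempts with 1us delays) as long as
 * ldc_write() keeps returning -EAGAIN.
 */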
static int ds_send(struct ldc_channel *lp, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}

struct ds_md_update_req {
	__u64 req_num;
};

struct ds_md_update_res {
	__u64 req_num;
	__u32 result;
};
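
/* The md-update, domain-shutdown and domain-panic handlers below all
 * follow the same pattern: the service payload sits immediately after
 * the ds_data header, and each handler acknowledges the request with a
 * DS_DATA reply carrying the request number and a DS_OK result before
 * acting on it.
 */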
static void md_update_data(struct ldc_channel *lp,
			   struct ds_cap_state *dp,
			   void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_md_update_req *rp;
	struct {
		struct ds_data data;
		struct ds_md_update_res res;
	} pkt;

	rp = (struct ds_md_update_req *) (dpkt + 1);

	printk(KERN_INFO PFX "Machine description update.\n");

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = dp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;

	ds_send(lp, &pkt, sizeof(pkt));

	mdesc_update();
}

struct ds_shutdown_req {
	__u64 req_num;
	__u32 ms_delay;
};

struct ds_shutdown_res {
	__u64 req_num;
	__u32 result;
	char reason[1];
};

static void domain_shutdown_data(struct ldc_channel *lp,
				 struct ds_cap_state *dp,
				 void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_shutdown_req *rp;
	struct {
		struct ds_data data;
		struct ds_shutdown_res res;
	} pkt;

	rp = (struct ds_shutdown_req *) (dpkt + 1);

	printk(KERN_ALERT PFX "Shutdown request from "
	       "LDOM manager received.\n");

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = dp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	wake_up_powerd();
}

struct ds_panic_req {
	__u64 req_num;
};

struct ds_panic_res {
	__u64 req_num;
	__u32 result;
	char reason[1];
};

static void domain_panic_data(struct ldc_channel *lp,
			      struct ds_cap_state *dp,
			      void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_panic_req *rp;
	struct {
		struct ds_data data;
		struct ds_panic_res res;
	} pkt;

	rp = (struct ds_panic_req *) (dpkt + 1);

	printk(KERN_ALERT PFX "Panic request from "
	       "LDOM manager received.\n");

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = dp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	panic("PANIC requested by LDOM manager.");
}

#ifdef CONFIG_HOTPLUG_CPU
struct dr_cpu_tag {
	__u64 req_num;
	__u32 type;
#define DR_CPU_CONFIGURE		0x43
#define DR_CPU_UNCONFIGURE		0x55
#define DR_CPU_FORCE_UNCONFIGURE	0x46
#define DR_CPU_STATUS			0x53

/* Responses */
#define DR_CPU_OK			0x6f
#define DR_CPU_ERROR			0x65

	__u32 num_records;
};

struct dr_cpu_resp_entry {
	__u32 cpu;
	__u32 result;
#define DR_CPU_RES_OK			0x00
#define DR_CPU_RES_FAILURE		0x01
#define DR_CPU_RES_BLOCKED		0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING	0x03
#define DR_CPU_RES_NOT_IN_MD		0x04

	__u32 stat;
#define DR_CPU_STAT_NOT_PRESENT		0x00
#define DR_CPU_STAT_UNCONFIGURED	0x01
#define DR_CPU_STAT_CONFIGURED		0x02

	__u32 str_off;
};

/* DR cpu requests get queued onto the work list by the
 * dr_cpu_data() callback.  The list is protected by
 * ds_lock, and processed by process_dr_cpu_list() in order.
 */
static LIST_HEAD(dr_cpu_work_list);
static DECLARE_WAIT_QUEUE_HEAD(dr_cpu_wait);

struct dr_cpu_queue_entry {
	struct list_head list;
	char req[0];
};

static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
{
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	struct ds_info *dp = ds_info;
	struct {
		struct ds_data data;
		struct dr_cpu_tag tag;
	} pkt;
	int msg_len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.handle = cp->handle;
	pkt.tag.req_num = tag->req_num;
	pkt.tag.type = DR_CPU_ERROR;
	pkt.tag.num_records = 0;

	msg_len = (sizeof(struct ds_data) +
		   sizeof(struct dr_cpu_tag));

	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

	ds_send(dp->lp, &pkt, msg_len);
}

static void dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	__dr_cpu_send_error(cp, data);
	spin_unlock_irqrestore(&ds_lock, flags);
}

#define CPU_SENTINEL	0xffffffff

static void purge_dups(u32 *list, u32 num_ents)
{
	unsigned int i;

	for (i = 0; i < num_ents; i++) {
		u32 cpu = list[i];
		unsigned int j;

		if (cpu == CPU_SENTINEL)
			continue;

		for (j = i + 1; j < num_ents; j++) {
			if (list[j] == cpu)
				list[j] = CPU_SENTINEL;
		}
	}
}

static int dr_cpu_size_response(int ncpus)
{
	return (sizeof(struct ds_data) +
		sizeof(struct dr_cpu_tag) +
		(sizeof(struct dr_cpu_resp_entry) * ncpus));
}

static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
				 u64 handle, int resp_len, int ncpus,
				 cpumask_t *mask, u32 default_stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i, cpu;

	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	resp->tag.type = DS_DATA;
	resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
	resp->handle = handle;
	tag->req_num = req_num;
	tag->type = DR_CPU_OK;
	tag->num_records = ncpus;

	i = 0;
	for_each_cpu_mask(cpu, *mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
		i++;
	}
	BUG_ON(i != ncpus);
}

static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
			u32 res, u32 stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i;

	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	for (i = 0; i < ncpus; i++) {
		if (ent[i].cpu != cpu)
			continue;
		ent[i].result = res;
		ent[i].stat = stat;
		break;
	}
}
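
/* Bring up each cpu named in the request mask.  The response buffer is
 * pre-initialized as if every cpu configured successfully, and
 * dr_cpu_mark() downgrades individual entries when cpu_up() fails.
 */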
static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
			    cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpus_weight(*mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_CONFIGURED);

	mdesc_fill_in_cpu_data(*mask);

	for_each_cpu_mask(cpu, *mask) {
		int err;

		printk(KERN_INFO PFX "Starting cpu %d...\n", cpu);
		err = cpu_up(cpu);
		if (err) {
			__u32 res = DR_CPU_RES_FAILURE;
			__u32 stat = DR_CPU_STAT_UNCONFIGURED;

			if (!cpu_present(cpu)) {
				/* CPU not present in MD */
				res = DR_CPU_RES_NOT_IN_MD;
				stat = DR_CPU_STAT_NOT_PRESENT;
			} else if (err == -ENODEV) {
				/* CPU did not call in successfully */
				res = DR_CPU_RES_CPU_NOT_RESPONDING;
			}

			printk(KERN_INFO PFX "CPU startup failed err=%d\n",
			       err);
			dr_cpu_mark(resp, cpu, ncpus, res, stat);
		}
	}

	spin_lock_irqsave(&ds_lock, flags);
	ds_send(ds_info->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	return 0;
}

static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
			      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus;

	ncpus = cpus_weight(*mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);

	kfree(resp);

	return -EOPNOTSUPP;
}

static void process_dr_cpu_list(struct ds_cap_state *cp)
{
	struct dr_cpu_queue_entry *qp, *tmp;
	unsigned long flags;
	LIST_HEAD(todo);
	cpumask_t mask;

	spin_lock_irqsave(&ds_lock, flags);
	list_splice(&dr_cpu_work_list, &todo);
	INIT_LIST_HEAD(&dr_cpu_work_list);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {
		struct ds_data *data = (struct ds_data *) qp->req;
		struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
		u32 *cpu_list = (u32 *) (tag + 1);
		u64 req_num = tag->req_num;
		unsigned int i;
		int err;

		switch (tag->type) {
		case DR_CPU_CONFIGURE:
		case DR_CPU_UNCONFIGURE:
		case DR_CPU_FORCE_UNCONFIGURE:
			break;

		default:
			dr_cpu_send_error(cp, data);
			goto next;
		}

		purge_dups(cpu_list, tag->num_records);

		cpus_clear(mask);
		for (i = 0; i < tag->num_records; i++) {
			if (cpu_list[i] == CPU_SENTINEL)
				continue;

			if (cpu_list[i] < NR_CPUS)
				cpu_set(cpu_list[i], mask);
		}

		if (tag->type == DR_CPU_CONFIGURE)
			err = dr_cpu_configure(cp, req_num, &mask);
		else
			err = dr_cpu_unconfigure(cp, req_num, &mask);

		if (err)
			dr_cpu_send_error(cp, data);

next:
		list_del(&qp->list);
		kfree(qp);
	}
}
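
/* Kernel thread (started from ds_init()) that sleeps on dr_cpu_wait
 * and drains dr_cpu_work_list whenever dr_cpu_data() queues a request.
 */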
static int dr_cpu_thread(void *__unused)
{
	struct ds_cap_state *cp;
	DEFINE_WAIT(wait);

	cp = find_cap_by_string("dr-cpu");

	while (1) {
		prepare_to_wait(&dr_cpu_wait, &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&dr_cpu_work_list))
			schedule();
		finish_wait(&dr_cpu_wait, &wait);

		if (kthread_should_stop())
			break;

		process_dr_cpu_list(cp);
	}

	return 0;
}

static void dr_cpu_data(struct ldc_channel *lp,
			struct ds_cap_state *dp,
			void *buf, int len)
{
	struct dr_cpu_queue_entry *qp;
	struct ds_data *dpkt = buf;
	struct dr_cpu_tag *rp;

	rp = (struct dr_cpu_tag *) (dpkt + 1);

	qp = kmalloc(sizeof(struct dr_cpu_queue_entry) + len, GFP_ATOMIC);
	if (!qp) {
		struct ds_cap_state *cp;

		cp = find_cap_by_string("dr-cpu");
		__dr_cpu_send_error(cp, dpkt);
	} else {
		memcpy(&qp->req, buf, len);
		list_add_tail(&qp->list, &dr_cpu_work_list);
		wake_up(&dr_cpu_wait);
	}
}
#endif

struct ds_pri_msg {
	__u64 req_num;
	__u64 type;
#define DS_PRI_REQUEST	0x00
#define DS_PRI_DATA	0x01
#define DS_PRI_UPDATE	0x02
};

static void ds_pri_data(struct ldc_channel *lp,
			struct ds_cap_state *dp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_pri_msg *rp;

	rp = (struct ds_pri_msg *) (dpkt + 1);

	printk(KERN_INFO PFX "PRI REQ [%lx:%lx], len=%d\n",
	       rp->req_num, rp->type, len);
}

struct ds_var_hdr {
	__u32 type;
#define DS_VAR_SET_REQ		0x00
#define DS_VAR_DELETE_REQ	0x01
#define DS_VAR_SET_RESP		0x02
#define DS_VAR_DELETE_RESP	0x03
};

struct ds_var_set_msg {
	struct ds_var_hdr hdr;
	char name_and_value[0];
};

struct ds_var_delete_msg {
	struct ds_var_hdr hdr;
	char name[0];
};

struct ds_var_resp {
	struct ds_var_hdr hdr;
	__u32 result;
#define DS_VAR_SUCCESS		0x00
#define DS_VAR_NO_SPACE		0x01
#define DS_VAR_INVALID_VAR	0x02
#define DS_VAR_INVALID_VAL	0x03
#define DS_VAR_NOT_PRESENT	0x04
};

static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;

static void ds_var_data(struct ldc_channel *lp,
			struct ds_cap_state *dp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_var_resp *rp;

	rp = (struct ds_var_resp *) (dpkt + 1);

	if (rp->hdr.type != DS_VAR_SET_RESP &&
	    rp->hdr.type != DS_VAR_DELETE_RESP)
		return;

	ds_var_response = rp->result;
	wmb();
	ds_var_doorbell = 1;
}
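
/* Set an OBP variable via the var-config service (falling back to
 * var-config-backup).  The reply is delivered asynchronously by
 * ds_var_data(), so after sending the request we busy-wait on
 * ds_var_doorbell for up to ~100ms before declaring failure.
 */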
void ldom_set_var(const char *var, const char *value)
{
	struct ds_info *dp = ds_info;
	struct ds_cap_state *cp;

	cp = find_cap_by_string("var-config");
	if (cp->state != CAP_STATE_REGISTERED)
		cp = find_cap_by_string("var-config-backup");

	if (cp->state == CAP_STATE_REGISTERED) {
		union {
			struct {
				struct ds_data data;
				struct ds_var_set_msg msg;
			} header;
			char all[512];
		} pkt;
		unsigned long flags;
		char *base, *p;
		int msg_len, loops;

		memset(&pkt, 0, sizeof(pkt));
		pkt.header.data.tag.type = DS_DATA;
		pkt.header.data.handle = cp->handle;
		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
		base = p = &pkt.header.msg.name_and_value[0];
		strcpy(p, var);
		p += strlen(var) + 1;
		strcpy(p, value);
		p += strlen(value) + 1;

		msg_len = (sizeof(struct ds_data) +
			   sizeof(struct ds_var_set_msg) +
			   (p - base));
		msg_len = (msg_len + 3) & ~3;
		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

		mutex_lock(&ds_var_mutex);

		spin_lock_irqsave(&ds_lock, flags);
		ds_var_doorbell = 0;
		ds_var_response = -1;

		ds_send(dp->lp, &pkt, msg_len);
		spin_unlock_irqrestore(&ds_lock, flags);

		loops = 1000;
		while (ds_var_doorbell == 0) {
			if (loops-- < 0)
				break;
			barrier();
			udelay(100);
		}

		mutex_unlock(&ds_var_mutex);

		if (ds_var_doorbell == 0 ||
		    ds_var_response != DS_VAR_SUCCESS)
			printk(KERN_ERR PFX "var-config [%s:%s] "
			       "failed, response(%d).\n",
			       var, value,
			       ds_var_response);
	} else {
		printk(KERN_ERR PFX "var-config not registered so "
		       "could not set (%s) variable to (%s).\n",
		       var, value);
	}
}

void ldom_reboot(const char *boot_command)
{
	/* Don't bother with any of this if the boot_command
	 * is empty.
	 */
	if (boot_command && strlen(boot_command)) {
		char full_boot_str[256];

		strcpy(full_boot_str, "boot ");
		strcpy(full_boot_str + strlen("boot "), boot_command);

		ldom_set_var("reboot-command", full_boot_str);
	}
	sun4v_mach_sir();
}

void ldom_power_off(void)
{
	sun4v_mach_exit(0);
}

static void ds_conn_reset(struct ds_info *dp)
{
	printk(KERN_ERR PFX "ds_conn_reset() from %p\n",
	       __builtin_return_address(0));
}

static int register_services(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	int i;

	for (i = 0; i < ARRAY_SIZE(ds_states); i++) {
		struct {
			struct ds_reg_req req;
			u8 id_buf[256];
		} pbuf;
		struct ds_cap_state *cp = &ds_states[i];
		int err, msg_len;
		u64 new_count;

		if (cp->state == CAP_STATE_REGISTERED)
			continue;

		new_count = sched_clock() & 0xffffffff;
		cp->handle = ((u64) i << 32) | new_count;

		msg_len = (sizeof(struct ds_reg_req) +
			   strlen(cp->service_id));

		memset(&pbuf, 0, sizeof(pbuf));
		pbuf.req.tag.type = DS_REG_REQ;
		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
		pbuf.req.handle = cp->handle;
		pbuf.req.major = 1;
		pbuf.req.minor = 0;

		strcpy(pbuf.req.svc_id, cp->service_id);

		err = ds_send(lp, &pbuf, msg_len);
		if (err > 0)
			cp->state = CAP_STATE_REG_SENT;
	}

	return 0;
}
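
/* Handshake state machine.  ds_up() sends DS_INIT_REQ and moves
 * hs_state to DS_HS_START; the DS_INIT_ACK reply promotes it to
 * DS_HS_DONE and triggers per-service registration.  After that,
 * DS_REG_ACK/DS_REG_NACK messages update the matching ds_cap_state.
 * Anything out of sequence resets the connection.
 */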
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
	if (dp->hs_state == DS_HS_START) {
		if (pkt->type != DS_INIT_ACK)
			goto conn_reset;

		dp->hs_state = DS_HS_DONE;

		return register_services(dp);
	}

	if (dp->hs_state != DS_HS_DONE)
		goto conn_reset;

	if (pkt->type == DS_REG_ACK) {
		struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
		struct ds_cap_state *cp = find_cap(ap->handle);

		if (!cp) {
			printk(KERN_ERR PFX "REG ACK for unknown handle %lx\n",
			       ap->handle);
			return 0;
		}
		printk(KERN_INFO PFX "Registered %s service.\n",
		       cp->service_id);
		cp->state = CAP_STATE_REGISTERED;
	} else if (pkt->type == DS_REG_NACK) {
		struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
		struct ds_cap_state *cp = find_cap(np->handle);

		if (!cp) {
			printk(KERN_ERR PFX "REG NACK for "
			       "unknown handle %lx\n",
			       np->handle);
			return 0;
		}
		printk(KERN_INFO PFX "Could not register %s service\n",
		       cp->service_id);
		cp->state = CAP_STATE_UNKNOWN;
	}

	return 0;

conn_reset:
	ds_conn_reset(dp);
	return -ECONNRESET;
}

static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
	struct ds_data *dpkt = (struct ds_data *) pkt;
	struct ds_cap_state *cp = find_cap(dpkt->handle);

	if (!cp) {
		struct ds_data_nack nack = {
			.tag = {
				.type = DS_NACK,
				.len = (sizeof(struct ds_data_nack) -
					sizeof(struct ds_msg_tag)),
			},
			.handle = dpkt->handle,
			.result = DS_INV_HDL,
		};

		printk(KERN_ERR PFX "Data for unknown handle %lu\n",
		       dpkt->handle);
		ds_send(dp->lp, &nack, sizeof(nack));
	} else {
		cp->data(dp->lp, cp, dpkt, len);
	}

	return 0;
}

static void ds_up(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_ver_req req;
	int err;

	req.tag.type = DS_INIT_REQ;
	req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
	req.ver.major = 1;
	req.ver.minor = 0;

	err = ds_send(lp, &req, sizeof(req));
	if (err > 0)
		dp->hs_state = DS_HS_START;
}
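
/* LDC event callback.  LDC_EVENT_UP kicks off the handshake; on
 * LDC_EVENT_DATA_READY we read one ds_msg_tag at a time, then the
 * tag->len bytes of payload, and dispatch to ds_handshake() or
 * ds_data() depending on the message type.
 */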
static void ds_event(void *arg, int event)
{
	struct ds_info *dp = arg;
	struct ldc_channel *lp = dp->lp;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);

	if (event == LDC_EVENT_UP) {
		ds_up(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event != LDC_EVENT_DATA_READY) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	err = 0;
	while (1) {
		struct ds_msg_tag *tag;

		err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err == 0)
			break;

		tag = dp->rcv_buf;
		err = ldc_read(lp, tag + 1, tag->len);

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err < tag->len)
			break;

		if (tag->type < DS_DATA)
			err = ds_handshake(dp, dp->rcv_buf);
		else
			err = ds_data(dp, dp->rcv_buf,
				      sizeof(*tag) + err);

		if (err == -ECONNRESET)
			break;
	}

	spin_unlock_irqrestore(&ds_lock, flags);
}
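
/* Probe one "domain-services-port" vio device: allocate the receive
 * buffer, bind an LDC channel in stream mode (mtu 4096, matching the
 * 4096-byte rcv_buf), and start the power daemon.
 */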
static int __devinit ds_probe(struct vio_dev *vdev,
			      const struct vio_device_id *id)
{
	static int ds_version_printed;
	struct ldc_channel_config ds_cfg = {
		.event = ds_event,
		.mtu = 4096,
		.mode = LDC_MODE_STREAM,
	};
	struct ldc_channel *lp;
	struct ds_info *dp;
	int err;

	if (ds_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	err = -ENOMEM;
	if (!dp)
		goto out_err;

	dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
	if (!dp->rcv_buf)
		goto out_free_dp;

	dp->rcv_buf_len = 4096;

	ds_cfg.tx_irq = vdev->tx_irq;
	ds_cfg.rx_irq = vdev->rx_irq;

	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out_free_rcv_buf;
	}
	dp->lp = lp;

	err = ldc_bind(lp, "DS");
	if (err)
		goto out_free_ldc;

	ds_info = dp;

	start_powerd();

	return err;

out_free_ldc:
	ldc_free(dp->lp);

out_free_rcv_buf:
	kfree(dp->rcv_buf);

out_free_dp:
	kfree(dp);

out_err:
	return err;
}

static int ds_remove(struct vio_dev *vdev)
{
	return 0;
}

static struct vio_device_id ds_match[] = {
	{
		.type = "domain-services-port",
	},
	{},
};

static struct vio_driver ds_driver = {
	.id_table	= ds_match,
	.probe		= ds_probe,
	.remove		= ds_remove,
	.driver		= {
		.name	= "ds",
		.owner	= THIS_MODULE,
	}
};

static int __init ds_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ds_states); i++)
		ds_states[i].handle = ((u64)i << 32);

#ifdef CONFIG_HOTPLUG_CPU
	kthread_run(dr_cpu_thread, NULL, "kdrcpud");
#endif

	return vio_register_driver(&ds_driver);
}

subsys_initcall(ds_init);