hardwall.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>
/*
 * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
 * We use "hardwall" nomenclature throughout for historical reasons.
 * The lock here controls access to the list data structure as well as
 * to the items on the list.
 */
struct hardwall_type {
        int index;
        int is_xdn;
        int is_idn;
        int disabled;
        const char *name;
        struct list_head list;
        spinlock_t lock;
        struct proc_dir_entry *proc_dir;
};

enum hardwall_index {
        HARDWALL_UDN = 0,
#ifndef __tilepro__
        HARDWALL_IDN = 1,
        HARDWALL_IPI = 2,
#endif
        _HARDWALL_TYPES
};

static struct hardwall_type hardwall_types[] = {
        {  /* user-space access to UDN */
                0,
                1,
                0,
                0,
                "udn",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
                __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
                NULL
        },
#ifndef __tilepro__
        {  /* user-space access to IDN */
                1,
                1,
                1,
                1,  /* disabled pending hypervisor support */
                "idn",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
                __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
                NULL
        },
        {  /* access to user-space IPI */
                2,
                0,
                0,
                0,
                "ipi",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
                __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
                NULL
        },
#endif
};
/*
 * This data structure tracks the cpu data, etc., associated
 * one-to-one with a "struct file *" from opening a hardwall device file.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
        struct list_head list;        /* for hardwall_types.list */
        struct list_head task_head;   /* head of tasks in this hardwall */
        struct hardwall_type *type;   /* type of this resource */
        struct cpumask cpumask;       /* cpus reserved */
        int id;                       /* integer id for this hardwall */
        int teardown_in_progress;     /* are we tearing this one down? */

        /* Remaining fields only valid for user-network resources. */
        int ulhc_x;                   /* upper left hand corner x coord */
        int ulhc_y;                   /* upper left hand corner y coord */
        int width;                    /* rectangle width */
        int height;                   /* rectangle height */
#if CHIP_HAS_REV1_XDN()
        atomic_t xdn_pending_count;   /* cores in phase 1 of drain */
#endif
};

/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;

/* Functions to manage files in /proc/tile/hardwall. */
static void hardwall_add_proc(struct hardwall_info *);
static void hardwall_remove_proc(struct hardwall_info *);

/* Allow disabling UDN access. */
static int __init noudn(char *str)
{
        pr_info("User-space UDN access is disabled\n");
        hardwall_types[HARDWALL_UDN].disabled = 1;
        return 0;
}
early_param("noudn", noudn);

#ifndef __tilepro__
/* Allow disabling IDN access. */
static int __init noidn(char *str)
{
        pr_info("User-space IDN access is disabled\n");
        hardwall_types[HARDWALL_IDN].disabled = 1;
        return 0;
}
early_param("noidn", noidn);

/* Allow disabling IPI access. */
static int __init noipi(char *str)
{
        pr_info("User-space IPI access is disabled\n");
        hardwall_types[HARDWALL_IPI].disabled = 1;
        return 0;
}
early_param("noipi", noipi);
#endif
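
/*
 * Usage note (an illustrative summary, not additional mechanism):
 * booting with "noudn" on the kernel command line marks the UDN
 * resource disabled, so _HARDWALL_CREATE returns -ENOSYS for it and
 * proc_tile_hardwall_init() below skips creating its /proc directory.
 * "noidn" and "noipi" behave the same way for their resources.
 */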

/*
 * Low-level primitives for UDN/IDN
 */
#ifdef __tilepro__

#define mtspr_XDN(hwt, name, val) \
        do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
        do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
#define mfspr_XDN(hwt, name) \
        ((void)(hwt), __insn_mfspr(SPR_UDN_##name))

#else

#define mtspr_XDN(hwt, name, val)                               \
        do {                                                    \
                if ((hwt)->is_idn)                              \
                        __insn_mtspr(SPR_IDN_##name, (val));    \
                else                                            \
                        __insn_mtspr(SPR_UDN_##name, (val));    \
        } while (0)
#define mtspr_MPL_XDN(hwt, name, val)                                   \
        do {                                                            \
                if ((hwt)->is_idn)                                      \
                        __insn_mtspr(SPR_MPL_IDN_##name, (val));        \
                else                                                    \
                        __insn_mtspr(SPR_MPL_UDN_##name, (val));        \
        } while (0)
#define mfspr_XDN(hwt, name) \
        ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))

#endif
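
/*
 * Illustrative expansion (a sketch; the SPR names are pasted together
 * by the ## operator): on tilegx, mtspr_XDN(hwt, DIRECTION_PROTECT, bits)
 * becomes
 *
 *      if (hwt->is_idn)
 *              __insn_mtspr(SPR_IDN_DIRECTION_PROTECT, bits);
 *      else
 *              __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
 *
 * while on tilepro there is no IDN, so the same call compiles directly
 * to __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits).
 */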

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do {           \
        if (cpu_online(cpu))                    \
                cpumask_set_cpu(cpu, dst);      \
} while (0)

/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
        return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
                (y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

/* Compute the rectangle parameters and validate the cpumask. */
static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
        int x, y, cpu, ulhc, lrhc;

        /* The first cpu is the ULHC, the last the LRHC. */
        ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
        lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

        /* Compute the rectangle attributes from the cpus. */
        r->ulhc_x = cpu_x(ulhc);
        r->ulhc_y = cpu_y(ulhc);
        r->width = cpu_x(lrhc) - r->ulhc_x + 1;
        r->height = cpu_y(lrhc) - r->ulhc_y + 1;

        /* Width and height must be positive. */
        if (r->width <= 0 || r->height <= 0)
                return -EINVAL;

        /* Confirm that the cpumask is exactly the rectangle. */
        for (y = 0, cpu = 0; y < smp_height; ++y)
                for (x = 0; x < smp_width; ++x, ++cpu)
                        if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
                                return -EINVAL;

        /*
         * Note that offline cpus can't be drained when this user network
         * rectangle eventually closes.  We used to detect this
         * situation and print a warning, but it annoyed users and
         * they ignored it anyway, so now we just return without a
         * warning.
         */
        return 0;
}
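
/*
 * Worked example (illustrative, assuming a 4x4 mesh, i.e. smp_width ==
 * smp_height == 4, with cpu = y * smp_width + x): a cpumask containing
 * cpus {5, 6, 9, 10} gives ulhc = 5 -> (ulhc_x, ulhc_y) = (1, 1) and
 * lrhc = 10 -> (2, 2), so width = height = 2, and the nested loop
 * verifies that exactly those four cpus are set.  A mask of {5, 6, 9}
 * instead gives lrhc = 9 -> a 1x2 rectangle covering only cpus 5 and 9,
 * so the loop fails at cpu 6 and the mask is rejected with -EINVAL.
 */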

/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
        N_PROTECT = (1 << 0),
        E_PROTECT = (1 << 1),
        S_PROTECT = (1 << 2),
        W_PROTECT = (1 << 3),
        C_PROTECT = (1 << 4),
};

static inline int xdn_which_interrupt(struct hardwall_type *hwt)
{
#ifndef __tilepro__
        if (hwt->is_idn)
                return INT_IDN_FIREWALL;
#endif
        return INT_UDN_FIREWALL;
}

static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
        arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}

static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
        arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_func(void *info)
{
        struct hardwall_info *r = info;
        struct hardwall_type *hwt = r->type;

        int cpu = smp_processor_id();  /* on_each_cpu disables preemption */
        int x = cpu_x(cpu);
        int y = cpu_y(cpu);
        int bits = 0;
        if (x == r->ulhc_x)
                bits |= W_PROTECT;
        if (x == r->ulhc_x + r->width - 1)
                bits |= E_PROTECT;
        if (y == r->ulhc_y)
                bits |= N_PROTECT;
        if (y == r->ulhc_y + r->height - 1)
                bits |= S_PROTECT;
        BUG_ON(bits == 0);
        mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
        enable_firewall_interrupts(hwt);
}

/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_protect_rectangle(struct hardwall_info *r)
{
        int x, y, cpu, delta;
        struct cpumask rect_cpus;

        cpumask_clear(&rect_cpus);

        /* First include the top and bottom edges */
        cpu = r->ulhc_y * smp_width + r->ulhc_x;
        delta = (r->height - 1) * smp_width;
        for (x = 0; x < r->width; ++x, ++cpu) {
                cpu_online_set(cpu, &rect_cpus);
                cpu_online_set(cpu + delta, &rect_cpus);
        }

        /* Then the left and right edges */
        cpu -= r->width;
        delta = r->width - 1;
        for (y = 0; y < r->height; ++y, cpu += smp_width) {
                cpu_online_set(cpu, &rect_cpus);
                cpu_online_set(cpu + delta, &rect_cpus);
        }

        /* Then tell all the cpus to set up their protection SPR */
        on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}
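
/*
 * Worked example (illustrative): for the 2x2 rectangle at (1,1) from
 * the check_rectangle() example above, every tile is on the boundary.
 * hardwall_setup_func() computes bits = W_PROTECT | N_PROTECT for the
 * upper-left tile (1,1), bits = E_PROTECT | S_PROTECT for the
 * lower-right tile (2,2), and so on; a 1x1 rectangle would get all
 * four edge bits on its single tile.  Interior tiles of larger
 * rectangles are never added to rect_cpus, so their DIRECTION_PROTECT
 * SPR stays zero.
 */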

/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num)
{
        struct hardwall_info *rect;
        struct hardwall_type *hwt;
        struct task_struct *p;
        struct siginfo info;
        int cpu = smp_processor_id();
        int found_processes;
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();

        /* Figure out which network trapped. */
        switch (fault_num) {
#ifndef __tilepro__
        case INT_IDN_FIREWALL:
                hwt = &hardwall_types[HARDWALL_IDN];
                break;
#endif
        case INT_UDN_FIREWALL:
                hwt = &hardwall_types[HARDWALL_UDN];
                break;
        default:
                BUG();
        }
        BUG_ON(hwt->disabled);

        /* This tile trapped a network access; find the rectangle. */
        spin_lock(&hwt->lock);
        list_for_each_entry(rect, &hwt->list, list) {
                if (cpumask_test_cpu(cpu, &rect->cpumask))
                        break;
        }

        /*
         * It shouldn't be possible not to find this cpu on the
         * rectangle list, since only cpus in rectangles get hardwalled.
         * The hardwall is only removed after the user network is drained.
         */
        BUG_ON(&rect->list == &hwt->list);

        /*
         * If we already started teardown on this hardwall, don't worry;
         * the abort signal has been sent and we are just waiting for things
         * to quiesce.
         */
        if (rect->teardown_in_progress) {
                pr_notice("cpu %d: detected %s hardwall violation %#lx"
                          " while teardown already in progress\n",
                          cpu, hwt->name,
                          (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
                goto done;
        }

        /*
         * Kill off any process that is activated in this rectangle.
         * We bypass security to deliver the signal, since it must be
         * one of the activated processes that generated the user network
         * message that caused this trap, and all the activated
         * processes shared a single open file so are pretty tightly
         * bound together from a security point of view to begin with.
         */
        rect->teardown_in_progress = 1;
        wmb();  /* Ensure visibility of rectangle before notifying processes. */
        pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
                  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_HARDWALL;
        found_processes = 0;
        list_for_each_entry(p, &rect->task_head,
                            thread.hardwall[hwt->index].list) {
                BUG_ON(p->thread.hardwall[hwt->index].info != rect);
                if (!(p->flags & PF_EXITING)) {
                        found_processes = 1;
                        pr_notice("hardwall: killing %d\n", p->pid);
                        do_send_sig_info(info.si_signo, &info, p, false);
                }
        }
        if (!found_processes)
                pr_notice("hardwall: no associated processes!\n");

 done:
        spin_unlock(&hwt->lock);

        /*
         * We have to disable firewall interrupts now, or else when we
         * return from this handler, we will simply re-interrupt back to
         * it.  However, we can't clear the protection bits, since we
         * haven't yet drained the network, and that would allow packets
         * to cross out of the hardwall region.
         */
        disable_firewall_interrupts(hwt);

        irq_exit();
        set_irq_regs(old_regs);
}

/* Allow access from user space to the user network. */
void grant_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
        if (!hwt->is_xdn) {
                __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
                return;
        }
#endif
        mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
        mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
        mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
        mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
        mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
        mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}

/* Deny access from user space to the user network. */
void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
        if (!hwt->is_xdn) {
                __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
                return;
        }
#endif
        mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
        mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
        mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
        mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
        mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
        mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}

/* Grant or restrict MPL access as necessary for the task we're switching to. */
void hardwall_switch_tasks(struct task_struct *prev,
                           struct task_struct *next)
{
        int i;
        for (i = 0; i < HARDWALL_TYPES; ++i) {
                if (prev->thread.hardwall[i].info != NULL) {
                        if (next->thread.hardwall[i].info == NULL)
                                restrict_hardwall_mpls(&hardwall_types[i]);
                } else if (next->thread.hardwall[i].info != NULL) {
                        grant_hardwall_mpls(&hardwall_types[i]);
                }
        }
}
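
/*
 * Spelled out, the four cases handled per hardwall type above
 * (an illustrative summary):
 *
 *      prev activated, next not  ->  restrict_hardwall_mpls()
 *      prev not, next activated  ->  grant_hardwall_mpls()
 *      both activated            ->  no SPR writes; access stays granted
 *      neither activated         ->  no SPR writes; access stays denied
 *
 * Skipping the redundant SPR writes avoids needless work when
 * switching between tasks with the same hardwall state.
 */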

/* Does this task have the right to IPI the given cpu? */
int hardwall_ipi_valid(int cpu)
{
#ifdef __tilegx__
        struct hardwall_info *info =
                current->thread.hardwall[HARDWALL_IPI].info;
        return info && cpumask_test_cpu(cpu, &info->cpumask);
#else
        return 0;
#endif
}

/*
 * Code to create, activate, deactivate, and destroy hardwall resources.
 */

/* Create a hardwall for the given resource */
static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
                                             size_t size,
                                             const unsigned char __user *bits)
{
        struct hardwall_info *iter, *info;
        struct cpumask mask;
        unsigned long flags;
        int rc;

        /* Reject crazy sizes out of hand, a la sys_mbind(). */
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);

        /* Copy whatever fits into a cpumask. */
        if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
                return ERR_PTR(-EFAULT);

        /*
         * If the size was short, clear the rest of the mask;
         * otherwise validate that the rest of the user mask was zero
         * (we don't try hard to be efficient when validating huge masks).
         */
        if (size < sizeof(struct cpumask)) {
                memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
        } else if (size > sizeof(struct cpumask)) {
                size_t i;
                for (i = sizeof(struct cpumask); i < size; ++i) {
                        char c;
                        if (get_user(c, &bits[i]))
                                return ERR_PTR(-EFAULT);
                        if (c)
                                return ERR_PTR(-EINVAL);
                }
        }

        /* Allocate a new hardwall_info optimistically. */
        info = kmalloc(sizeof(struct hardwall_info),
                       GFP_KERNEL | __GFP_ZERO);
        if (info == NULL)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&info->task_head);
        info->type = hwt;

        /* Compute the rectangle size and validate that it's plausible. */
        cpumask_copy(&info->cpumask, &mask);
        info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
        if (hwt->is_xdn) {
                rc = check_rectangle(info, &mask);
                if (rc != 0) {
                        kfree(info);
                        return ERR_PTR(rc);
                }
        }

        /*
         * Eliminate cpus that are not part of this Linux client.
         * Note that this allows for configurations that we might not want to
         * support, such as one client on every even cpu, another client on
         * every odd cpu.
         */
        cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);

        /* Confirm it doesn't overlap and add it to the list. */
        spin_lock_irqsave(&hwt->lock, flags);
        list_for_each_entry(iter, &hwt->list, list) {
                if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
                        spin_unlock_irqrestore(&hwt->lock, flags);
                        kfree(info);
                        return ERR_PTR(-EBUSY);
                }
        }
        list_add_tail(&info->list, &hwt->list);
        spin_unlock_irqrestore(&hwt->lock, flags);

        /* Set up appropriate hardwalling on all affected cpus. */
        if (hwt->is_xdn)
                hardwall_protect_rectangle(info);

        /* Create a /proc/tile/hardwall entry. */
        hardwall_add_proc(info);
        return info;
}

/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *info)
{
        int cpu;
        unsigned long flags;
        struct task_struct *p = current;
        struct thread_struct *ts = &p->thread;
        struct hardwall_type *hwt;

        /* Require a hardwall. */
        if (info == NULL)
                return -ENODATA;

        /* Not allowed to activate a hardwall that is being torn down. */
        if (info->teardown_in_progress)
                return -EINVAL;

        /*
         * Get our affinity; if we're not bound to this tile uniquely,
         * we can't access the network registers.
         */
        if (cpumask_weight(&p->cpus_allowed) != 1)
                return -EPERM;

        /* Make sure we are bound to a cpu assigned to this resource. */
        cpu = smp_processor_id();
        BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
        if (!cpumask_test_cpu(cpu, &info->cpumask))
                return -EINVAL;

        /* If we are already bound to this hardwall, it's a no-op. */
        hwt = info->type;
        if (ts->hardwall[hwt->index].info) {
                BUG_ON(ts->hardwall[hwt->index].info != info);
                return 0;
        }

        /* Success!  This process gets to use the resource on this cpu. */
        ts->hardwall[hwt->index].info = info;
        spin_lock_irqsave(&hwt->lock, flags);
        list_add(&ts->hardwall[hwt->index].list, &info->task_head);
        spin_unlock_irqrestore(&hwt->lock, flags);
        grant_hardwall_mpls(hwt);
        printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
               p->pid, p->comm, hwt->name, cpu);
        return 0;
}

/*
 * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
 * This method may be called from exit_thread(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct hardwall_type *hwt,
                                 struct task_struct *task)
{
        struct thread_struct *ts = &task->thread;

        if (cpumask_weight(&task->cpus_allowed) != 1) {
                pr_err("pid %d (%s) releasing %s hardwall with"
                       " an affinity mask containing %d cpus!\n",
                       task->pid, task->comm, hwt->name,
                       cpumask_weight(&task->cpus_allowed));
                BUG();
        }

        BUG_ON(ts->hardwall[hwt->index].info == NULL);
        ts->hardwall[hwt->index].info = NULL;
        list_del(&ts->hardwall[hwt->index].list);
        if (task == current)
                restrict_hardwall_mpls(hwt);
}

/* Deactivate a task's hardwall. */
static int hardwall_deactivate(struct hardwall_type *hwt,
                               struct task_struct *task)
{
        unsigned long flags;
        int activated;

        spin_lock_irqsave(&hwt->lock, flags);
        activated = (task->thread.hardwall[hwt->index].info != NULL);
        if (activated)
                _hardwall_deactivate(hwt, task);
        spin_unlock_irqrestore(&hwt->lock, flags);

        if (!activated)
                return -EINVAL;

        printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
               task->pid, task->comm, hwt->name, raw_smp_processor_id());
        return 0;
}

void hardwall_deactivate_all(struct task_struct *task)
{
        int i;
        for (i = 0; i < HARDWALL_TYPES; ++i)
                if (task->thread.hardwall[i].info)
                        hardwall_deactivate(&hardwall_types[i], task);
}

/* Stop the switch before draining the network. */
static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
        /* Freeze the switch and the demux. */
        __insn_mtspr(SPR_UDN_SP_FREEZE,
                     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
                     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
                     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#else
        /*
         * Drop all packets bound for the core or off the edge.
         * We rely on the normal hardwall protection setup code
         * to have set the low four bits to trigger firewall interrupts,
         * and shift those bits up to trigger "drop on send" semantics,
         * plus adding "drop on send to core" for all switches.
         * In practice it seems the switches latch the DIRECTION_PROTECT
         * SPR so they won't start dropping if they're already
         * delivering the last message to the core, but it doesn't
         * hurt to enable it here.
         */
        struct hardwall_type *hwt = arg;
        unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
        mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}

static void empty_xdn_demuxes(struct hardwall_type *hwt)
{
#ifndef __tilepro__
        if (hwt->is_idn) {
                while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
                        (void) __tile_idn0_receive();
                while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
                        (void) __tile_idn1_receive();
                return;
        }
#endif
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
                (void) __tile_udn0_receive();
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
                (void) __tile_udn1_receive();
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
                (void) __tile_udn2_receive();
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
                (void) __tile_udn3_receive();
}

/* Drain all the state from a stopped switch. */
static void drain_xdn_switch(void *arg)
{
        struct hardwall_info *info = arg;
        struct hardwall_type *hwt = info->type;

#if CHIP_HAS_REV1_XDN()
        /*
         * The switches have been configured to drop any messages
         * destined for cores (or off the edge of the rectangle).
         * But the current message may continue to be delivered,
         * so we wait until all the cores have finished any pending
         * messages before we stop draining.
         */
        int pending = mfspr_XDN(hwt, PENDING);
        while (pending--) {
                empty_xdn_demuxes(hwt);
                if (hwt->is_idn)
                        __tile_idn_send(0);
                else
                        __tile_udn_send(0);
        }
        atomic_dec(&info->xdn_pending_count);
        while (atomic_read(&info->xdn_pending_count))
                empty_xdn_demuxes(hwt);
#else
        int i;
        int from_tile_words, ca_count;

        /* Empty out the 5 switch point fifos. */
        for (i = 0; i < 5; i++) {
                int words, j;
                __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
                words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
                for (j = 0; j < words; j++)
                        (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
                BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
        }

        /* Dump out the 3 word fifo at top. */
        from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
        for (i = 0; i < from_tile_words; i++)
                (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

        /* Empty out demuxes. */
        empty_xdn_demuxes(hwt);

        /* Empty out catch all. */
        ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
        for (i = 0; i < ca_count; i++)
                (void) __insn_mfspr(SPR_UDN_CA_DATA);
        BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

        /* Clear demux logic. */
        __insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

        /*
         * Write switch state; experimentation indicates that 0xc3000
         * is an idle switch point.
         */
        for (i = 0; i < 5; i++) {
                __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
                __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
        }
#endif
}
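
/*
 * Shape of the rev1 drain, in outline (an illustrative summary of the
 * logic above, not additional code): hardwall_destroy() below first
 * sets xdn_pending_count to the number of cpus in the rectangle, then
 * every cpu runs drain_xdn_switch() concurrently.  Each cpu works off
 * its own PENDING message count (emptying its demuxes and injecting
 * one word per pending message), decrements the shared counter, and
 * then keeps emptying its demuxes until the counter reaches zero,
 * i.e. until every other cpu has also finished its first phase.
 */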

/* Reset random XDN state registers at boot up and during hardwall teardown. */
static void reset_xdn_network_state(struct hardwall_type *hwt)
{
        if (hwt->disabled)
                return;

        /* Clear out other random registers so we have a clean slate. */
        mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
        mtspr_XDN(hwt, AVAIL_EN, 0);
        mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);

#if !CHIP_HAS_REV1_XDN()
        /* Reset UDN coordinates to their standard value */
        {
                unsigned int cpu = smp_processor_id();
                unsigned int x = cpu_x(cpu);
                unsigned int y = cpu_y(cpu);
                __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
        }

        /* Set demux tags to predefined values and enable them. */
        __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
        __insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
        __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
        __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
        __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));

        /* Set other rev0 random registers to a clean state. */
        __insn_mtspr(SPR_UDN_REFILL_EN, 0);
        __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
        __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);

        /* Start the switch and demux. */
        __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

void reset_network_state(void)
{
        reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
#ifndef __tilepro__
        reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
#endif
}

/* Restart an XDN switch after draining. */
static void restart_xdn_switch(void *arg)
{
        struct hardwall_type *hwt = arg;

#if CHIP_HAS_REV1_XDN()
        /* One last drain step to avoid races with injection and draining. */
        empty_xdn_demuxes(hwt);
#endif

        reset_xdn_network_state(hwt);

        /* Disable firewall interrupts. */
        disable_firewall_interrupts(hwt);
}
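
/*
 * Teardown pipeline, in order (a summary of the three on_each_cpu_mask()
 * steps driven from hardwall_destroy() below): stop_xdn_switch() stops
 * new traffic, drain_xdn_switch() empties all buffered network state,
 * and restart_xdn_switch() resets the SPRs and re-enables the switch
 * with firewall interrupts off, leaving the tiles ready for the next
 * hardwall_create().
 */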

/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *info)
{
        struct task_struct *task, *tmp;
        struct hardwall_type *hwt;
        unsigned long flags;

        /* Make sure this file actually represents a hardwall. */
        if (info == NULL)
                return;

        /*
         * Deactivate any remaining tasks.  It's possible to race with
         * some other thread that is exiting and hasn't yet called
         * deactivate (when freeing its thread_info), so we carefully
         * deactivate any remaining tasks before freeing the
         * hardwall_info object itself.
         */
        hwt = info->type;
        info->teardown_in_progress = 1;
        spin_lock_irqsave(&hwt->lock, flags);
        /* Use the _safe iterator: _hardwall_deactivate() unlinks the entry. */
        list_for_each_entry_safe(task, tmp, &info->task_head,
                                 thread.hardwall[hwt->index].list)
                _hardwall_deactivate(hwt, task);
        spin_unlock_irqrestore(&hwt->lock, flags);

        if (hwt->is_xdn) {
                /* Configure the switches for draining the user network. */
                printk(KERN_DEBUG
                       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
                       hwt->name, info->width, info->height,
                       info->ulhc_x, info->ulhc_y);
                on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);

                /* Drain the network. */
#if CHIP_HAS_REV1_XDN()
                atomic_set(&info->xdn_pending_count,
                           cpumask_weight(&info->cpumask));
                on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
#else
                on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
#endif

                /* Restart switch and disable firewall. */
                on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
        }

        /* Remove the /proc/tile/hardwall entry. */
        hardwall_remove_proc(info);

        /* Now free the hardwall from the list. */
        spin_lock_irqsave(&hwt->lock, flags);
        BUG_ON(!list_empty(&info->task_head));
        list_del(&info->list);
        spin_unlock_irqrestore(&hwt->lock, flags);
        kfree(info);
}

static int hardwall_proc_show(struct seq_file *sf, void *v)
{
        struct hardwall_info *info = sf->private;
        char buf[256];

        int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
        buf[rc++] = '\n';
        seq_write(sf, buf, rc);
        return 0;
}

static int hardwall_proc_open(struct inode *inode,
                              struct file *file)
{
        return single_open(file, hardwall_proc_show, PDE_DATA(inode));
}

static const struct file_operations hardwall_proc_fops = {
        .open           = hardwall_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void hardwall_add_proc(struct hardwall_info *info)
{
        char buf[64];
        snprintf(buf, sizeof(buf), "%d", info->id);
        proc_create_data(buf, 0444, info->type->proc_dir,
                         &hardwall_proc_fops, info);
}

static void hardwall_remove_proc(struct hardwall_info *info)
{
        char buf[64];
        snprintf(buf, sizeof(buf), "%d", info->id);
        remove_proc_entry(buf, info->type->proc_dir);
}

int proc_pid_hardwall(struct task_struct *task, char *buffer)
{
        int i;
        int n = 0;

        for (i = 0; i < HARDWALL_TYPES; ++i) {
                struct hardwall_info *info = task->thread.hardwall[i].info;
                if (info)
                        n += sprintf(&buffer[n], "%s: %d\n",
                                     info->type->name, info->id);
        }
        return n;
}

void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
        int i;

        for (i = 0; i < HARDWALL_TYPES; ++i) {
                struct hardwall_type *hwt = &hardwall_types[i];
                if (hwt->disabled)
                        continue;
                if (hardwall_proc_dir == NULL)
                        hardwall_proc_dir = proc_mkdir("hardwall", root);
                hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
        }
}

/*
 * Character device support via ioctl/close.
 */

static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
        struct hardwall_info *info = file->private_data;
        int minor = iminor(file->f_mapping->host);
        struct hardwall_type *hwt;

        if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
                return -EINVAL;

        BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
        BUILD_BUG_ON(HARDWALL_TYPES !=
                     sizeof(hardwall_types)/sizeof(hardwall_types[0]));

        if (minor < 0 || minor >= HARDWALL_TYPES)
                return -EINVAL;
        hwt = &hardwall_types[minor];
        WARN_ON(info && hwt != info->type);

        switch (_IOC_NR(a)) {
        case _HARDWALL_CREATE:
                if (hwt->disabled)
                        return -ENOSYS;
                if (info != NULL)
                        return -EALREADY;
                info = hardwall_create(hwt, _IOC_SIZE(a),
                                       (const unsigned char __user *)b);
                if (IS_ERR(info))
                        return PTR_ERR(info);
                file->private_data = info;
                return 0;

        case _HARDWALL_ACTIVATE:
                return hardwall_activate(info);

        case _HARDWALL_DEACTIVATE:
                if (current->thread.hardwall[hwt->index].info != info)
                        return -EINVAL;
                return hardwall_deactivate(hwt, current);

        case _HARDWALL_GET_ID:
                return info ? info->id : -EINVAL;

        default:
                return -EINVAL;
        }
}
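
/*
 * Example user-space usage (a minimal sketch, assuming the
 * HARDWALL_CREATE()/HARDWALL_ACTIVATE request macros exported by
 * <asm/hardwall.h>; the "/dev/hardwall/udn" node path for minor
 * HARDWALL_UDN is an assumption, not something this file defines):
 *
 *      int fd = open("/dev/hardwall/udn", O_RDWR);
 *      unsigned long mask[2] = { 0 };
 *      mask[0] = 0x660;                // cpus {5, 6, 9, 10} on a 4x4 mesh
 *      ioctl(fd, HARDWALL_CREATE(sizeof(mask)), mask);
 *      // Bind to exactly one cpu in the rectangle, e.g. via
 *      // sched_setaffinity() to cpu 5, then:
 *      ioctl(fd, HARDWALL_ACTIVATE);
 *      // ... use the user network ...
 *      close(fd);                      // triggers flush/release teardown
 *
 * The handler above enforces each step: create fails with -EBUSY if
 * the cpus overlap an existing hardwall, and activate fails with
 * -EPERM unless the caller's affinity mask contains exactly one cpu.
 */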

#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
                                  unsigned int a, unsigned long b)
{
        /* Sign-extend the argument so it can be used as a pointer. */
        return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
        struct hardwall_info *info = file->private_data;
        struct task_struct *task, *tmp;
        unsigned long flags;

        if (info) {
                /*
                 * NOTE: if multiple threads are activated on this hardwall
                 * file, the other threads will continue having access to the
                 * user network until they are context-switched out and back
                 * in again.
                 *
                 * NOTE: A NULL files pointer means the task is being torn
                 * down, so in that case we also deactivate it.
                 */
                struct hardwall_type *hwt = info->type;
                spin_lock_irqsave(&hwt->lock, flags);
                list_for_each_entry_safe(task, tmp, &info->task_head,
                                         thread.hardwall[hwt->index].list) {
                        if (task->files == owner || task->files == NULL)
                                _hardwall_deactivate(hwt, task);
                }
                spin_unlock_irqrestore(&hwt->lock, flags);
        }

        return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
        hardwall_destroy(file->private_data);
        return 0;
}

static const struct file_operations dev_hardwall_fops = {
        .open           = nonseekable_open,
        .unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = hardwall_compat_ioctl,
#endif
        .flush          = hardwall_flush,
        .release        = hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
        int rc;
        dev_t dev;

        rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
        if (rc < 0)
                return rc;
        cdev_init(&hardwall_dev, &dev_hardwall_fops);
        rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
        if (rc < 0) {
                /* Don't leak the reserved region if cdev_add() fails. */
                unregister_chrdev_region(dev, HARDWALL_TYPES);
                return rc;
        }

        return 0;
}
late_initcall(dev_hardwall_init);