/* arch/tile/kernel/hardwall.c */
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. */
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>
/*
 * This data structure tracks the rectangle data, etc., associated
 * one-to-one with a "struct file *" from opening HARDWALL_FILE.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
	struct list_head list;		/* links this entry into the global "rectangles" list */
	struct list_head task_head;	/* head of tasks currently activated in this hardwall */
	int ulhc_x;			/* upper left hand corner x coord */
	int ulhc_y;			/* upper left hand corner y coord */
	int width;			/* rectangle width */
	int height;			/* rectangle height */
	int teardown_in_progress;	/* are we tearing this one down? */
};
/* Currently allocated hardwall rectangles. */
static LIST_HEAD(rectangles);

/*
 * Guard changes to the hardwall data structures.
 * This could be finer grained (e.g. one lock for the list of hardwall
 * rectangles, then separate embedded locks for each one's list of tasks),
 * but there are subtle correctness issues when trying to start with
 * a task's "hardwall" pointer and lock the correct rectangle's embedded
 * lock in the presence of a simultaneous deactivation, so it seems
 * easier to have a single lock, given that none of these data
 * structures are touched very frequently during normal operation.
 */
static DEFINE_SPINLOCK(hardwall_lock);

/* Allow disabling UDN access (set by the "noudn" boot argument). */
static int udn_disabled;
/*
 * "noudn" boot-argument handler: disable all user-space UDN access.
 * Returning 0 tells the early_param machinery the option was consumed.
 */
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	udn_disabled = 1;
	return 0;
}
early_param("noudn", noudn);
  67. /*
  68. * Low-level primitives
  69. */
/*
 * Set a CPU bit in the mask, but only if the CPU is online
 * (offline tiles can't receive the setup IPI anyway).
 */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu))          \
		cpumask_set_cpu(cpu, dst); \
} while (0)
  75. /* Does the given rectangle contain the given x,y coordinate? */
  76. static int contains(struct hardwall_info *r, int x, int y)
  77. {
  78. return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
  79. (y >= r->ulhc_y && y < r->ulhc_y + r->height);
  80. }
  81. /* Compute the rectangle parameters and validate the cpumask. */
  82. static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
  83. {
  84. int x, y, cpu, ulhc, lrhc;
  85. /* The first cpu is the ULHC, the last the LRHC. */
  86. ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
  87. lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);
  88. /* Compute the rectangle attributes from the cpus. */
  89. r->ulhc_x = cpu_x(ulhc);
  90. r->ulhc_y = cpu_y(ulhc);
  91. r->width = cpu_x(lrhc) - r->ulhc_x + 1;
  92. r->height = cpu_y(lrhc) - r->ulhc_y + 1;
  93. /* Width and height must be positive */
  94. if (r->width <= 0 || r->height <= 0)
  95. return -EINVAL;
  96. /* Confirm that the cpumask is exactly the rectangle. */
  97. for (y = 0, cpu = 0; y < smp_height; ++y)
  98. for (x = 0; x < smp_width; ++x, ++cpu)
  99. if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
  100. return -EINVAL;
  101. /*
  102. * Note that offline cpus can't be drained when this UDN
  103. * rectangle eventually closes. We used to detect this
  104. * situation and print a warning, but it annoyed users and
  105. * they ignored it anyway, so now we just return without a
  106. * warning.
  107. */
  108. return 0;
  109. }
  110. /* Do the two given rectangles overlap on any cpu? */
  111. static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
  112. {
  113. return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */
  114. b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */
  115. a->ulhc_y + a->height > b->ulhc_y && /* A not above */
  116. b->ulhc_y + b->height > a->ulhc_y; /* B not above */
  117. }
  118. /*
  119. * Hardware management of hardwall setup, teardown, trapping,
  120. * and enabling/disabling PL0 access to the networks.
  121. */
/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),	/* block traffic crossing the north edge */
	E_PROTECT = (1 << 1),	/* block traffic crossing the east edge */
	S_PROTECT = (1 << 2),	/* block traffic crossing the south edge */
	W_PROTECT = (1 << 3)	/* block traffic crossing the west edge */
};
/* Unmask the UDN firewall interrupt on this cpu. */
static void enable_firewall_interrupts(void)
{
	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
}
/* Mask the UDN firewall interrupt on this cpu. */
static void disable_firewall_interrupts(void)
{
	arch_local_irq_mask_now(INT_UDN_FIREWALL);
}
  137. /* Set up hardwall on this cpu based on the passed hardwall_info. */
  138. static void hardwall_setup_ipi_func(void *info)
  139. {
  140. struct hardwall_info *r = info;
  141. int cpu = smp_processor_id();
  142. int x = cpu % smp_width;
  143. int y = cpu / smp_width;
  144. int bits = 0;
  145. if (x == r->ulhc_x)
  146. bits |= W_PROTECT;
  147. if (x == r->ulhc_x + r->width - 1)
  148. bits |= E_PROTECT;
  149. if (y == r->ulhc_y)
  150. bits |= N_PROTECT;
  151. if (y == r->ulhc_y + r->height - 1)
  152. bits |= S_PROTECT;
  153. BUG_ON(bits == 0);
  154. __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
  155. enable_firewall_interrupts();
  156. }
/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_setup(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;  /* linear cpu id of the ULHC tile */
	delta = (r->height - 1) * smp_width;      /* offset from top row to bottom row */
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges */
	cpu -= r->width;       /* rewind to the start of the top row */
	delta = r->width - 1;  /* offset from left column to right column */
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
}
  180. void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
  181. {
  182. struct hardwall_info *rect;
  183. struct task_struct *p;
  184. struct siginfo info;
  185. int x, y;
  186. int cpu = smp_processor_id();
  187. int found_processes;
  188. unsigned long flags;
  189. struct pt_regs *old_regs = set_irq_regs(regs);
  190. irq_enter();
  191. /* This tile trapped a network access; find the rectangle. */
  192. x = cpu % smp_width;
  193. y = cpu / smp_width;
  194. spin_lock_irqsave(&hardwall_lock, flags);
  195. list_for_each_entry(rect, &rectangles, list) {
  196. if (contains(rect, x, y))
  197. break;
  198. }
  199. /*
  200. * It shouldn't be possible not to find this cpu on the
  201. * rectangle list, since only cpus in rectangles get hardwalled.
  202. * The hardwall is only removed after the UDN is drained.
  203. */
  204. BUG_ON(&rect->list == &rectangles);
  205. /*
  206. * If we already started teardown on this hardwall, don't worry;
  207. * the abort signal has been sent and we are just waiting for things
  208. * to quiesce.
  209. */
  210. if (rect->teardown_in_progress) {
  211. pr_notice("cpu %d: detected hardwall violation %#lx"
  212. " while teardown already in progress\n",
  213. cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
  214. goto done;
  215. }
  216. /*
  217. * Kill off any process that is activated in this rectangle.
  218. * We bypass security to deliver the signal, since it must be
  219. * one of the activated processes that generated the UDN
  220. * message that caused this trap, and all the activated
  221. * processes shared a single open file so are pretty tightly
  222. * bound together from a security point of view to begin with.
  223. */
  224. rect->teardown_in_progress = 1;
  225. wmb(); /* Ensure visibility of rectangle before notifying processes. */
  226. pr_notice("cpu %d: detected hardwall violation %#lx...\n",
  227. cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
  228. info.si_signo = SIGILL;
  229. info.si_errno = 0;
  230. info.si_code = ILL_HARDWALL;
  231. found_processes = 0;
  232. list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
  233. BUG_ON(p->thread.hardwall != rect);
  234. if (p->sighand) {
  235. found_processes = 1;
  236. pr_notice("hardwall: killing %d\n", p->pid);
  237. spin_lock(&p->sighand->siglock);
  238. __group_send_sig_info(info.si_signo, &info, p);
  239. spin_unlock(&p->sighand->siglock);
  240. }
  241. }
  242. if (!found_processes)
  243. pr_notice("hardwall: no associated processes!\n");
  244. done:
  245. spin_unlock_irqrestore(&hardwall_lock, flags);
  246. /*
  247. * We have to disable firewall interrupts now, or else when we
  248. * return from this handler, we will simply re-interrupt back to
  249. * it. However, we can't clear the protection bits, since we
  250. * haven't yet drained the network, and that would allow packets
  251. * to cross out of the hardwall region.
  252. */
  253. disable_firewall_interrupts();
  254. irq_exit();
  255. set_irq_regs(old_regs);
  256. }
/*
 * Allow access from user space to the UDN by granting the relevant
 * minimum protection levels (MPLs) to PL0 on this cpu.
 */
void grant_network_mpls(void)
{
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	/* Pre-rev1 chips have additional refill/catch-all MPLs. */
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
#endif
}
/*
 * Deny access from user space to the UDN by raising the relevant
 * minimum protection levels (MPLs) back to PL1 on this cpu.
 */
void restrict_network_mpls(void)
{
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	/* Pre-rev1 chips have additional refill/catch-all MPLs. */
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
#endif
}
  281. /*
  282. * Code to create, activate, deactivate, and destroy hardwall rectangles.
  283. */
  284. /* Create a hardwall for the given rectangle */
  285. static struct hardwall_info *hardwall_create(
  286. size_t size, const unsigned char __user *bits)
  287. {
  288. struct hardwall_info *iter, *rect;
  289. struct cpumask mask;
  290. unsigned long flags;
  291. int rc;
  292. /* Reject crazy sizes out of hand, a la sys_mbind(). */
  293. if (size > PAGE_SIZE)
  294. return ERR_PTR(-EINVAL);
  295. /* Copy whatever fits into a cpumask. */
  296. if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
  297. return ERR_PTR(-EFAULT);
  298. /*
  299. * If the size was short, clear the rest of the mask;
  300. * otherwise validate that the rest of the user mask was zero
  301. * (we don't try hard to be efficient when validating huge masks).
  302. */
  303. if (size < sizeof(struct cpumask)) {
  304. memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
  305. } else if (size > sizeof(struct cpumask)) {
  306. size_t i;
  307. for (i = sizeof(struct cpumask); i < size; ++i) {
  308. char c;
  309. if (get_user(c, &bits[i]))
  310. return ERR_PTR(-EFAULT);
  311. if (c)
  312. return ERR_PTR(-EINVAL);
  313. }
  314. }
  315. /* Allocate a new rectangle optimistically. */
  316. rect = kmalloc(sizeof(struct hardwall_info),
  317. GFP_KERNEL | __GFP_ZERO);
  318. if (rect == NULL)
  319. return ERR_PTR(-ENOMEM);
  320. INIT_LIST_HEAD(&rect->task_head);
  321. /* Compute the rectangle size and validate that it's plausible. */
  322. rc = setup_rectangle(rect, &mask);
  323. if (rc != 0) {
  324. kfree(rect);
  325. return ERR_PTR(rc);
  326. }
  327. /* Confirm it doesn't overlap and add it to the list. */
  328. spin_lock_irqsave(&hardwall_lock, flags);
  329. list_for_each_entry(iter, &rectangles, list) {
  330. if (overlaps(iter, rect)) {
  331. spin_unlock_irqrestore(&hardwall_lock, flags);
  332. kfree(rect);
  333. return ERR_PTR(-EBUSY);
  334. }
  335. }
  336. list_add_tail(&rect->list, &rectangles);
  337. spin_unlock_irqrestore(&hardwall_lock, flags);
  338. /* Set up appropriate hardwalling on all affected cpus. */
  339. hardwall_setup(rect);
  340. return rect;
  341. }
/*
 * Activate a given hardwall on this cpu for this process.
 * Returns 0 on success (or if already activated on this hardwall);
 * -ENODATA if no rectangle was created yet, -EINVAL if the rectangle
 * is being torn down or this cpu is outside it, -EPERM if the task
 * is not bound to exactly one cpu.
 */
static int hardwall_activate(struct hardwall_info *rect)
{
	int cpu, x, y;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;

	/* Require a rectangle. */
	if (rect == NULL)
		return -ENODATA;

	/* Not allowed to activate a rectangle that is being torn down. */
	if (rect->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu in this rectangle. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	x = cpu_x(cpu);
	y = cpu_y(cpu);
	if (!contains(rect, x, y))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	if (ts->hardwall) {
		BUG_ON(ts->hardwall != rect);
		return 0;
	}

	/* Success! This process gets to use the user networks on this cpu. */
	ts->hardwall = rect;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_add(&ts->hardwall_list, &rect->task_head);
	spin_unlock_irqrestore(&hardwall_lock, flags);
	grant_network_mpls();
	printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
	       p->pid, p->comm, cpu);
	return 0;
}
/*
 * Deactivate a task's hardwall. Must hold hardwall_lock.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	/*
	 * An activated task must be bound to a single cpu (see
	 * hardwall_activate()); anything else indicates corruption.
	 */
	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing networks with"
		       " an affinity mask containing %d cpus!\n",
		       task->pid, task->comm,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall == NULL);
	ts->hardwall = NULL;
	list_del(&ts->hardwall_list);

	/* Only revoke the MPLs if we are running on the task's own cpu. */
	if (task == current)
		restrict_network_mpls();
}
  405. /* Deactivate a task's hardwall. */
  406. int hardwall_deactivate(struct task_struct *task)
  407. {
  408. unsigned long flags;
  409. int activated;
  410. spin_lock_irqsave(&hardwall_lock, flags);
  411. activated = (task->thread.hardwall != NULL);
  412. if (activated)
  413. _hardwall_deactivate(task);
  414. spin_unlock_irqrestore(&hardwall_lock, flags);
  415. if (!activated)
  416. return -EINVAL;
  417. printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
  418. task->pid, task->comm, smp_processor_id());
  419. return 0;
  420. }
/* Stop a UDN switch before draining the network. */
static void stop_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#endif
}
/*
 * Drain all the state from a stopped switch: switch-point fifos,
 * the top fifo, the per-demux queues, and the catch-all, then reset
 * the switch points to an idle state.  Must run after stop_udn_switch().
 */
static void drain_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		/* Low 4 bits of SP_STATE hold the fifo occupancy. */
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes (one DATA_AVAIL bit per demux queue). */
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
	BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}
/* Reset random UDN state registers at boot up and during hardwall teardown. */
void reset_network_state(void)
{
#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	unsigned int cpu = smp_processor_id();
	unsigned int x = cpu % smp_width;
	unsigned int y = cpu / smp_width;
#endif

	/* Nothing to reset when user UDN access is disabled entirely. */
	if (udn_disabled)
		return;

#if !CHIP_HAS_REV1_XDN()
	/* Pack (x, y) into the TILE_COORD register layout. */
	__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
#endif

	/* Clear out other random registers so we have a clean slate. */
	__insn_mtspr(SPR_UDN_AVAIL_EN, 0);
	__insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
#endif

	/* Start the switch and demux. */
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}
/* Restart a UDN switch after draining. */
static void restart_udn_switch(void *ignored)
{
	reset_network_state();

	/* Disable firewall interrupts and clear the protection bits. */
	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
	disable_firewall_interrupts();
}
  519. /* Build a struct cpumask containing all valid tiles in bounding rectangle. */
  520. static void fill_mask(struct hardwall_info *r, struct cpumask *result)
  521. {
  522. int x, y, cpu;
  523. cpumask_clear(result);
  524. cpu = r->ulhc_y * smp_width + r->ulhc_x;
  525. for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
  526. for (x = 0; x < r->width; ++x, ++cpu)
  527. cpu_online_set(cpu, result);
  528. }
  529. }
  530. /* Last reference to a hardwall is gone, so clear the network. */
  531. static void hardwall_destroy(struct hardwall_info *rect)
  532. {
  533. struct task_struct *task;
  534. unsigned long flags;
  535. struct cpumask mask;
  536. /* Make sure this file actually represents a rectangle. */
  537. if (rect == NULL)
  538. return;
  539. /*
  540. * Deactivate any remaining tasks. It's possible to race with
  541. * some other thread that is exiting and hasn't yet called
  542. * deactivate (when freeing its thread_info), so we carefully
  543. * deactivate any remaining tasks before freeing the
  544. * hardwall_info object itself.
  545. */
  546. spin_lock_irqsave(&hardwall_lock, flags);
  547. list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
  548. _hardwall_deactivate(task);
  549. spin_unlock_irqrestore(&hardwall_lock, flags);
  550. /* Drain the UDN. */
  551. printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
  552. rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
  553. fill_mask(rect, &mask);
  554. on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
  555. on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
  556. /* Restart switch and disable firewall. */
  557. on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
  558. /* Now free the rectangle from the list. */
  559. spin_lock_irqsave(&hardwall_lock, flags);
  560. BUG_ON(!list_empty(&rect->task_head));
  561. list_del(&rect->list);
  562. spin_unlock_irqrestore(&hardwall_lock, flags);
  563. kfree(rect);
  564. }
  565. /*
  566. * Dump hardwall state via /proc; initialized in arch/tile/sys/proc.c.
  567. */
  568. int proc_tile_hardwall_show(struct seq_file *sf, void *v)
  569. {
  570. struct hardwall_info *r;
  571. if (udn_disabled) {
  572. seq_printf(sf, "%dx%d 0,0 pids:\n", smp_width, smp_height);
  573. return 0;
  574. }
  575. spin_lock_irq(&hardwall_lock);
  576. list_for_each_entry(r, &rectangles, list) {
  577. struct task_struct *p;
  578. seq_printf(sf, "%dx%d %d,%d pids:",
  579. r->width, r->height, r->ulhc_x, r->ulhc_y);
  580. list_for_each_entry(p, &r->task_head, thread.hardwall_list) {
  581. unsigned int cpu = cpumask_first(&p->cpus_allowed);
  582. unsigned int x = cpu % smp_width;
  583. unsigned int y = cpu / smp_width;
  584. seq_printf(sf, " %d@%d,%d", p->pid, x, y);
  585. }
  586. seq_printf(sf, "\n");
  587. }
  588. spin_unlock_irq(&hardwall_lock);
  589. return 0;
  590. }
  591. /*
  592. * Character device support via ioctl/close.
  593. */
  594. static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
  595. {
  596. struct hardwall_info *rect = file->private_data;
  597. if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
  598. return -EINVAL;
  599. switch (_IOC_NR(a)) {
  600. case _HARDWALL_CREATE:
  601. if (udn_disabled)
  602. return -ENOSYS;
  603. if (rect != NULL)
  604. return -EALREADY;
  605. rect = hardwall_create(_IOC_SIZE(a),
  606. (const unsigned char __user *)b);
  607. if (IS_ERR(rect))
  608. return PTR_ERR(rect);
  609. file->private_data = rect;
  610. return 0;
  611. case _HARDWALL_ACTIVATE:
  612. return hardwall_activate(rect);
  613. case _HARDWALL_DEACTIVATE:
  614. if (current->thread.hardwall != rect)
  615. return -EINVAL;
  616. return hardwall_deactivate(current);
  617. default:
  618. return -EINVAL;
  619. }
  620. }
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: just widen the pointer argument. */
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif
  629. /* The user process closed the file; revoke access to user networks. */
  630. static int hardwall_flush(struct file *file, fl_owner_t owner)
  631. {
  632. struct hardwall_info *rect = file->private_data;
  633. struct task_struct *task, *tmp;
  634. unsigned long flags;
  635. if (rect) {
  636. /*
  637. * NOTE: if multiple threads are activated on this hardwall
  638. * file, the other threads will continue having access to the
  639. * UDN until they are context-switched out and back in again.
  640. *
  641. * NOTE: A NULL files pointer means the task is being torn
  642. * down, so in that case we also deactivate it.
  643. */
  644. spin_lock_irqsave(&hardwall_lock, flags);
  645. list_for_each_entry_safe(task, tmp, &rect->task_head,
  646. thread.hardwall_list) {
  647. if (task->files == owner || task->files == NULL)
  648. _hardwall_deactivate(task);
  649. }
  650. spin_unlock_irqrestore(&hardwall_lock, flags);
  651. }
  652. return 0;
  653. }
/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	/* private_data may be NULL if no rectangle was ever created. */
	hardwall_destroy(file->private_data);
	return 0;
}
/* File operations for the hardwall character device. */
static const struct file_operations dev_hardwall_fops = {
	.open           = nonseekable_open,
	.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hardwall_compat_ioctl,
#endif
	.flush          = hardwall_flush,
	.release        = hardwall_release,
};
  669. static struct cdev hardwall_dev;
  670. static int __init dev_hardwall_init(void)
  671. {
  672. int rc;
  673. dev_t dev;
  674. rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
  675. if (rc < 0)
  676. return rc;
  677. cdev_init(&hardwall_dev, &dev_hardwall_fops);
  678. rc = cdev_add(&hardwall_dev, dev, 1);
  679. if (rc < 0)
  680. return rc;
  681. return 0;
  682. }
  683. late_initcall(dev_hardwall_init);