cpuset.c
  1. /*
  2. * kernel/cpuset.c
  3. *
  4. * Processor and Memory placement constraints for sets of tasks.
  5. *
  6. * Copyright (C) 2003 BULL SA.
  7. * Copyright (C) 2004 Silicon Graphics, Inc.
  8. *
  9. * Portions derived from Patrick Mochel's sysfs code.
  10. * sysfs is Copyright (c) 2001-3 Patrick Mochel
  11. * Portions Copyright (c) 2004 Silicon Graphics, Inc.
  12. *
  13. * 2003-10-10 Written by Simon Derr <simon.derr@bull.net>
  14. * 2003-10-22 Updates by Stephen Hemminger.
  15. * 2004 May-July Rework by Paul Jackson <pj@sgi.com>
  16. *
  17. * This file is subject to the terms and conditions of the GNU General Public
  18. * License. See the file COPYING in the main directory of the Linux
  19. * distribution for more details.
  20. */
  21. #include <linux/config.h>
  22. #include <linux/cpu.h>
  23. #include <linux/cpumask.h>
  24. #include <linux/cpuset.h>
  25. #include <linux/err.h>
  26. #include <linux/errno.h>
  27. #include <linux/file.h>
  28. #include <linux/fs.h>
  29. #include <linux/init.h>
  30. #include <linux/interrupt.h>
  31. #include <linux/kernel.h>
  32. #include <linux/kmod.h>
  33. #include <linux/list.h>
  34. #include <linux/mm.h>
  35. #include <linux/module.h>
  36. #include <linux/mount.h>
  37. #include <linux/namei.h>
  38. #include <linux/pagemap.h>
  39. #include <linux/proc_fs.h>
  40. #include <linux/sched.h>
  41. #include <linux/seq_file.h>
  42. #include <linux/slab.h>
  43. #include <linux/smp_lock.h>
  44. #include <linux/spinlock.h>
  45. #include <linux/stat.h>
  46. #include <linux/string.h>
  47. #include <linux/time.h>
  48. #include <linux/backing-dev.h>
  49. #include <linux/sort.h>
  50. #include <asm/uaccess.h>
  51. #include <asm/atomic.h>
  52. #include <asm/semaphore.h>
  53. #define CPUSET_SUPER_MAGIC 0x27e0eb
  54. struct cpuset {
  55. unsigned long flags; /* "unsigned long" so bitops work */
  56. cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
  57. nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
  58. atomic_t count; /* count tasks using this cpuset */
  59. /*
  60. * We link our 'sibling' struct into our parents 'children'.
  61. * Our children link their 'sibling' into our 'children'.
  62. */
  63. struct list_head sibling; /* my parents children */
  64. struct list_head children; /* my children */
  65. struct cpuset *parent; /* my parent */
  66. struct dentry *dentry; /* cpuset fs entry */
  67. /*
  68. * Copy of global cpuset_mems_generation as of the most
  69. * recent time this cpuset changed its mems_allowed.
  70. */
  71. int mems_generation;
  72. };
  73. /* bits in struct cpuset flags field */
  74. typedef enum {
  75. CS_CPU_EXCLUSIVE,
  76. CS_MEM_EXCLUSIVE,
  77. CS_REMOVED,
  78. CS_NOTIFY_ON_RELEASE
  79. } cpuset_flagbits_t;
  80. /* convenient tests for these bits */
  81. static inline int is_cpu_exclusive(const struct cpuset *cs)
  82. {
  83. return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
  84. }
  85. static inline int is_mem_exclusive(const struct cpuset *cs)
  86. {
  87. return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
  88. }
  89. static inline int is_removed(const struct cpuset *cs)
  90. {
  91. return !!test_bit(CS_REMOVED, &cs->flags);
  92. }
  93. static inline int notify_on_release(const struct cpuset *cs)
  94. {
  95. return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
  96. }
  97. /*
  98. * Increment this atomic integer every time any cpuset changes its
  99. * mems_allowed value. Users of cpusets can track this generation
  100. * number, and avoid having to lock and reload mems_allowed unless
  101. * the cpuset they're using changes generation.
  102. *
  103. * A single, global generation is needed because attach_task() could
  104. * reattach a task to a different cpuset, which must not have its
  105. * generation numbers aliased with those of that task's previous cpuset.
  106. *
  107. * Generations are needed for mems_allowed because one task cannot
  108. * modify another's memory placement. So we must enable every task,
  109. * on every visit to __alloc_pages(), to efficiently check whether
  110. * its current->cpuset->mems_allowed has changed, requiring an update
  111. * of its current->mems_allowed.
  112. */
  113. static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
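/*
 * A minimal sketch of the consumer-side check of this generation number
 * (this is what cpuset_update_current_mems_allowed() below does):
 *
 *	struct cpuset *cs = current->cpuset;
 *	if (current->cpuset_mems_generation != cs->mems_generation) {
 *		down(&cpuset_sem);
 *		refresh_mems();
 *		up(&cpuset_sem);
 *	}
 */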
  114. static struct cpuset top_cpuset = {
  115. .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
  116. .cpus_allowed = CPU_MASK_ALL,
  117. .mems_allowed = NODE_MASK_ALL,
  118. .count = ATOMIC_INIT(0),
  119. .sibling = LIST_HEAD_INIT(top_cpuset.sibling),
  120. .children = LIST_HEAD_INIT(top_cpuset.children),
  121. .parent = NULL,
  122. .dentry = NULL,
  123. .mems_generation = 0,
  124. };
  125. static struct vfsmount *cpuset_mount;
  126. static struct super_block *cpuset_sb = NULL;
  127. /*
  128. * cpuset_sem should be held by anyone who is depending on the children
  129. * or sibling lists of any cpuset, or performing non-atomic operations
  130. * on the flags or *_allowed values of a cpuset, such as raising the
  131. * CS_REMOVED flag bit iff it is not already raised, or reading and
  132. * conditionally modifying the *_allowed values. One kernel global
  133. * cpuset semaphore should be sufficient - these things don't change
  134. * that much.
  135. *
  136. * The code that modifies cpusets holds cpuset_sem across the entire
  137. * operation, from cpuset_common_file_write() down, single threading
  138. * all cpuset modifications (except for counter manipulations from
  139. * fork and exit) across the system. This presumes that cpuset
  140. * modifications are rare - better kept simple and safe, even if slow.
  141. *
  142. * The code that reads cpusets, such as in cpuset_common_file_read()
  143. * and below, only holds cpuset_sem across small pieces of code, such
  144. * as when reading out possibly multi-word cpumasks and nodemasks, as
  145. * the risks are less, and the desire for performance a little greater.
  146. * The proc_cpuset_show() routine needs to hold cpuset_sem to ensure
  147. * that no cs->dentry is NULL, as it walks up the cpuset tree to root.
  148. *
  149. * The hooks from fork and exit, cpuset_fork() and cpuset_exit(), don't
  150. * (usually) grab cpuset_sem. These are the two most performance
  151. * critical pieces of code here. The exception occurs on exit(),
  152. * when a task in a notify_on_release cpuset exits. Then cpuset_sem
  153. * is taken, and if the cpuset count is zero, a usermode call made
  154. * to /sbin/cpuset_release_agent with the name of the cpuset (path
  155. * relative to the root of cpuset file system) as the argument.
  156. *
  157. * A cpuset can only be deleted if both its 'count' of using tasks is
  158. * zero, and its list of 'children' cpusets is empty. Since all tasks
  159. * in the system use _some_ cpuset, and since there is always at least
  160. * one task in the system (init, pid == 1), top_cpuset always has
  161. * children cpusets and/or using tasks. So no need
  162. * for any special hack to ensure that top_cpuset cannot be deleted.
  163. */
  164. static DECLARE_MUTEX(cpuset_sem);
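/*
 * A rough sketch of the pattern followed by code that modifies a cpuset
 * (see cpuset_common_file_write() and update_cpumask() below): take the
 * semaphore, work on a trial copy, validate, then commit:
 *
 *	down(&cpuset_sem);
 *	trialcs = *cs;
 *	...change trialcs.cpus_allowed, mems_allowed or flags...
 *	if (validate_change(cs, &trialcs) == 0)
 *		...copy the trial values back into *cs...
 *	up(&cpuset_sem);
 */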
  165. /*
  166. * A couple of forward declarations required, due to cyclic reference loop:
  167. * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
  168. * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
  169. */
  170. static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
  171. static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
  172. static struct backing_dev_info cpuset_backing_dev_info = {
  173. .ra_pages = 0, /* No readahead */
  174. .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
  175. };
  176. static struct inode *cpuset_new_inode(mode_t mode)
  177. {
  178. struct inode *inode = new_inode(cpuset_sb);
  179. if (inode) {
  180. inode->i_mode = mode;
  181. inode->i_uid = current->fsuid;
  182. inode->i_gid = current->fsgid;
  183. inode->i_blksize = PAGE_CACHE_SIZE;
  184. inode->i_blocks = 0;
  185. inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
  186. inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
  187. }
  188. return inode;
  189. }
  190. static void cpuset_diput(struct dentry *dentry, struct inode *inode)
  191. {
  192. /* is dentry a directory? if so, kfree() the associated cpuset */
  193. if (S_ISDIR(inode->i_mode)) {
  194. struct cpuset *cs = dentry->d_fsdata;
  195. BUG_ON(!(is_removed(cs)));
  196. kfree(cs);
  197. }
  198. iput(inode);
  199. }
  200. static struct dentry_operations cpuset_dops = {
  201. .d_iput = cpuset_diput,
  202. };
  203. static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
  204. {
  205. struct qstr qstr;
  206. struct dentry *d;
  207. qstr.name = name;
  208. qstr.len = strlen(name);
  209. qstr.hash = full_name_hash(name, qstr.len);
  210. d = lookup_hash(&qstr, parent);
  211. if (!IS_ERR(d))
  212. d->d_op = &cpuset_dops;
  213. return d;
  214. }
  215. static void remove_dir(struct dentry *d)
  216. {
  217. struct dentry *parent = dget(d->d_parent);
  218. d_delete(d);
  219. simple_rmdir(parent->d_inode, d);
  220. dput(parent);
  221. }
  222. /*
  223. * NOTE : the dentry must have been dget()'ed
  224. */
  225. static void cpuset_d_remove_dir(struct dentry *dentry)
  226. {
  227. struct list_head *node;
  228. spin_lock(&dcache_lock);
  229. node = dentry->d_subdirs.next;
  230. while (node != &dentry->d_subdirs) {
  231. struct dentry *d = list_entry(node, struct dentry, d_child);
  232. list_del_init(node);
  233. if (d->d_inode) {
  234. d = dget_locked(d);
  235. spin_unlock(&dcache_lock);
  236. d_delete(d);
  237. simple_unlink(dentry->d_inode, d);
  238. dput(d);
  239. spin_lock(&dcache_lock);
  240. }
  241. node = dentry->d_subdirs.next;
  242. }
  243. list_del_init(&dentry->d_child);
  244. spin_unlock(&dcache_lock);
  245. remove_dir(dentry);
  246. }
  247. static struct super_operations cpuset_ops = {
  248. .statfs = simple_statfs,
  249. .drop_inode = generic_delete_inode,
  250. };
  251. static int cpuset_fill_super(struct super_block *sb, void *unused_data,
  252. int unused_silent)
  253. {
  254. struct inode *inode;
  255. struct dentry *root;
  256. sb->s_blocksize = PAGE_CACHE_SIZE;
  257. sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
  258. sb->s_magic = CPUSET_SUPER_MAGIC;
  259. sb->s_op = &cpuset_ops;
  260. cpuset_sb = sb;
  261. inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
  262. if (inode) {
  263. inode->i_op = &simple_dir_inode_operations;
  264. inode->i_fop = &simple_dir_operations;
  265. /* directories start off with i_nlink == 2 (for "." entry) */
  266. inode->i_nlink++;
  267. } else {
  268. return -ENOMEM;
  269. }
  270. root = d_alloc_root(inode);
  271. if (!root) {
  272. iput(inode);
  273. return -ENOMEM;
  274. }
  275. sb->s_root = root;
  276. return 0;
  277. }
  278. static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
  279. int flags, const char *unused_dev_name,
  280. void *data)
  281. {
  282. return get_sb_single(fs_type, flags, data, cpuset_fill_super);
  283. }
  284. static struct file_system_type cpuset_fs_type = {
  285. .name = "cpuset",
  286. .get_sb = cpuset_get_sb,
  287. .kill_sb = kill_litter_super,
  288. };
  289. /* struct cftype:
  290. *
  291. * Most files in the cpuset filesystem have very simple read/write
  292. * handling; a common function takes care of them. Nevertheless, some
  293. * cases (such as reading 'tasks') are special, so this structure is
  294. * defined for every kind of file.
  295. *
  296. *
  297. * When reading/writing to a file:
  298. * - the cpuset to use is in file->f_dentry->d_parent->d_fsdata
  299. * - the 'cftype' of the file is in file->f_dentry->d_fsdata
  300. */
  301. struct cftype {
  302. char *name;
  303. int private;
  304. int (*open) (struct inode *inode, struct file *file);
  305. ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
  306. loff_t *ppos);
  307. int (*write) (struct file *file, const char __user *buf, size_t nbytes,
  308. loff_t *ppos);
  309. int (*release) (struct inode *inode, struct file *file);
  310. };
  311. static inline struct cpuset *__d_cs(struct dentry *dentry)
  312. {
  313. return dentry->d_fsdata;
  314. }
  315. static inline struct cftype *__d_cft(struct dentry *dentry)
  316. {
  317. return dentry->d_fsdata;
  318. }
  319. /*
  320. * Call with cpuset_sem held. Writes path of cpuset into buf.
  321. * Returns 0 on success, -errno on error.
  322. */
  323. static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
  324. {
  325. char *start;
  326. start = buf + buflen;
  327. *--start = '\0';
  328. for (;;) {
  329. int len = cs->dentry->d_name.len;
  330. if ((start -= len) < buf)
  331. return -ENAMETOOLONG;
  332. memcpy(start, cs->dentry->d_name.name, len);
  333. cs = cs->parent;
  334. if (!cs)
  335. break;
  336. if (!cs->parent)
  337. continue;
  338. if (--start < buf)
  339. return -ENAMETOOLONG;
  340. *start = '/';
  341. }
  342. memmove(buf, start, buf + buflen - start);
  343. return 0;
  344. }
  345. /*
  346. * Notify userspace when a cpuset is released, by running
  347. * /sbin/cpuset_release_agent with the name of the cpuset (path
  348. * relative to the root of cpuset file system) as the argument.
  349. *
  350. * Most likely, this user command will try to rmdir this cpuset.
  351. *
  352. * This races with the possibility that some other task will be
  353. * attached to this cpuset before it is removed, or that some other
  354. * user task will 'mkdir' a child cpuset of this cpuset. That's ok.
  355. * The presumed 'rmdir' will fail quietly if this cpuset is no longer
  356. * unused, and this cpuset will be reprieved from its death sentence,
  357. * to continue to serve a useful existence. Next time it's released,
  358. * we will get notified again, if it still has 'notify_on_release' set.
  359. *
  360. * Note final arg to call_usermodehelper() is 0 - that means
  361. * don't wait. Since we are holding the global cpuset_sem here,
  362. * and we are asking another thread (started from keventd) to rmdir a
  363. * cpuset, we can't wait - or we'd deadlock with the removing thread
  364. * on cpuset_sem.
  365. */
  366. static int cpuset_release_agent(char *cpuset_str)
  367. {
  368. char *argv[3], *envp[3];
  369. int i;
  370. i = 0;
  371. argv[i++] = "/sbin/cpuset_release_agent";
  372. argv[i++] = cpuset_str;
  373. argv[i] = NULL;
  374. i = 0;
  375. /* minimal command environment */
  376. envp[i++] = "HOME=/";
  377. envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
  378. envp[i] = NULL;
  379. return call_usermodehelper(argv[0], argv, envp, 0);
  380. }
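/*
 * A minimal release agent is simply a script that removes the now unused
 * cpuset directory, for example (a sketch, assuming the cpuset filesystem
 * is mounted at the conventional /dev/cpuset):
 *
 *	#!/bin/sh
 *	rmdir /dev/cpuset/$1
 *
 * where $1 is the cpuset path built by cpuset_path() above, relative to
 * the root of the cpuset filesystem.
 */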
  381. /*
  382. * Either cs->count of using tasks transitioned to zero, or the
  383. * cs->children list of child cpusets just became empty. If this
  384. * cs is notify_on_release() and now both the user count is zero and
  385. * the list of children is empty, send notice to user land.
  386. */
  387. static void check_for_release(struct cpuset *cs)
  388. {
  389. if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
  390. list_empty(&cs->children)) {
  391. char *buf;
  392. buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  393. if (!buf)
  394. return;
  395. if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
  396. goto out;
  397. cpuset_release_agent(buf);
  398. out:
  399. kfree(buf);
  400. }
  401. }
  402. /*
  403. * Return in *pmask the portion of a cpuset's cpus_allowed that
  404. * are online. If none are online, walk up the cpuset hierarchy
  405. * until we find one that does have some online cpus. If we get
  406. * all the way to the top and still haven't found any online cpus,
  407. * return cpu_online_map. Or if passed a NULL cs from an exit'ing
  408. * task, return cpu_online_map.
  409. *
  410. * One way or another, we guarantee to return some non-empty subset
  411. * of cpu_online_map.
  412. *
  413. * Call with cpuset_sem held.
  414. */
  415. static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
  416. {
  417. while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
  418. cs = cs->parent;
  419. if (cs)
  420. cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
  421. else
  422. *pmask = cpu_online_map;
  423. BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
  424. }
  425. /*
  426. * Return in *pmask the portion of a cpuset's mems_allowed that
  427. * are online. If none are online, walk up the cpuset hierarchy
  428. * until we find one that does have some online mems. If we get
  429. * all the way to the top and still haven't found any online mems,
  430. * return node_online_map.
  431. *
  432. * One way or another, we guarantee to return some non-empty subset
  433. * of node_online_map.
  434. *
  435. * Call with cpuset_sem held.
  436. */
  437. static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
  438. {
  439. while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
  440. cs = cs->parent;
  441. if (cs)
  442. nodes_and(*pmask, cs->mems_allowed, node_online_map);
  443. else
  444. *pmask = node_online_map;
  445. BUG_ON(!nodes_intersects(*pmask, node_online_map));
  446. }
  447. /*
  448. * Refresh current task's mems_allowed and mems_generation from
  449. * current task's cpuset. Call with cpuset_sem held.
  450. *
  451. * Be sure to call refresh_mems() on any cpuset operation which
  452. * (1) holds cpuset_sem, and (2) might possibly alloc memory.
  453. * Call after obtaining cpuset_sem lock, before any possible
  454. * allocation. Otherwise one risks trying to allocate memory
  455. * while the task cpuset_mems_generation is not the same as
  456. * the mems_generation in its cpuset, which would deadlock on
  457. * cpuset_sem in cpuset_update_current_mems_allowed().
  458. *
  459. * Since we hold cpuset_sem, once refresh_mems() is called, the
  460. * test (current->cpuset_mems_generation != cs->mems_generation)
  461. * in cpuset_update_current_mems_allowed() will remain false,
  462. * until we drop cpuset_sem. Anyone else who would change our
  463. * cpuset's mems_generation needs to lock cpuset_sem first.
  464. */
  465. static void refresh_mems(void)
  466. {
  467. struct cpuset *cs = current->cpuset;
  468. if (current->cpuset_mems_generation != cs->mems_generation) {
  469. guarantee_online_mems(cs, &current->mems_allowed);
  470. current->cpuset_mems_generation = cs->mems_generation;
  471. }
  472. }
  473. /*
  474. * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
  475. *
  476. * One cpuset is a subset of another if all its allowed CPUs and
  477. * Memory Nodes are a subset of the other, and its exclusive flags
  478. * are only set if the other's are set.
  479. */
  480. static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
  481. {
  482. return cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
  483. nodes_subset(p->mems_allowed, q->mems_allowed) &&
  484. is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
  485. is_mem_exclusive(p) <= is_mem_exclusive(q);
  486. }
  487. /*
  488. * validate_change() - Used to validate that any proposed cpuset change
  489. * follows the structural rules for cpusets.
  490. *
  491. * If we replaced the flag and mask values of the current cpuset
  492. * (cur) with those values in the trial cpuset (trial), would
  493. * our various subset and exclusive rules still be valid? Presumes
  494. * cpuset_sem held.
  495. *
  496. * 'cur' is the address of an actual, in-use cpuset. Operations
  497. * such as list traversal that depend on the actual address of the
  498. * cpuset in the list must use cur below, not trial.
  499. *
  500. * 'trial' is the address of bulk structure copy of cur, with
  501. * perhaps one or more of the fields cpus_allowed, mems_allowed,
  502. * or flags changed to new, trial values.
  503. *
  504. * Return 0 if valid, -errno if not.
  505. */
  506. static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
  507. {
  508. struct cpuset *c, *par;
  509. /* Each of our child cpusets must be a subset of us */
  510. list_for_each_entry(c, &cur->children, sibling) {
  511. if (!is_cpuset_subset(c, trial))
  512. return -EBUSY;
  513. }
  514. /* Remaining checks don't apply to root cpuset */
  515. if ((par = cur->parent) == NULL)
  516. return 0;
  517. /* We must be a subset of our parent cpuset */
  518. if (!is_cpuset_subset(trial, par))
  519. return -EACCES;
  520. /* If either I or some sibling (!= me) is exclusive, we can't overlap */
  521. list_for_each_entry(c, &par->children, sibling) {
  522. if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
  523. c != cur &&
  524. cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
  525. return -EINVAL;
  526. if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
  527. c != cur &&
  528. nodes_intersects(trial->mems_allowed, c->mems_allowed))
  529. return -EINVAL;
  530. }
  531. return 0;
  532. }
  533. static int update_cpumask(struct cpuset *cs, char *buf)
  534. {
  535. struct cpuset trialcs;
  536. int retval;
  537. trialcs = *cs;
  538. retval = cpulist_parse(buf, trialcs.cpus_allowed);
  539. if (retval < 0)
  540. return retval;
  541. cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
  542. if (cpus_empty(trialcs.cpus_allowed))
  543. return -ENOSPC;
  544. retval = validate_change(cs, &trialcs);
  545. if (retval == 0)
  546. cs->cpus_allowed = trialcs.cpus_allowed;
  547. return retval;
  548. }
  549. static int update_nodemask(struct cpuset *cs, char *buf)
  550. {
  551. struct cpuset trialcs;
  552. int retval;
  553. trialcs = *cs;
  554. retval = nodelist_parse(buf, trialcs.mems_allowed);
  555. if (retval < 0)
  556. return retval;
  557. nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
  558. if (nodes_empty(trialcs.mems_allowed))
  559. return -ENOSPC;
  560. retval = validate_change(cs, &trialcs);
  561. if (retval == 0) {
  562. cs->mems_allowed = trialcs.mems_allowed;
  563. atomic_inc(&cpuset_mems_generation);
  564. cs->mems_generation = atomic_read(&cpuset_mems_generation);
  565. }
  566. return retval;
  567. }
  568. /*
  569. * update_flag - read a 0 or a 1 in a file and update associated flag
  570. * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
  571. * CS_NOTIFY_ON_RELEASE)
  572. * cs: the cpuset to update
  573. * buf: the buffer where we read the 0 or 1
  574. */
  575. static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
  576. {
  577. int turning_on;
  578. struct cpuset trialcs;
  579. int err;
  580. turning_on = (simple_strtoul(buf, NULL, 10) != 0);
  581. trialcs = *cs;
  582. if (turning_on)
  583. set_bit(bit, &trialcs.flags);
  584. else
  585. clear_bit(bit, &trialcs.flags);
  586. err = validate_change(cs, &trialcs);
  587. if (err == 0) {
  588. if (turning_on)
  589. set_bit(bit, &cs->flags);
  590. else
  591. clear_bit(bit, &cs->flags);
  592. }
  593. return err;
  594. }
  595. static int attach_task(struct cpuset *cs, char *buf)
  596. {
  597. pid_t pid;
  598. struct task_struct *tsk;
  599. struct cpuset *oldcs;
  600. cpumask_t cpus;
  601. if (sscanf(buf, "%d", &pid) != 1)
  602. return -EIO;
  603. if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
  604. return -ENOSPC;
  605. if (pid) {
  606. read_lock(&tasklist_lock);
  607. tsk = find_task_by_pid(pid);
  608. if (!tsk) {
  609. read_unlock(&tasklist_lock);
  610. return -ESRCH;
  611. }
  612. get_task_struct(tsk);
  613. read_unlock(&tasklist_lock);
  614. if ((current->euid) && (current->euid != tsk->uid)
  615. && (current->euid != tsk->suid)) {
  616. put_task_struct(tsk);
  617. return -EACCES;
  618. }
  619. } else {
  620. tsk = current;
  621. get_task_struct(tsk);
  622. }
  623. task_lock(tsk);
  624. oldcs = tsk->cpuset;
  625. if (!oldcs) {
  626. task_unlock(tsk);
  627. put_task_struct(tsk);
  628. return -ESRCH;
  629. }
  630. atomic_inc(&cs->count);
  631. tsk->cpuset = cs;
  632. task_unlock(tsk);
  633. guarantee_online_cpus(cs, &cpus);
  634. set_cpus_allowed(tsk, cpus);
  635. put_task_struct(tsk);
  636. if (atomic_dec_and_test(&oldcs->count))
  637. check_for_release(oldcs);
  638. return 0;
  639. }
  640. /* The various types of files and directories in a cpuset file system */
  641. typedef enum {
  642. FILE_ROOT,
  643. FILE_DIR,
  644. FILE_CPULIST,
  645. FILE_MEMLIST,
  646. FILE_CPU_EXCLUSIVE,
  647. FILE_MEM_EXCLUSIVE,
  648. FILE_NOTIFY_ON_RELEASE,
  649. FILE_TASKLIST,
  650. } cpuset_filetype_t;
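/*
 * From userspace, these types correspond to the control files created in
 * each cpuset directory by cpuset_populate_dir() below. A rough usage
 * sketch (assuming the conventional /dev/cpuset mount point):
 *
 *	mount -t cpuset none /dev/cpuset
 *	mkdir /dev/cpuset/example		# cpuset_mkdir()
 *	echo 2-3 > /dev/cpuset/example/cpus	# FILE_CPULIST -> update_cpumask()
 *	echo 1 > /dev/cpuset/example/mems	# FILE_MEMLIST -> update_nodemask()
 *	echo $$ > /dev/cpuset/example/tasks	# FILE_TASKLIST -> attach_task()
 */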
  651. static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
  652. size_t nbytes, loff_t *unused_ppos)
  653. {
  654. struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
  655. struct cftype *cft = __d_cft(file->f_dentry);
  656. cpuset_filetype_t type = cft->private;
  657. char *buffer;
  658. int retval = 0;
  659. /* Crude upper limit on largest legitimate cpulist user might write. */
  660. if (nbytes > 100 + 6 * NR_CPUS)
  661. return -E2BIG;
  662. /* +1 for nul-terminator */
  663. if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
  664. return -ENOMEM;
  665. if (copy_from_user(buffer, userbuf, nbytes)) {
  666. retval = -EFAULT;
  667. goto out1;
  668. }
  669. buffer[nbytes] = 0; /* nul-terminate */
  670. down(&cpuset_sem);
  671. if (is_removed(cs)) {
  672. retval = -ENODEV;
  673. goto out2;
  674. }
  675. switch (type) {
  676. case FILE_CPULIST:
  677. retval = update_cpumask(cs, buffer);
  678. break;
  679. case FILE_MEMLIST:
  680. retval = update_nodemask(cs, buffer);
  681. break;
  682. case FILE_CPU_EXCLUSIVE:
  683. retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
  684. break;
  685. case FILE_MEM_EXCLUSIVE:
  686. retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
  687. break;
  688. case FILE_NOTIFY_ON_RELEASE:
  689. retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
  690. break;
  691. case FILE_TASKLIST:
  692. retval = attach_task(cs, buffer);
  693. break;
  694. default:
  695. retval = -EINVAL;
  696. goto out2;
  697. }
  698. if (retval == 0)
  699. retval = nbytes;
  700. out2:
  701. up(&cpuset_sem);
  702. out1:
  703. kfree(buffer);
  704. return retval;
  705. }
  706. static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
  707. size_t nbytes, loff_t *ppos)
  708. {
  709. ssize_t retval = 0;
  710. struct cftype *cft = __d_cft(file->f_dentry);
  711. if (!cft)
  712. return -ENODEV;
  713. /* special function ? */
  714. if (cft->write)
  715. retval = cft->write(file, buf, nbytes, ppos);
  716. else
  717. retval = cpuset_common_file_write(file, buf, nbytes, ppos);
  718. return retval;
  719. }
  720. /*
  721. * These ascii lists should be read in a single call, by using a user
  722. * buffer large enough to hold the entire map. If read in smaller
  723. * chunks, there is no guarantee of atomicity. Since the display format
  724. * used, list of ranges of sequential numbers, is variable length,
  725. * and since these maps can change value dynamically, one could read
  726. * gibberish by doing partial reads while a list was changing.
  727. * A single large read to a buffer that crosses a page boundary is
  728. * ok, because the result being copied to user land is not recomputed
  729. * across a page fault.
  730. */
  731. static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
  732. {
  733. cpumask_t mask;
  734. down(&cpuset_sem);
  735. mask = cs->cpus_allowed;
  736. up(&cpuset_sem);
  737. return cpulist_scnprintf(page, PAGE_SIZE, mask);
  738. }
  739. static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
  740. {
  741. nodemask_t mask;
  742. down(&cpuset_sem);
  743. mask = cs->mems_allowed;
  744. up(&cpuset_sem);
  745. return nodelist_scnprintf(page, PAGE_SIZE, mask);
  746. }
  747. static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
  748. size_t nbytes, loff_t *ppos)
  749. {
  750. struct cftype *cft = __d_cft(file->f_dentry);
  751. struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
  752. cpuset_filetype_t type = cft->private;
  753. char *page;
  754. ssize_t retval = 0;
  755. char *s;
  756. char *start;
  757. size_t n;
  758. if (!(page = (char *)__get_free_page(GFP_KERNEL)))
  759. return -ENOMEM;
  760. s = page;
  761. switch (type) {
  762. case FILE_CPULIST:
  763. s += cpuset_sprintf_cpulist(s, cs);
  764. break;
  765. case FILE_MEMLIST:
  766. s += cpuset_sprintf_memlist(s, cs);
  767. break;
  768. case FILE_CPU_EXCLUSIVE:
  769. *s++ = is_cpu_exclusive(cs) ? '1' : '0';
  770. break;
  771. case FILE_MEM_EXCLUSIVE:
  772. *s++ = is_mem_exclusive(cs) ? '1' : '0';
  773. break;
  774. case FILE_NOTIFY_ON_RELEASE:
  775. *s++ = notify_on_release(cs) ? '1' : '0';
  776. break;
  777. default:
  778. retval = -EINVAL;
  779. goto out;
  780. }
  781. *s++ = '\n';
  782. *s = '\0';
  783. start = page + *ppos;
  784. n = s - start;
  785. retval = n - copy_to_user(buf, start, min(n, nbytes));
  786. *ppos += retval;
  787. out:
  788. free_page((unsigned long)page);
  789. return retval;
  790. }
  791. static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
  792. loff_t *ppos)
  793. {
  794. ssize_t retval = 0;
  795. struct cftype *cft = __d_cft(file->f_dentry);
  796. if (!cft)
  797. return -ENODEV;
  798. /* special function ? */
  799. if (cft->read)
  800. retval = cft->read(file, buf, nbytes, ppos);
  801. else
  802. retval = cpuset_common_file_read(file, buf, nbytes, ppos);
  803. return retval;
  804. }
  805. static int cpuset_file_open(struct inode *inode, struct file *file)
  806. {
  807. int err;
  808. struct cftype *cft;
  809. err = generic_file_open(inode, file);
  810. if (err)
  811. return err;
  812. cft = __d_cft(file->f_dentry);
  813. if (!cft)
  814. return -ENODEV;
  815. if (cft->open)
  816. err = cft->open(inode, file);
  817. else
  818. err = 0;
  819. return err;
  820. }
  821. static int cpuset_file_release(struct inode *inode, struct file *file)
  822. {
  823. struct cftype *cft = __d_cft(file->f_dentry);
  824. if (cft->release)
  825. return cft->release(inode, file);
  826. return 0;
  827. }
  828. static struct file_operations cpuset_file_operations = {
  829. .read = cpuset_file_read,
  830. .write = cpuset_file_write,
  831. .llseek = generic_file_llseek,
  832. .open = cpuset_file_open,
  833. .release = cpuset_file_release,
  834. };
  835. static struct inode_operations cpuset_dir_inode_operations = {
  836. .lookup = simple_lookup,
  837. .mkdir = cpuset_mkdir,
  838. .rmdir = cpuset_rmdir,
  839. };
  840. static int cpuset_create_file(struct dentry *dentry, int mode)
  841. {
  842. struct inode *inode;
  843. if (!dentry)
  844. return -ENOENT;
  845. if (dentry->d_inode)
  846. return -EEXIST;
  847. inode = cpuset_new_inode(mode);
  848. if (!inode)
  849. return -ENOMEM;
  850. if (S_ISDIR(mode)) {
  851. inode->i_op = &cpuset_dir_inode_operations;
  852. inode->i_fop = &simple_dir_operations;
  853. /* start off with i_nlink == 2 (for "." entry) */
  854. inode->i_nlink++;
  855. } else if (S_ISREG(mode)) {
  856. inode->i_size = 0;
  857. inode->i_fop = &cpuset_file_operations;
  858. }
  859. d_instantiate(dentry, inode);
  860. dget(dentry); /* Extra count - pin the dentry in core */
  861. return 0;
  862. }
  863. /*
  864. * cpuset_create_dir - create a directory for an object.
  865. * cs: the cpuset we create the directory for.
  866. * It must have a valid ->parent field
  867. * And we are going to fill its ->dentry field.
  868. * name: The name to give to the cpuset directory. Will be copied.
  869. * mode: mode to set on new directory.
  870. */
  871. static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
  872. {
  873. struct dentry *dentry = NULL;
  874. struct dentry *parent;
  875. int error = 0;
  876. parent = cs->parent->dentry;
  877. dentry = cpuset_get_dentry(parent, name);
  878. if (IS_ERR(dentry))
  879. return PTR_ERR(dentry);
  880. error = cpuset_create_file(dentry, S_IFDIR | mode);
  881. if (!error) {
  882. dentry->d_fsdata = cs;
  883. parent->d_inode->i_nlink++;
  884. cs->dentry = dentry;
  885. }
  886. dput(dentry);
  887. return error;
  888. }
  889. static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
  890. {
  891. struct dentry *dentry;
  892. int error;
  893. down(&dir->d_inode->i_sem);
  894. dentry = cpuset_get_dentry(dir, cft->name);
  895. if (!IS_ERR(dentry)) {
  896. error = cpuset_create_file(dentry, 0644 | S_IFREG);
  897. if (!error)
  898. dentry->d_fsdata = (void *)cft;
  899. dput(dentry);
  900. } else
  901. error = PTR_ERR(dentry);
  902. up(&dir->d_inode->i_sem);
  903. return error;
  904. }
  905. /*
  906. * Stuff for reading the 'tasks' file.
  907. *
  908. * Reading this file can return large amounts of data if a cpuset has
  909. * *lots* of attached tasks. So it may need several calls to read(),
  910. * but we cannot guarantee that the information we produce is correct
  911. * unless we produce it entirely atomically.
  912. *
  913. * Upon tasks file open(), a struct ctr_struct is allocated, which
  914. * holds a pointer to an array (also allocated here). The struct
  915. * ctr_struct * is stored in file->private_data. Its resources will
  916. * be freed by release() when the file is closed. The array is used
  917. * to sprintf the PIDs and then used by read().
  918. */
  919. /* cpusets_tasks_read array */
  920. struct ctr_struct {
  921. char *buf;
  922. int bufsz;
  923. };
  924. /*
  925. * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
  926. * Return actual number of pids loaded.
  927. */
  928. static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
  929. {
  930. int n = 0;
  931. struct task_struct *g, *p;
  932. read_lock(&tasklist_lock);
  933. do_each_thread(g, p) {
  934. if (p->cpuset == cs) {
  935. pidarray[n++] = p->pid;
  936. if (unlikely(n == npids))
  937. goto array_full;
  938. }
  939. } while_each_thread(g, p);
  940. array_full:
  941. read_unlock(&tasklist_lock);
  942. return n;
  943. }
  944. static int cmppid(const void *a, const void *b)
  945. {
  946. return *(pid_t *)a - *(pid_t *)b;
  947. }
  948. /*
  949. * Convert array 'a' of 'npids' pid_t's to a string of newline separated
  950. * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
  951. * count 'cnt' of how many chars would be written if buf were large enough.
  952. */
  953. static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
  954. {
  955. int cnt = 0;
  956. int i;
  957. for (i = 0; i < npids; i++)
  958. cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
  959. return cnt;
  960. }
  961. static int cpuset_tasks_open(struct inode *unused, struct file *file)
  962. {
  963. struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
  964. struct ctr_struct *ctr;
  965. pid_t *pidarray;
  966. int npids;
  967. char c;
  968. if (!(file->f_mode & FMODE_READ))
  969. return 0;
  970. ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
  971. if (!ctr)
  972. goto err0;
  973. /*
  974. * If cpuset gets more users after we read count, we won't have
  975. * enough space - tough. This race is indistinguishable to the
  976. * caller from the case that the additional cpuset users didn't
  977. * show up until sometime later on.
  978. */
  979. npids = atomic_read(&cs->count);
  980. pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
  981. if (!pidarray)
  982. goto err1;
  983. npids = pid_array_load(pidarray, npids, cs);
  984. sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
  985. /* Call pid_array_to_buf() twice, first just to get bufsz */
  986. ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
  987. ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
  988. if (!ctr->buf)
  989. goto err2;
  990. ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
  991. kfree(pidarray);
  992. file->private_data = ctr;
  993. return 0;
  994. err2:
  995. kfree(pidarray);
  996. err1:
  997. kfree(ctr);
  998. err0:
  999. return -ENOMEM;
  1000. }
  1001. static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
  1002. size_t nbytes, loff_t *ppos)
  1003. {
  1004. struct ctr_struct *ctr = file->private_data;
  1005. if (*ppos + nbytes > ctr->bufsz)
  1006. nbytes = ctr->bufsz - *ppos;
  1007. if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
  1008. return -EFAULT;
  1009. *ppos += nbytes;
  1010. return nbytes;
  1011. }
  1012. static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
  1013. {
  1014. struct ctr_struct *ctr;
  1015. if (file->f_mode & FMODE_READ) {
  1016. ctr = file->private_data;
  1017. kfree(ctr->buf);
  1018. kfree(ctr);
  1019. }
  1020. return 0;
  1021. }
  1022. /*
  1023. * for the common functions, 'private' gives the type of file
  1024. */
  1025. static struct cftype cft_tasks = {
  1026. .name = "tasks",
  1027. .open = cpuset_tasks_open,
  1028. .read = cpuset_tasks_read,
  1029. .release = cpuset_tasks_release,
  1030. .private = FILE_TASKLIST,
  1031. };
  1032. static struct cftype cft_cpus = {
  1033. .name = "cpus",
  1034. .private = FILE_CPULIST,
  1035. };
  1036. static struct cftype cft_mems = {
  1037. .name = "mems",
  1038. .private = FILE_MEMLIST,
  1039. };
  1040. static struct cftype cft_cpu_exclusive = {
  1041. .name = "cpu_exclusive",
  1042. .private = FILE_CPU_EXCLUSIVE,
  1043. };
  1044. static struct cftype cft_mem_exclusive = {
  1045. .name = "mem_exclusive",
  1046. .private = FILE_MEM_EXCLUSIVE,
  1047. };
  1048. static struct cftype cft_notify_on_release = {
  1049. .name = "notify_on_release",
  1050. .private = FILE_NOTIFY_ON_RELEASE,
  1051. };
  1052. static int cpuset_populate_dir(struct dentry *cs_dentry)
  1053. {
  1054. int err;
  1055. if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
  1056. return err;
  1057. if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
  1058. return err;
  1059. if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
  1060. return err;
  1061. if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
  1062. return err;
  1063. if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
  1064. return err;
  1065. if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
  1066. return err;
  1067. return 0;
  1068. }
  1069. /*
  1070. * cpuset_create - create a cpuset
  1071. * parent: cpuset that will be parent of the new cpuset.
  1072. * name: name of the new cpuset. Will be strcpy'ed.
  1073. * mode: mode to set on new inode
  1074. *
  1075. * Must be called with the semaphore on the parent inode held
  1076. */
  1077. static long cpuset_create(struct cpuset *parent, const char *name, int mode)
  1078. {
  1079. struct cpuset *cs;
  1080. int err;
  1081. cs = kmalloc(sizeof(*cs), GFP_KERNEL);
  1082. if (!cs)
  1083. return -ENOMEM;
  1084. down(&cpuset_sem);
  1085. refresh_mems();
  1086. cs->flags = 0;
  1087. if (notify_on_release(parent))
  1088. set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
  1089. cs->cpus_allowed = CPU_MASK_NONE;
  1090. cs->mems_allowed = NODE_MASK_NONE;
  1091. atomic_set(&cs->count, 0);
  1092. INIT_LIST_HEAD(&cs->sibling);
  1093. INIT_LIST_HEAD(&cs->children);
  1094. atomic_inc(&cpuset_mems_generation);
  1095. cs->mems_generation = atomic_read(&cpuset_mems_generation);
  1096. cs->parent = parent;
  1097. list_add(&cs->sibling, &cs->parent->children);
  1098. err = cpuset_create_dir(cs, name, mode);
  1099. if (err < 0)
  1100. goto err;
  1101. /*
  1102. * Release cpuset_sem before cpuset_populate_dir() because it
  1103. * will down() this new directory's i_sem and if we race with
  1104. * another mkdir, we might deadlock.
  1105. */
  1106. up(&cpuset_sem);
  1107. err = cpuset_populate_dir(cs->dentry);
  1108. /* If err < 0, we have a half-filled directory - oh well ;) */
  1109. return 0;
  1110. err:
  1111. list_del(&cs->sibling);
  1112. up(&cpuset_sem);
  1113. kfree(cs);
  1114. return err;
  1115. }
  1116. static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
  1117. {
  1118. struct cpuset *c_parent = dentry->d_parent->d_fsdata;
  1119. /* the vfs holds inode->i_sem already */
  1120. return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
  1121. }
  1122. static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
  1123. {
  1124. struct cpuset *cs = dentry->d_fsdata;
  1125. struct dentry *d;
  1126. struct cpuset *parent;
  1127. /* the vfs holds both inode->i_sem already */
  1128. down(&cpuset_sem);
  1129. refresh_mems();
  1130. if (atomic_read(&cs->count) > 0) {
  1131. up(&cpuset_sem);
  1132. return -EBUSY;
  1133. }
  1134. if (!list_empty(&cs->children)) {
  1135. up(&cpuset_sem);
  1136. return -EBUSY;
  1137. }
  1138. spin_lock(&cs->dentry->d_lock);
  1139. parent = cs->parent;
  1140. set_bit(CS_REMOVED, &cs->flags);
  1141. list_del(&cs->sibling); /* delete my sibling from parent->children */
  1142. if (list_empty(&parent->children))
  1143. check_for_release(parent);
  1144. d = dget(cs->dentry);
  1145. cs->dentry = NULL;
  1146. spin_unlock(&d->d_lock);
  1147. cpuset_d_remove_dir(d);
  1148. dput(d);
  1149. up(&cpuset_sem);
  1150. return 0;
  1151. }
  1152. /**
  1153. * cpuset_init - initialize cpusets at system boot
  1154. *
  1155. * Description: Initialize top_cpuset and the cpuset internal file system.
  1156. **/
  1157. int __init cpuset_init(void)
  1158. {
  1159. struct dentry *root;
  1160. int err;
  1161. top_cpuset.cpus_allowed = CPU_MASK_ALL;
  1162. top_cpuset.mems_allowed = NODE_MASK_ALL;
  1163. atomic_inc(&cpuset_mems_generation);
  1164. top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);
  1165. init_task.cpuset = &top_cpuset;
  1166. err = register_filesystem(&cpuset_fs_type);
  1167. if (err < 0)
  1168. goto out;
  1169. cpuset_mount = kern_mount(&cpuset_fs_type);
  1170. if (IS_ERR(cpuset_mount)) {
  1171. printk(KERN_ERR "cpuset: could not mount!\n");
  1172. err = PTR_ERR(cpuset_mount);
  1173. cpuset_mount = NULL;
  1174. goto out;
  1175. }
  1176. root = cpuset_mount->mnt_sb->s_root;
  1177. root->d_fsdata = &top_cpuset;
  1178. root->d_inode->i_nlink++;
  1179. top_cpuset.dentry = root;
  1180. root->d_inode->i_op = &cpuset_dir_inode_operations;
  1181. err = cpuset_populate_dir(root);
  1182. out:
  1183. return err;
  1184. }
  1185. /**
  1186. * cpuset_init_smp - initialize cpus_allowed
  1187. *
  1188. * Description: Finish top cpuset after cpu, node maps are initialized
  1189. **/
  1190. void __init cpuset_init_smp(void)
  1191. {
  1192. top_cpuset.cpus_allowed = cpu_online_map;
  1193. top_cpuset.mems_allowed = node_online_map;
  1194. }
  1195. /**
  1196. * cpuset_fork - attach newly forked task to its parent's cpuset.
  1197. * @tsk: pointer to task_struct of the newly forked task.
  1198. *
  1199. * Description: By default, on fork, a task inherits its
  1200. * parents cpuset. The pointer to the shared cpuset is
  1201. * automatically copied in fork.c by dup_task_struct().
  1202. * This cpuset_fork() routine need only increment the usage
  1203. * counter in that cpuset.
  1204. **/
  1205. void cpuset_fork(struct task_struct *tsk)
  1206. {
  1207. atomic_inc(&tsk->cpuset->count);
  1208. }
  1209. /**
  1210. * cpuset_exit - detach cpuset from exiting task
  1211. * @tsk: pointer to task_struct of exiting process
  1212. *
  1213. * Description: Detach cpuset from @tsk and release it.
  1214. *
  1215. * Note that cpusets marked notify_on_release force every task
  1216. * in them to take the global cpuset_sem semaphore when exiting.
  1217. * This could impact scaling on very large systems. Be reluctant
  1218. * to use notify_on_release cpusets where very high task exit
  1219. * scaling is required on large systems.
  1220. *
  1221. * Don't even think about dereferencing 'cs' after the cpuset use
  1222. * count goes to zero, except inside a critical section guarded
  1223. * by the cpuset_sem semaphore. If you don't hold cpuset_sem,
  1224. * then a zero cpuset use count is a license to any other task to
  1225. * nuke the cpuset immediately.
  1226. *
  1227. **/
  1228. void cpuset_exit(struct task_struct *tsk)
  1229. {
  1230. struct cpuset *cs;
  1231. task_lock(tsk);
  1232. cs = tsk->cpuset;
  1233. tsk->cpuset = NULL;
  1234. task_unlock(tsk);
  1235. if (notify_on_release(cs)) {
  1236. down(&cpuset_sem);
  1237. if (atomic_dec_and_test(&cs->count))
  1238. check_for_release(cs);
  1239. up(&cpuset_sem);
  1240. } else {
  1241. atomic_dec(&cs->count);
  1242. }
  1243. }
  1244. /**
  1245. * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
  1246. * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
  1247. *
  1248. * Description: Returns the cpumask_t cpus_allowed of the cpuset
  1249. * attached to the specified @tsk. Guaranteed to return some non-empty
  1250. * subset of cpu_online_map, even if this means going outside the
  1251. * task's cpuset.
  1252. **/
  1253. cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
  1254. {
  1255. cpumask_t mask;
  1256. down(&cpuset_sem);
  1257. task_lock((struct task_struct *)tsk);
  1258. guarantee_online_cpus(tsk->cpuset, &mask);
  1259. task_unlock((struct task_struct *)tsk);
  1260. up(&cpuset_sem);
  1261. return mask;
  1262. }
  1263. void cpuset_init_current_mems_allowed(void)
  1264. {
  1265. current->mems_allowed = NODE_MASK_ALL;
  1266. }
  1267. /*
  1268. * If the current task's cpuset's mems_allowed changed behind our backs,
  1269. * update current->mems_allowed and mems_generation to the new value.
  1270. * Do not call this routine if in_interrupt().
  1271. */
  1272. void cpuset_update_current_mems_allowed(void)
  1273. {
  1274. struct cpuset *cs = current->cpuset;
  1275. if (!cs)
  1276. return; /* task is exiting */
  1277. if (current->cpuset_mems_generation != cs->mems_generation) {
  1278. down(&cpuset_sem);
  1279. refresh_mems();
  1280. up(&cpuset_sem);
  1281. }
  1282. }
  1283. void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
  1284. {
  1285. bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
  1286. MAX_NUMNODES);
  1287. }
  1288. /*
  1289. * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
  1290. */
  1291. int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
  1292. {
  1293. int i;
  1294. for (i = 0; zl->zones[i]; i++) {
  1295. int nid = zl->zones[i]->zone_pgdat->node_id;
  1296. if (node_isset(nid, current->mems_allowed))
  1297. return 1;
  1298. }
  1299. return 0;
  1300. }
  1301. /*
  1302. * Is 'current' valid, and is zone z allowed in current->mems_allowed?
  1303. */
  1304. int cpuset_zone_allowed(struct zone *z)
  1305. {
  1306. return in_interrupt() ||
  1307. node_isset(z->zone_pgdat->node_id, current->mems_allowed);
  1308. }
  1309. /*
  1310. * proc_cpuset_show()
  1311. * - Print task's cpuset path into seq_file.
  1312. * - Used for /proc/<pid>/cpuset.
  1313. */
  1314. static int proc_cpuset_show(struct seq_file *m, void *v)
  1315. {
  1316. struct cpuset *cs;
  1317. struct task_struct *tsk;
  1318. char *buf;
  1319. int retval = 0;
  1320. buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  1321. if (!buf)
  1322. return -ENOMEM;
  1323. tsk = m->private;
  1324. down(&cpuset_sem);
  1325. task_lock(tsk);
  1326. cs = tsk->cpuset;
  1327. task_unlock(tsk);
  1328. if (!cs) {
  1329. retval = -EINVAL;
  1330. goto out;
  1331. }
  1332. retval = cpuset_path(cs, buf, PAGE_SIZE);
  1333. if (retval < 0)
  1334. goto out;
  1335. seq_puts(m, buf);
  1336. seq_putc(m, '\n');
  1337. out:
  1338. up(&cpuset_sem);
  1339. kfree(buf);
  1340. return retval;
  1341. }
  1342. static int cpuset_open(struct inode *inode, struct file *file)
  1343. {
  1344. struct task_struct *tsk = PROC_I(inode)->task;
  1345. return single_open(file, proc_cpuset_show, tsk);
  1346. }
  1347. struct file_operations proc_cpuset_operations = {
  1348. .open = cpuset_open,
  1349. .read = seq_read,
  1350. .llseek = seq_lseek,
  1351. .release = single_release,
  1352. };
  1353. /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
  1354. char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
  1355. {
  1356. buffer += sprintf(buffer, "Cpus_allowed:\t");
  1357. buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
  1358. buffer += sprintf(buffer, "\n");
  1359. buffer += sprintf(buffer, "Mems_allowed:\t");
  1360. buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
  1361. buffer += sprintf(buffer, "\n");
  1362. return buffer;
  1363. }