module.c 56 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221
  1. /* Rewritten by Rusty Russell, on the backs of many others...
  2. Copyright (C) 2002 Richard Henderson
  3. Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 2 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program; if not, write to the Free Software
  14. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  15. */
  16. #include <linux/config.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleloader.h>
  19. #include <linux/init.h>
  20. #include <linux/slab.h>
  21. #include <linux/vmalloc.h>
  22. #include <linux/elf.h>
  23. #include <linux/seq_file.h>
  24. #include <linux/syscalls.h>
  25. #include <linux/fcntl.h>
  26. #include <linux/rcupdate.h>
  27. #include <linux/cpu.h>
  28. #include <linux/moduleparam.h>
  29. #include <linux/errno.h>
  30. #include <linux/err.h>
  31. #include <linux/vermagic.h>
  32. #include <linux/notifier.h>
  33. #include <linux/stop_machine.h>
  34. #include <linux/device.h>
  35. #include <linux/string.h>
  36. #include <asm/uaccess.h>
  37. #include <asm/semaphore.h>
  38. #include <asm/cacheflush.h>
  39. #if 0
  40. #define DEBUGP printk
  41. #else
  42. #define DEBUGP(fmt , a...)
  43. #endif
  44. #ifndef ARCH_SHF_SMALL
  45. #define ARCH_SHF_SMALL 0
  46. #endif
  47. /* If this is set, the section belongs in the init part of the module */
  48. #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
  49. /* Protects module list */
  50. static DEFINE_SPINLOCK(modlist_lock);
  51. /* List of modules, protected by module_mutex AND modlist_lock */
  52. static DECLARE_MUTEX(module_mutex);
  53. static LIST_HEAD(modules);
  54. static DECLARE_MUTEX(notify_mutex);
  55. static struct notifier_block * module_notify_list;
  56. int register_module_notifier(struct notifier_block * nb)
  57. {
  58. int err;
  59. down(&notify_mutex);
  60. err = notifier_chain_register(&module_notify_list, nb);
  61. up(&notify_mutex);
  62. return err;
  63. }
  64. EXPORT_SYMBOL(register_module_notifier);
  65. int unregister_module_notifier(struct notifier_block * nb)
  66. {
  67. int err;
  68. down(&notify_mutex);
  69. err = notifier_chain_unregister(&module_notify_list, nb);
  70. up(&notify_mutex);
  71. return err;
  72. }
  73. EXPORT_SYMBOL(unregister_module_notifier);
  74. /* We require a truly strong try_module_get() */
  75. static inline int strong_try_module_get(struct module *mod)
  76. {
  77. if (mod && mod->state == MODULE_STATE_COMING)
  78. return 0;
  79. return try_module_get(mod);
  80. }
  81. /* A thread that wants to hold a reference to a module only while it
  82. * is running can call ths to safely exit.
  83. * nfsd and lockd use this.
  84. */
  85. void __module_put_and_exit(struct module *mod, long code)
  86. {
  87. module_put(mod);
  88. do_exit(code);
  89. }
  90. EXPORT_SYMBOL(__module_put_and_exit);
  91. /* Find a module section: 0 means not found. */
  92. static unsigned int find_sec(Elf_Ehdr *hdr,
  93. Elf_Shdr *sechdrs,
  94. const char *secstrings,
  95. const char *name)
  96. {
  97. unsigned int i;
  98. for (i = 1; i < hdr->e_shnum; i++)
  99. /* Alloc bit cleared means "ignore it." */
  100. if ((sechdrs[i].sh_flags & SHF_ALLOC)
  101. && strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
  102. return i;
  103. return 0;
  104. }
  105. /* Provided by the linker */
  106. extern const struct kernel_symbol __start___ksymtab[];
  107. extern const struct kernel_symbol __stop___ksymtab[];
  108. extern const struct kernel_symbol __start___ksymtab_gpl[];
  109. extern const struct kernel_symbol __stop___ksymtab_gpl[];
  110. extern const unsigned long __start___kcrctab[];
  111. extern const unsigned long __start___kcrctab_gpl[];
  112. #ifndef CONFIG_MODVERSIONS
  113. #define symversion(base, idx) NULL
  114. #else
  115. #define symversion(base, idx) ((base) ? ((base) + (idx)) : NULL)
  116. #endif
  117. /* Find a symbol, return value, crc and module which owns it */
  118. static unsigned long __find_symbol(const char *name,
  119. struct module **owner,
  120. const unsigned long **crc,
  121. int gplok)
  122. {
  123. struct module *mod;
  124. unsigned int i;
  125. /* Core kernel first. */
  126. *owner = NULL;
  127. for (i = 0; __start___ksymtab+i < __stop___ksymtab; i++) {
  128. if (strcmp(__start___ksymtab[i].name, name) == 0) {
  129. *crc = symversion(__start___kcrctab, i);
  130. return __start___ksymtab[i].value;
  131. }
  132. }
  133. if (gplok) {
  134. for (i = 0; __start___ksymtab_gpl+i<__stop___ksymtab_gpl; i++)
  135. if (strcmp(__start___ksymtab_gpl[i].name, name) == 0) {
  136. *crc = symversion(__start___kcrctab_gpl, i);
  137. return __start___ksymtab_gpl[i].value;
  138. }
  139. }
  140. /* Now try modules. */
  141. list_for_each_entry(mod, &modules, list) {
  142. *owner = mod;
  143. for (i = 0; i < mod->num_syms; i++)
  144. if (strcmp(mod->syms[i].name, name) == 0) {
  145. *crc = symversion(mod->crcs, i);
  146. return mod->syms[i].value;
  147. }
  148. if (gplok) {
  149. for (i = 0; i < mod->num_gpl_syms; i++) {
  150. if (strcmp(mod->gpl_syms[i].name, name) == 0) {
  151. *crc = symversion(mod->gpl_crcs, i);
  152. return mod->gpl_syms[i].value;
  153. }
  154. }
  155. }
  156. }
  157. DEBUGP("Failed to find symbol %s\n", name);
  158. return 0;
  159. }
  160. /* Find a symbol in this elf symbol table */
  161. static unsigned long find_local_symbol(Elf_Shdr *sechdrs,
  162. unsigned int symindex,
  163. const char *strtab,
  164. const char *name)
  165. {
  166. unsigned int i;
  167. Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
  168. /* Search (defined) internal symbols first. */
  169. for (i = 1; i < sechdrs[symindex].sh_size/sizeof(*sym); i++) {
  170. if (sym[i].st_shndx != SHN_UNDEF
  171. && strcmp(name, strtab + sym[i].st_name) == 0)
  172. return sym[i].st_value;
  173. }
  174. return 0;
  175. }
  176. /* Search for module by name: must hold module_mutex. */
  177. static struct module *find_module(const char *name)
  178. {
  179. struct module *mod;
  180. list_for_each_entry(mod, &modules, list) {
  181. if (strcmp(mod->name, name) == 0)
  182. return mod;
  183. }
  184. return NULL;
  185. }
  186. #ifdef CONFIG_SMP
  187. /* Number of blocks used and allocated. */
  188. static unsigned int pcpu_num_used, pcpu_num_allocated;
  189. /* Size of each block. -ve means used. */
  190. static int *pcpu_size;
  191. static int split_block(unsigned int i, unsigned short size)
  192. {
  193. /* Reallocation required? */
  194. if (pcpu_num_used + 1 > pcpu_num_allocated) {
  195. int *new = kmalloc(sizeof(new[0]) * pcpu_num_allocated*2,
  196. GFP_KERNEL);
  197. if (!new)
  198. return 0;
  199. memcpy(new, pcpu_size, sizeof(new[0])*pcpu_num_allocated);
  200. pcpu_num_allocated *= 2;
  201. kfree(pcpu_size);
  202. pcpu_size = new;
  203. }
  204. /* Insert a new subblock */
  205. memmove(&pcpu_size[i+1], &pcpu_size[i],
  206. sizeof(pcpu_size[0]) * (pcpu_num_used - i));
  207. pcpu_num_used++;
  208. pcpu_size[i+1] -= size;
  209. pcpu_size[i] = size;
  210. return 1;
  211. }
  212. static inline unsigned int block_size(int val)
  213. {
  214. if (val < 0)
  215. return -val;
  216. return val;
  217. }
  218. /* Created by linker magic */
  219. extern char __per_cpu_start[], __per_cpu_end[];
  220. static void *percpu_modalloc(unsigned long size, unsigned long align,
  221. const char *name)
  222. {
  223. unsigned long extra;
  224. unsigned int i;
  225. void *ptr;
  226. if (align > SMP_CACHE_BYTES) {
  227. printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
  228. name, align, SMP_CACHE_BYTES);
  229. align = SMP_CACHE_BYTES;
  230. }
  231. ptr = __per_cpu_start;
  232. for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
  233. /* Extra for alignment requirement. */
  234. extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
  235. BUG_ON(i == 0 && extra != 0);
  236. if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
  237. continue;
  238. /* Transfer extra to previous block. */
  239. if (pcpu_size[i-1] < 0)
  240. pcpu_size[i-1] -= extra;
  241. else
  242. pcpu_size[i-1] += extra;
  243. pcpu_size[i] -= extra;
  244. ptr += extra;
  245. /* Split block if warranted */
  246. if (pcpu_size[i] - size > sizeof(unsigned long))
  247. if (!split_block(i, size))
  248. return NULL;
  249. /* Mark allocated */
  250. pcpu_size[i] = -pcpu_size[i];
  251. return ptr;
  252. }
  253. printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
  254. size);
  255. return NULL;
  256. }
  257. static void percpu_modfree(void *freeme)
  258. {
  259. unsigned int i;
  260. void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
  261. /* First entry is core kernel percpu data. */
  262. for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
  263. if (ptr == freeme) {
  264. pcpu_size[i] = -pcpu_size[i];
  265. goto free;
  266. }
  267. }
  268. BUG();
  269. free:
  270. /* Merge with previous? */
  271. if (pcpu_size[i-1] >= 0) {
  272. pcpu_size[i-1] += pcpu_size[i];
  273. pcpu_num_used--;
  274. memmove(&pcpu_size[i], &pcpu_size[i+1],
  275. (pcpu_num_used - i) * sizeof(pcpu_size[0]));
  276. i--;
  277. }
  278. /* Merge with next? */
  279. if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
  280. pcpu_size[i] += pcpu_size[i+1];
  281. pcpu_num_used--;
  282. memmove(&pcpu_size[i+1], &pcpu_size[i+2],
  283. (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
  284. }
  285. }
  286. static unsigned int find_pcpusec(Elf_Ehdr *hdr,
  287. Elf_Shdr *sechdrs,
  288. const char *secstrings)
  289. {
  290. return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
  291. }
  292. static int percpu_modinit(void)
  293. {
  294. pcpu_num_used = 2;
  295. pcpu_num_allocated = 2;
  296. pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
  297. GFP_KERNEL);
  298. /* Static in-kernel percpu data (used). */
  299. pcpu_size[0] = -ALIGN(__per_cpu_end-__per_cpu_start, SMP_CACHE_BYTES);
  300. /* Free room. */
  301. pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
  302. if (pcpu_size[1] < 0) {
  303. printk(KERN_ERR "No per-cpu room for modules.\n");
  304. pcpu_num_used = 1;
  305. }
  306. return 0;
  307. }
  308. __initcall(percpu_modinit);
  309. #else /* ... !CONFIG_SMP */
  310. static inline void *percpu_modalloc(unsigned long size, unsigned long align,
  311. const char *name)
  312. {
  313. return NULL;
  314. }
  315. static inline void percpu_modfree(void *pcpuptr)
  316. {
  317. BUG();
  318. }
  319. static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
  320. Elf_Shdr *sechdrs,
  321. const char *secstrings)
  322. {
  323. return 0;
  324. }
  325. static inline void percpu_modcopy(void *pcpudst, const void *src,
  326. unsigned long size)
  327. {
  328. /* pcpusec should be 0, and size of that section should be 0. */
  329. BUG_ON(size != 0);
  330. }
  331. #endif /* CONFIG_SMP */
  332. #ifdef CONFIG_MODULE_UNLOAD
  333. #define MODINFO_ATTR(field) \
  334. static void setup_modinfo_##field(struct module *mod, const char *s) \
  335. { \
  336. mod->field = kstrdup(s, GFP_KERNEL); \
  337. } \
  338. static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
  339. struct module *mod, char *buffer) \
  340. { \
  341. return sprintf(buffer, "%s\n", mod->field); \
  342. } \
  343. static int modinfo_##field##_exists(struct module *mod) \
  344. { \
  345. return mod->field != NULL; \
  346. } \
  347. static void free_modinfo_##field(struct module *mod) \
  348. { \
  349. kfree(mod->field); \
  350. mod->field = NULL; \
  351. } \
  352. static struct module_attribute modinfo_##field = { \
  353. .attr = { .name = __stringify(field), .mode = 0444, \
  354. .owner = THIS_MODULE }, \
  355. .show = show_modinfo_##field, \
  356. .setup = setup_modinfo_##field, \
  357. .test = modinfo_##field##_exists, \
  358. .free = free_modinfo_##field, \
  359. };
  360. MODINFO_ATTR(version);
  361. MODINFO_ATTR(srcversion);
  362. static struct module_attribute *modinfo_attrs[] = {
  363. &modinfo_version,
  364. &modinfo_srcversion,
  365. NULL,
  366. };
  367. /* Init the unload section of the module. */
  368. static void module_unload_init(struct module *mod)
  369. {
  370. unsigned int i;
  371. INIT_LIST_HEAD(&mod->modules_which_use_me);
  372. for (i = 0; i < NR_CPUS; i++)
  373. local_set(&mod->ref[i].count, 0);
  374. /* Hold reference count during initialization. */
  375. local_set(&mod->ref[raw_smp_processor_id()].count, 1);
  376. /* Backwards compatibility macros put refcount during init. */
  377. mod->waiter = current;
  378. }
  379. /* modules using other modules */
  380. struct module_use
  381. {
  382. struct list_head list;
  383. struct module *module_which_uses;
  384. };
  385. /* Does a already use b? */
  386. static int already_uses(struct module *a, struct module *b)
  387. {
  388. struct module_use *use;
  389. list_for_each_entry(use, &b->modules_which_use_me, list) {
  390. if (use->module_which_uses == a) {
  391. DEBUGP("%s uses %s!\n", a->name, b->name);
  392. return 1;
  393. }
  394. }
  395. DEBUGP("%s does not use %s!\n", a->name, b->name);
  396. return 0;
  397. }
  398. /* Module a uses b */
  399. static int use_module(struct module *a, struct module *b)
  400. {
  401. struct module_use *use;
  402. if (b == NULL || already_uses(a, b)) return 1;
  403. if (!strong_try_module_get(b))
  404. return 0;
  405. DEBUGP("Allocating new usage for %s.\n", a->name);
  406. use = kmalloc(sizeof(*use), GFP_ATOMIC);
  407. if (!use) {
  408. printk("%s: out of memory loading\n", a->name);
  409. module_put(b);
  410. return 0;
  411. }
  412. use->module_which_uses = a;
  413. list_add(&use->list, &b->modules_which_use_me);
  414. return 1;
  415. }
  416. /* Clear the unload stuff of the module. */
  417. static void module_unload_free(struct module *mod)
  418. {
  419. struct module *i;
  420. list_for_each_entry(i, &modules, list) {
  421. struct module_use *use;
  422. list_for_each_entry(use, &i->modules_which_use_me, list) {
  423. if (use->module_which_uses == mod) {
  424. DEBUGP("%s unusing %s\n", mod->name, i->name);
  425. module_put(i);
  426. list_del(&use->list);
  427. kfree(use);
  428. /* There can be at most one match. */
  429. break;
  430. }
  431. }
  432. }
  433. }
  434. #ifdef CONFIG_MODULE_FORCE_UNLOAD
  435. static inline int try_force(unsigned int flags)
  436. {
  437. int ret = (flags & O_TRUNC);
  438. if (ret)
  439. tainted |= TAINT_FORCED_MODULE;
  440. return ret;
  441. }
  442. #else
  443. static inline int try_force(unsigned int flags)
  444. {
  445. return 0;
  446. }
  447. #endif /* CONFIG_MODULE_FORCE_UNLOAD */
  448. struct stopref
  449. {
  450. struct module *mod;
  451. int flags;
  452. int *forced;
  453. };
  454. /* Whole machine is stopped with interrupts off when this runs. */
  455. static int __try_stop_module(void *_sref)
  456. {
  457. struct stopref *sref = _sref;
  458. /* If it's not unused, quit unless we are told to block. */
  459. if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
  460. if (!(*sref->forced = try_force(sref->flags)))
  461. return -EWOULDBLOCK;
  462. }
  463. /* Mark it as dying. */
  464. sref->mod->state = MODULE_STATE_GOING;
  465. return 0;
  466. }
  467. static int try_stop_module(struct module *mod, int flags, int *forced)
  468. {
  469. struct stopref sref = { mod, flags, forced };
  470. return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
  471. }
  472. unsigned int module_refcount(struct module *mod)
  473. {
  474. unsigned int i, total = 0;
  475. for (i = 0; i < NR_CPUS; i++)
  476. total += local_read(&mod->ref[i].count);
  477. return total;
  478. }
  479. EXPORT_SYMBOL(module_refcount);
  480. /* This exists whether we can unload or not */
  481. static void free_module(struct module *mod);
  482. static void wait_for_zero_refcount(struct module *mod)
  483. {
  484. /* Since we might sleep for some time, drop the semaphore first */
  485. up(&module_mutex);
  486. for (;;) {
  487. DEBUGP("Looking at refcount...\n");
  488. set_current_state(TASK_UNINTERRUPTIBLE);
  489. if (module_refcount(mod) == 0)
  490. break;
  491. schedule();
  492. }
  493. current->state = TASK_RUNNING;
  494. down(&module_mutex);
  495. }
  496. asmlinkage long
  497. sys_delete_module(const char __user *name_user, unsigned int flags)
  498. {
  499. struct module *mod;
  500. char name[MODULE_NAME_LEN];
  501. int ret, forced = 0;
  502. if (!capable(CAP_SYS_MODULE))
  503. return -EPERM;
  504. if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
  505. return -EFAULT;
  506. name[MODULE_NAME_LEN-1] = '\0';
  507. if (down_interruptible(&module_mutex) != 0)
  508. return -EINTR;
  509. mod = find_module(name);
  510. if (!mod) {
  511. ret = -ENOENT;
  512. goto out;
  513. }
  514. if (!list_empty(&mod->modules_which_use_me)) {
  515. /* Other modules depend on us: get rid of them first. */
  516. ret = -EWOULDBLOCK;
  517. goto out;
  518. }
  519. /* Doing init or already dying? */
  520. if (mod->state != MODULE_STATE_LIVE) {
  521. /* FIXME: if (force), slam module count and wake up
  522. waiter --RR */
  523. DEBUGP("%s already dying\n", mod->name);
  524. ret = -EBUSY;
  525. goto out;
  526. }
  527. /* If it has an init func, it must have an exit func to unload */
  528. if ((mod->init != NULL && mod->exit == NULL)
  529. || mod->unsafe) {
  530. forced = try_force(flags);
  531. if (!forced) {
  532. /* This module can't be removed */
  533. ret = -EBUSY;
  534. goto out;
  535. }
  536. }
  537. /* Set this up before setting mod->state */
  538. mod->waiter = current;
  539. /* Stop the machine so refcounts can't move and disable module. */
  540. ret = try_stop_module(mod, flags, &forced);
  541. if (ret != 0)
  542. goto out;
  543. /* Never wait if forced. */
  544. if (!forced && module_refcount(mod) != 0)
  545. wait_for_zero_refcount(mod);
  546. /* Final destruction now noone is using it. */
  547. if (mod->exit != NULL) {
  548. up(&module_mutex);
  549. mod->exit();
  550. down(&module_mutex);
  551. }
  552. free_module(mod);
  553. out:
  554. up(&module_mutex);
  555. return ret;
  556. }
  557. static void print_unload_info(struct seq_file *m, struct module *mod)
  558. {
  559. struct module_use *use;
  560. int printed_something = 0;
  561. seq_printf(m, " %u ", module_refcount(mod));
  562. /* Always include a trailing , so userspace can differentiate
  563. between this and the old multi-field proc format. */
  564. list_for_each_entry(use, &mod->modules_which_use_me, list) {
  565. printed_something = 1;
  566. seq_printf(m, "%s,", use->module_which_uses->name);
  567. }
  568. if (mod->unsafe) {
  569. printed_something = 1;
  570. seq_printf(m, "[unsafe],");
  571. }
  572. if (mod->init != NULL && mod->exit == NULL) {
  573. printed_something = 1;
  574. seq_printf(m, "[permanent],");
  575. }
  576. if (!printed_something)
  577. seq_printf(m, "-");
  578. }
  579. void __symbol_put(const char *symbol)
  580. {
  581. struct module *owner;
  582. unsigned long flags;
  583. const unsigned long *crc;
  584. spin_lock_irqsave(&modlist_lock, flags);
  585. if (!__find_symbol(symbol, &owner, &crc, 1))
  586. BUG();
  587. module_put(owner);
  588. spin_unlock_irqrestore(&modlist_lock, flags);
  589. }
  590. EXPORT_SYMBOL(__symbol_put);
  591. void symbol_put_addr(void *addr)
  592. {
  593. unsigned long flags;
  594. spin_lock_irqsave(&modlist_lock, flags);
  595. if (!kernel_text_address((unsigned long)addr))
  596. BUG();
  597. module_put(module_text_address((unsigned long)addr));
  598. spin_unlock_irqrestore(&modlist_lock, flags);
  599. }
  600. EXPORT_SYMBOL_GPL(symbol_put_addr);
  601. static ssize_t show_refcnt(struct module_attribute *mattr,
  602. struct module *mod, char *buffer)
  603. {
  604. /* sysfs holds a reference */
  605. return sprintf(buffer, "%u\n", module_refcount(mod)-1);
  606. }
  607. static struct module_attribute refcnt = {
  608. .attr = { .name = "refcnt", .mode = 0444, .owner = THIS_MODULE },
  609. .show = show_refcnt,
  610. };
  611. #else /* !CONFIG_MODULE_UNLOAD */
  612. static void print_unload_info(struct seq_file *m, struct module *mod)
  613. {
  614. /* We don't know the usage count, or what modules are using. */
  615. seq_printf(m, " - -");
  616. }
  617. static inline void module_unload_free(struct module *mod)
  618. {
  619. }
  620. static inline int use_module(struct module *a, struct module *b)
  621. {
  622. return strong_try_module_get(b);
  623. }
  624. static inline void module_unload_init(struct module *mod)
  625. {
  626. }
  627. #endif /* CONFIG_MODULE_UNLOAD */
  628. #ifdef CONFIG_OBSOLETE_MODPARM
  629. /* Bounds checking done below */
  630. static int obsparm_copy_string(const char *val, struct kernel_param *kp)
  631. {
  632. strcpy(kp->arg, val);
  633. return 0;
  634. }
  635. static int set_obsolete(const char *val, struct kernel_param *kp)
  636. {
  637. unsigned int min, max;
  638. unsigned int size, maxsize;
  639. int dummy;
  640. char *endp;
  641. const char *p;
  642. struct obsolete_modparm *obsparm = kp->arg;
  643. if (!val) {
  644. printk(KERN_ERR "Parameter %s needs an argument\n", kp->name);
  645. return -EINVAL;
  646. }
  647. /* type is: [min[-max]]{b,h,i,l,s} */
  648. p = obsparm->type;
  649. min = simple_strtol(p, &endp, 10);
  650. if (endp == obsparm->type)
  651. min = max = 1;
  652. else if (*endp == '-') {
  653. p = endp+1;
  654. max = simple_strtol(p, &endp, 10);
  655. } else
  656. max = min;
  657. switch (*endp) {
  658. case 'b':
  659. return param_array(kp->name, val, min, max, obsparm->addr,
  660. 1, param_set_byte, &dummy);
  661. case 'h':
  662. return param_array(kp->name, val, min, max, obsparm->addr,
  663. sizeof(short), param_set_short, &dummy);
  664. case 'i':
  665. return param_array(kp->name, val, min, max, obsparm->addr,
  666. sizeof(int), param_set_int, &dummy);
  667. case 'l':
  668. return param_array(kp->name, val, min, max, obsparm->addr,
  669. sizeof(long), param_set_long, &dummy);
  670. case 's':
  671. return param_array(kp->name, val, min, max, obsparm->addr,
  672. sizeof(char *), param_set_charp, &dummy);
  673. case 'c':
  674. /* Undocumented: 1-5c50 means 1-5 strings of up to 49 chars,
  675. and the decl is "char xxx[5][50];" */
  676. p = endp+1;
  677. maxsize = simple_strtol(p, &endp, 10);
  678. /* We check lengths here (yes, this is a hack). */
  679. p = val;
  680. while (p[size = strcspn(p, ",")]) {
  681. if (size >= maxsize)
  682. goto oversize;
  683. p += size+1;
  684. }
  685. if (size >= maxsize)
  686. goto oversize;
  687. return param_array(kp->name, val, min, max, obsparm->addr,
  688. maxsize, obsparm_copy_string, &dummy);
  689. }
  690. printk(KERN_ERR "Unknown obsolete parameter type %s\n", obsparm->type);
  691. return -EINVAL;
  692. oversize:
  693. printk(KERN_ERR
  694. "Parameter %s doesn't fit in %u chars.\n", kp->name, maxsize);
  695. return -EINVAL;
  696. }
  697. static int obsolete_params(const char *name,
  698. char *args,
  699. struct obsolete_modparm obsparm[],
  700. unsigned int num,
  701. Elf_Shdr *sechdrs,
  702. unsigned int symindex,
  703. const char *strtab)
  704. {
  705. struct kernel_param *kp;
  706. unsigned int i;
  707. int ret;
  708. kp = kmalloc(sizeof(kp[0]) * num, GFP_KERNEL);
  709. if (!kp)
  710. return -ENOMEM;
  711. for (i = 0; i < num; i++) {
  712. char sym_name[128 + sizeof(MODULE_SYMBOL_PREFIX)];
  713. snprintf(sym_name, sizeof(sym_name), "%s%s",
  714. MODULE_SYMBOL_PREFIX, obsparm[i].name);
  715. kp[i].name = obsparm[i].name;
  716. kp[i].perm = 000;
  717. kp[i].set = set_obsolete;
  718. kp[i].get = NULL;
  719. obsparm[i].addr
  720. = (void *)find_local_symbol(sechdrs, symindex, strtab,
  721. sym_name);
  722. if (!obsparm[i].addr) {
  723. printk("%s: falsely claims to have parameter %s\n",
  724. name, obsparm[i].name);
  725. ret = -EINVAL;
  726. goto out;
  727. }
  728. kp[i].arg = &obsparm[i];
  729. }
  730. ret = parse_args(name, args, kp, num, NULL);
  731. out:
  732. kfree(kp);
  733. return ret;
  734. }
  735. #else
  736. static int obsolete_params(const char *name,
  737. char *args,
  738. struct obsolete_modparm obsparm[],
  739. unsigned int num,
  740. Elf_Shdr *sechdrs,
  741. unsigned int symindex,
  742. const char *strtab)
  743. {
  744. if (num != 0)
  745. printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
  746. name);
  747. return 0;
  748. }
  749. #endif /* CONFIG_OBSOLETE_MODPARM */
  750. static const char vermagic[] = VERMAGIC_STRING;
  751. #ifdef CONFIG_MODVERSIONS
  752. static int check_version(Elf_Shdr *sechdrs,
  753. unsigned int versindex,
  754. const char *symname,
  755. struct module *mod,
  756. const unsigned long *crc)
  757. {
  758. unsigned int i, num_versions;
  759. struct modversion_info *versions;
  760. /* Exporting module didn't supply crcs? OK, we're already tainted. */
  761. if (!crc)
  762. return 1;
  763. versions = (void *) sechdrs[versindex].sh_addr;
  764. num_versions = sechdrs[versindex].sh_size
  765. / sizeof(struct modversion_info);
  766. for (i = 0; i < num_versions; i++) {
  767. if (strcmp(versions[i].name, symname) != 0)
  768. continue;
  769. if (versions[i].crc == *crc)
  770. return 1;
  771. printk("%s: disagrees about version of symbol %s\n",
  772. mod->name, symname);
  773. DEBUGP("Found checksum %lX vs module %lX\n",
  774. *crc, versions[i].crc);
  775. return 0;
  776. }
  777. /* Not in module's version table. OK, but that taints the kernel. */
  778. if (!(tainted & TAINT_FORCED_MODULE)) {
  779. printk("%s: no version for \"%s\" found: kernel tainted.\n",
  780. mod->name, symname);
  781. tainted |= TAINT_FORCED_MODULE;
  782. }
  783. return 1;
  784. }
  785. static inline int check_modstruct_version(Elf_Shdr *sechdrs,
  786. unsigned int versindex,
  787. struct module *mod)
  788. {
  789. const unsigned long *crc;
  790. struct module *owner;
  791. if (!__find_symbol("struct_module", &owner, &crc, 1))
  792. BUG();
  793. return check_version(sechdrs, versindex, "struct_module", mod,
  794. crc);
  795. }
  796. /* First part is kernel version, which we ignore. */
  797. static inline int same_magic(const char *amagic, const char *bmagic)
  798. {
  799. amagic += strcspn(amagic, " ");
  800. bmagic += strcspn(bmagic, " ");
  801. return strcmp(amagic, bmagic) == 0;
  802. }
  803. #else
  804. static inline int check_version(Elf_Shdr *sechdrs,
  805. unsigned int versindex,
  806. const char *symname,
  807. struct module *mod,
  808. const unsigned long *crc)
  809. {
  810. return 1;
  811. }
  812. static inline int check_modstruct_version(Elf_Shdr *sechdrs,
  813. unsigned int versindex,
  814. struct module *mod)
  815. {
  816. return 1;
  817. }
  818. static inline int same_magic(const char *amagic, const char *bmagic)
  819. {
  820. return strcmp(amagic, bmagic) == 0;
  821. }
  822. #endif /* CONFIG_MODVERSIONS */
  823. /* Resolve a symbol for this module. I.e. if we find one, record usage.
  824. Must be holding module_mutex. */
  825. static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
  826. unsigned int versindex,
  827. const char *name,
  828. struct module *mod)
  829. {
  830. struct module *owner;
  831. unsigned long ret;
  832. const unsigned long *crc;
  833. spin_lock_irq(&modlist_lock);
  834. ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
  835. if (ret) {
  836. /* use_module can fail due to OOM, or module unloading */
  837. if (!check_version(sechdrs, versindex, name, mod, crc) ||
  838. !use_module(mod, owner))
  839. ret = 0;
  840. }
  841. spin_unlock_irq(&modlist_lock);
  842. return ret;
  843. }
  844. /*
  845. * /sys/module/foo/sections stuff
  846. * J. Corbet <corbet@lwn.net>
  847. */
  848. #ifdef CONFIG_KALLSYMS
  849. static ssize_t module_sect_show(struct module_attribute *mattr,
  850. struct module *mod, char *buf)
  851. {
  852. struct module_sect_attr *sattr =
  853. container_of(mattr, struct module_sect_attr, mattr);
  854. return sprintf(buf, "0x%lx\n", sattr->address);
  855. }
  856. static void add_sect_attrs(struct module *mod, unsigned int nsect,
  857. char *secstrings, Elf_Shdr *sechdrs)
  858. {
  859. unsigned int nloaded = 0, i, size[2];
  860. struct module_sect_attrs *sect_attrs;
  861. struct module_sect_attr *sattr;
  862. struct attribute **gattr;
  863. /* Count loaded sections and allocate structures */
  864. for (i = 0; i < nsect; i++)
  865. if (sechdrs[i].sh_flags & SHF_ALLOC)
  866. nloaded++;
  867. size[0] = ALIGN(sizeof(*sect_attrs)
  868. + nloaded * sizeof(sect_attrs->attrs[0]),
  869. sizeof(sect_attrs->grp.attrs[0]));
  870. size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
  871. if (! (sect_attrs = kmalloc(size[0] + size[1], GFP_KERNEL)))
  872. return;
  873. /* Setup section attributes. */
  874. sect_attrs->grp.name = "sections";
  875. sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
  876. sattr = &sect_attrs->attrs[0];
  877. gattr = &sect_attrs->grp.attrs[0];
  878. for (i = 0; i < nsect; i++) {
  879. if (! (sechdrs[i].sh_flags & SHF_ALLOC))
  880. continue;
  881. sattr->address = sechdrs[i].sh_addr;
  882. strlcpy(sattr->name, secstrings + sechdrs[i].sh_name,
  883. MODULE_SECT_NAME_LEN);
  884. sattr->mattr.show = module_sect_show;
  885. sattr->mattr.store = NULL;
  886. sattr->mattr.attr.name = sattr->name;
  887. sattr->mattr.attr.owner = mod;
  888. sattr->mattr.attr.mode = S_IRUGO;
  889. *(gattr++) = &(sattr++)->mattr.attr;
  890. }
  891. *gattr = NULL;
  892. if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
  893. goto out;
  894. mod->sect_attrs = sect_attrs;
  895. return;
  896. out:
  897. kfree(sect_attrs);
  898. }
  899. static void remove_sect_attrs(struct module *mod)
  900. {
  901. if (mod->sect_attrs) {
  902. sysfs_remove_group(&mod->mkobj.kobj,
  903. &mod->sect_attrs->grp);
  904. /* We are positive that no one is using any sect attrs
  905. * at this point. Deallocate immediately. */
  906. kfree(mod->sect_attrs);
  907. mod->sect_attrs = NULL;
  908. }
  909. }
  910. #else
  911. static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
  912. char *sectstrings, Elf_Shdr *sechdrs)
  913. {
  914. }
  915. static inline void remove_sect_attrs(struct module *mod)
  916. {
  917. }
  918. #endif /* CONFIG_KALLSYMS */
  919. #ifdef CONFIG_MODULE_UNLOAD
  920. static inline int module_add_refcnt_attr(struct module *mod)
  921. {
  922. return sysfs_create_file(&mod->mkobj.kobj, &refcnt.attr);
  923. }
  924. static void module_remove_refcnt_attr(struct module *mod)
  925. {
  926. return sysfs_remove_file(&mod->mkobj.kobj, &refcnt.attr);
  927. }
  928. #else
  929. static inline int module_add_refcnt_attr(struct module *mod)
  930. {
  931. return 0;
  932. }
  933. static void module_remove_refcnt_attr(struct module *mod)
  934. {
  935. }
  936. #endif
  937. #ifdef CONFIG_MODULE_UNLOAD
  938. static int module_add_modinfo_attrs(struct module *mod)
  939. {
  940. struct module_attribute *attr;
  941. int error = 0;
  942. int i;
  943. for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
  944. if (!attr->test ||
  945. (attr->test && attr->test(mod)))
  946. error = sysfs_create_file(&mod->mkobj.kobj,&attr->attr);
  947. }
  948. return error;
  949. }
  950. static void module_remove_modinfo_attrs(struct module *mod)
  951. {
  952. struct module_attribute *attr;
  953. int i;
  954. for (i = 0; (attr = modinfo_attrs[i]); i++) {
  955. sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
  956. attr->free(mod);
  957. }
  958. }
  959. #endif
  960. static int mod_sysfs_setup(struct module *mod,
  961. struct kernel_param *kparam,
  962. unsigned int num_params)
  963. {
  964. int err;
  965. memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
  966. err = kobject_set_name(&mod->mkobj.kobj, "%s", mod->name);
  967. if (err)
  968. goto out;
  969. kobj_set_kset_s(&mod->mkobj, module_subsys);
  970. mod->mkobj.mod = mod;
  971. err = kobject_register(&mod->mkobj.kobj);
  972. if (err)
  973. goto out;
  974. err = module_add_refcnt_attr(mod);
  975. if (err)
  976. goto out_unreg;
  977. err = module_param_sysfs_setup(mod, kparam, num_params);
  978. if (err)
  979. goto out_unreg;
  980. #ifdef CONFIG_MODULE_UNLOAD
  981. err = module_add_modinfo_attrs(mod);
  982. if (err)
  983. goto out_unreg;
  984. #endif
  985. return 0;
  986. out_unreg:
  987. kobject_unregister(&mod->mkobj.kobj);
  988. out:
  989. return err;
  990. }
  991. static void mod_kobject_remove(struct module *mod)
  992. {
  993. #ifdef CONFIG_MODULE_UNLOAD
  994. module_remove_modinfo_attrs(mod);
  995. #endif
  996. module_remove_refcnt_attr(mod);
  997. module_param_sysfs_remove(mod);
  998. kobject_unregister(&mod->mkobj.kobj);
  999. }
  1000. /*
  1001. * unlink the module with the whole machine is stopped with interrupts off
  1002. * - this defends against kallsyms not taking locks
  1003. */
  1004. static int __unlink_module(void *_mod)
  1005. {
  1006. struct module *mod = _mod;
  1007. list_del(&mod->list);
  1008. return 0;
  1009. }
  1010. /* Free a module, remove from lists, etc (must hold module mutex). */
  1011. static void free_module(struct module *mod)
  1012. {
  1013. /* Delete from various lists */
  1014. stop_machine_run(__unlink_module, mod, NR_CPUS);
  1015. remove_sect_attrs(mod);
  1016. mod_kobject_remove(mod);
  1017. /* Arch-specific cleanup. */
  1018. module_arch_cleanup(mod);
  1019. /* Module unload stuff */
  1020. module_unload_free(mod);
  1021. /* This may be NULL, but that's OK */
  1022. module_free(mod, mod->module_init);
  1023. kfree(mod->args);
  1024. if (mod->percpu)
  1025. percpu_modfree(mod->percpu);
  1026. /* Finally, free the core (containing the module structure) */
  1027. module_free(mod, mod->module_core);
  1028. }
  1029. void *__symbol_get(const char *symbol)
  1030. {
  1031. struct module *owner;
  1032. unsigned long value, flags;
  1033. const unsigned long *crc;
  1034. spin_lock_irqsave(&modlist_lock, flags);
  1035. value = __find_symbol(symbol, &owner, &crc, 1);
  1036. if (value && !strong_try_module_get(owner))
  1037. value = 0;
  1038. spin_unlock_irqrestore(&modlist_lock, flags);
  1039. return (void *)value;
  1040. }
  1041. EXPORT_SYMBOL_GPL(__symbol_get);
  1042. /* Change all symbols so that sh_value encodes the pointer directly. */
  1043. static int simplify_symbols(Elf_Shdr *sechdrs,
  1044. unsigned int symindex,
  1045. const char *strtab,
  1046. unsigned int versindex,
  1047. unsigned int pcpuindex,
  1048. struct module *mod)
  1049. {
  1050. Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
  1051. unsigned long secbase;
  1052. unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
  1053. int ret = 0;
  1054. for (i = 1; i < n; i++) {
  1055. switch (sym[i].st_shndx) {
  1056. case SHN_COMMON:
  1057. /* We compiled with -fno-common. These are not
  1058. supposed to happen. */
  1059. DEBUGP("Common symbol: %s\n", strtab + sym[i].st_name);
  1060. printk("%s: please compile with -fno-common\n",
  1061. mod->name);
  1062. ret = -ENOEXEC;
  1063. break;
  1064. case SHN_ABS:
  1065. /* Don't need to do anything */
  1066. DEBUGP("Absolute symbol: 0x%08lx\n",
  1067. (long)sym[i].st_value);
  1068. break;
  1069. case SHN_UNDEF:
  1070. sym[i].st_value
  1071. = resolve_symbol(sechdrs, versindex,
  1072. strtab + sym[i].st_name, mod);
  1073. /* Ok if resolved. */
  1074. if (sym[i].st_value != 0)
  1075. break;
  1076. /* Ok if weak. */
  1077. if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
  1078. break;
  1079. printk(KERN_WARNING "%s: Unknown symbol %s\n",
  1080. mod->name, strtab + sym[i].st_name);
  1081. ret = -ENOENT;
  1082. break;
  1083. default:
  1084. /* Divert to percpu allocation if a percpu var. */
  1085. if (sym[i].st_shndx == pcpuindex)
  1086. secbase = (unsigned long)mod->percpu;
  1087. else
  1088. secbase = sechdrs[sym[i].st_shndx].sh_addr;
  1089. sym[i].st_value += secbase;
  1090. break;
  1091. }
  1092. }
  1093. return ret;
  1094. }
  1095. /* Update size with this section: return offset. */
  1096. static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
  1097. {
  1098. long ret;
  1099. ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
  1100. *size = ret + sechdr->sh_size;
  1101. return ret;
  1102. }
  1103. /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
  1104. might -- code, read-only data, read-write data, small data. Tally
  1105. sizes, and place the offsets into sh_entsize fields: high bit means it
  1106. belongs in init. */
  1107. static void layout_sections(struct module *mod,
  1108. const Elf_Ehdr *hdr,
  1109. Elf_Shdr *sechdrs,
  1110. const char *secstrings)
  1111. {
  1112. static unsigned long const masks[][2] = {
  1113. /* NOTE: all executable code must be the first section
  1114. * in this array; otherwise modify the text_size
  1115. * finder in the two loops below */
  1116. { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
  1117. { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
  1118. { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
  1119. { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
  1120. };
  1121. unsigned int m, i;
  1122. for (i = 0; i < hdr->e_shnum; i++)
  1123. sechdrs[i].sh_entsize = ~0UL;
  1124. DEBUGP("Core section allocation order:\n");
  1125. for (m = 0; m < ARRAY_SIZE(masks); ++m) {
  1126. for (i = 0; i < hdr->e_shnum; ++i) {
  1127. Elf_Shdr *s = &sechdrs[i];
  1128. if ((s->sh_flags & masks[m][0]) != masks[m][0]
  1129. || (s->sh_flags & masks[m][1])
  1130. || s->sh_entsize != ~0UL
  1131. || strncmp(secstrings + s->sh_name,
  1132. ".init", 5) == 0)
  1133. continue;
  1134. s->sh_entsize = get_offset(&mod->core_size, s);
  1135. DEBUGP("\t%s\n", secstrings + s->sh_name);
  1136. }
  1137. if (m == 0)
  1138. mod->core_text_size = mod->core_size;
  1139. }
  1140. DEBUGP("Init section allocation order:\n");
  1141. for (m = 0; m < ARRAY_SIZE(masks); ++m) {
  1142. for (i = 0; i < hdr->e_shnum; ++i) {
  1143. Elf_Shdr *s = &sechdrs[i];
  1144. if ((s->sh_flags & masks[m][0]) != masks[m][0]
  1145. || (s->sh_flags & masks[m][1])
  1146. || s->sh_entsize != ~0UL
  1147. || strncmp(secstrings + s->sh_name,
  1148. ".init", 5) != 0)
  1149. continue;
  1150. s->sh_entsize = (get_offset(&mod->init_size, s)
  1151. | INIT_OFFSET_MASK);
  1152. DEBUGP("\t%s\n", secstrings + s->sh_name);
  1153. }
  1154. if (m == 0)
  1155. mod->init_text_size = mod->init_size;
  1156. }
  1157. }
  1158. static inline int license_is_gpl_compatible(const char *license)
  1159. {
  1160. return (strcmp(license, "GPL") == 0
  1161. || strcmp(license, "GPL v2") == 0
  1162. || strcmp(license, "GPL and additional rights") == 0
  1163. || strcmp(license, "Dual BSD/GPL") == 0
  1164. || strcmp(license, "Dual MPL/GPL") == 0);
  1165. }
  1166. static void set_license(struct module *mod, const char *license)
  1167. {
  1168. if (!license)
  1169. license = "unspecified";
  1170. mod->license_gplok = license_is_gpl_compatible(license);
  1171. if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) {
  1172. printk(KERN_WARNING "%s: module license '%s' taints kernel.\n",
  1173. mod->name, license);
  1174. tainted |= TAINT_PROPRIETARY_MODULE;
  1175. }
  1176. }
  1177. /* Parse tag=value strings from .modinfo section */
  1178. static char *next_string(char *string, unsigned long *secsize)
  1179. {
  1180. /* Skip non-zero chars */
  1181. while (string[0]) {
  1182. string++;
  1183. if ((*secsize)-- <= 1)
  1184. return NULL;
  1185. }
  1186. /* Skip any zero padding. */
  1187. while (!string[0]) {
  1188. string++;
  1189. if ((*secsize)-- <= 1)
  1190. return NULL;
  1191. }
  1192. return string;
  1193. }
  1194. static char *get_modinfo(Elf_Shdr *sechdrs,
  1195. unsigned int info,
  1196. const char *tag)
  1197. {
  1198. char *p;
  1199. unsigned int taglen = strlen(tag);
  1200. unsigned long size = sechdrs[info].sh_size;
  1201. for (p = (char *)sechdrs[info].sh_addr; p; p = next_string(p, &size)) {
  1202. if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
  1203. return p + taglen + 1;
  1204. }
  1205. return NULL;
  1206. }
  1207. #ifdef CONFIG_MODULE_UNLOAD
  1208. static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
  1209. unsigned int infoindex)
  1210. {
  1211. struct module_attribute *attr;
  1212. int i;
  1213. for (i = 0; (attr = modinfo_attrs[i]); i++) {
  1214. if (attr->setup)
  1215. attr->setup(mod,
  1216. get_modinfo(sechdrs,
  1217. infoindex,
  1218. attr->attr.name));
  1219. }
  1220. }
  1221. #endif
  1222. #ifdef CONFIG_KALLSYMS
  1223. int is_exported(const char *name, const struct module *mod)
  1224. {
  1225. unsigned int i;
  1226. if (!mod) {
  1227. for (i = 0; __start___ksymtab+i < __stop___ksymtab; i++)
  1228. if (strcmp(__start___ksymtab[i].name, name) == 0)
  1229. return 1;
  1230. return 0;
  1231. }
  1232. for (i = 0; i < mod->num_syms; i++)
  1233. if (strcmp(mod->syms[i].name, name) == 0)
  1234. return 1;
  1235. return 0;
  1236. }
  1237. /* As per nm */
  1238. static char elf_type(const Elf_Sym *sym,
  1239. Elf_Shdr *sechdrs,
  1240. const char *secstrings,
  1241. struct module *mod)
  1242. {
  1243. if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
  1244. if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
  1245. return 'v';
  1246. else
  1247. return 'w';
  1248. }
  1249. if (sym->st_shndx == SHN_UNDEF)
  1250. return 'U';
  1251. if (sym->st_shndx == SHN_ABS)
  1252. return 'a';
  1253. if (sym->st_shndx >= SHN_LORESERVE)
  1254. return '?';
  1255. if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
  1256. return 't';
  1257. if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
  1258. && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
  1259. if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
  1260. return 'r';
  1261. else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
  1262. return 'g';
  1263. else
  1264. return 'd';
  1265. }
  1266. if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
  1267. if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
  1268. return 's';
  1269. else
  1270. return 'b';
  1271. }
  1272. if (strncmp(secstrings + sechdrs[sym->st_shndx].sh_name,
  1273. ".debug", strlen(".debug")) == 0)
  1274. return 'n';
  1275. return '?';
  1276. }
  1277. static void add_kallsyms(struct module *mod,
  1278. Elf_Shdr *sechdrs,
  1279. unsigned int symindex,
  1280. unsigned int strindex,
  1281. const char *secstrings)
  1282. {
  1283. unsigned int i;
  1284. mod->symtab = (void *)sechdrs[symindex].sh_addr;
  1285. mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
  1286. mod->strtab = (void *)sechdrs[strindex].sh_addr;
  1287. /* Set types up while we still have access to sections. */
  1288. for (i = 0; i < mod->num_symtab; i++)
  1289. mod->symtab[i].st_info
  1290. = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
  1291. }
  1292. #else
  1293. static inline void add_kallsyms(struct module *mod,
  1294. Elf_Shdr *sechdrs,
  1295. unsigned int symindex,
  1296. unsigned int strindex,
  1297. const char *secstrings)
  1298. {
  1299. }
  1300. #endif /* CONFIG_KALLSYMS */
  1301. /* Allocate and load the module: note that size of section 0 is always
  1302. zero, and we rely on this for optional sections. */
  1303. static struct module *load_module(void __user *umod,
  1304. unsigned long len,
  1305. const char __user *uargs)
  1306. {
  1307. Elf_Ehdr *hdr;
  1308. Elf_Shdr *sechdrs;
  1309. char *secstrings, *args, *modmagic, *strtab = NULL;
  1310. unsigned int i, symindex = 0, strindex = 0, setupindex, exindex,
  1311. exportindex, modindex, obsparmindex, infoindex, gplindex,
  1312. crcindex, gplcrcindex, versindex, pcpuindex;
  1313. long arglen;
  1314. struct module *mod;
  1315. long err = 0;
  1316. void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
  1317. struct exception_table_entry *extable;
  1318. mm_segment_t old_fs;
  1319. DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
  1320. umod, len, uargs);
  1321. if (len < sizeof(*hdr))
  1322. return ERR_PTR(-ENOEXEC);
  1323. /* Suck in entire file: we'll want most of it. */
  1324. /* vmalloc barfs on "unusual" numbers. Check here */
  1325. if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
  1326. return ERR_PTR(-ENOMEM);
  1327. if (copy_from_user(hdr, umod, len) != 0) {
  1328. err = -EFAULT;
  1329. goto free_hdr;
  1330. }
  1331. /* Sanity checks against insmoding binaries or wrong arch,
  1332. weird elf version */
  1333. if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
  1334. || hdr->e_type != ET_REL
  1335. || !elf_check_arch(hdr)
  1336. || hdr->e_shentsize != sizeof(*sechdrs)) {
  1337. err = -ENOEXEC;
  1338. goto free_hdr;
  1339. }
  1340. if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr))
  1341. goto truncated;
  1342. /* Convenience variables */
  1343. sechdrs = (void *)hdr + hdr->e_shoff;
  1344. secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
  1345. sechdrs[0].sh_addr = 0;
  1346. for (i = 1; i < hdr->e_shnum; i++) {
  1347. if (sechdrs[i].sh_type != SHT_NOBITS
  1348. && len < sechdrs[i].sh_offset + sechdrs[i].sh_size)
  1349. goto truncated;
  1350. /* Mark all sections sh_addr with their address in the
  1351. temporary image. */
  1352. sechdrs[i].sh_addr = (size_t)hdr + sechdrs[i].sh_offset;
  1353. /* Internal symbols and strings. */
  1354. if (sechdrs[i].sh_type == SHT_SYMTAB) {
  1355. symindex = i;
  1356. strindex = sechdrs[i].sh_link;
  1357. strtab = (char *)hdr + sechdrs[strindex].sh_offset;
  1358. }
  1359. #ifndef CONFIG_MODULE_UNLOAD
  1360. /* Don't load .exit sections */
  1361. if (strncmp(secstrings+sechdrs[i].sh_name, ".exit", 5) == 0)
  1362. sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
  1363. #endif
  1364. }
  1365. modindex = find_sec(hdr, sechdrs, secstrings,
  1366. ".gnu.linkonce.this_module");
  1367. if (!modindex) {
  1368. printk(KERN_WARNING "No module found in object\n");
  1369. err = -ENOEXEC;
  1370. goto free_hdr;
  1371. }
  1372. mod = (void *)sechdrs[modindex].sh_addr;
  1373. if (symindex == 0) {
  1374. printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
  1375. mod->name);
  1376. err = -ENOEXEC;
  1377. goto free_hdr;
  1378. }
  1379. /* Optional sections */
  1380. exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
  1381. gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
  1382. crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
  1383. gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
  1384. setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
  1385. exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
  1386. obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
  1387. versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
  1388. infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
  1389. pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
  1390. /* Don't keep modinfo section */
  1391. sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
  1392. #ifdef CONFIG_KALLSYMS
  1393. /* Keep symbol and string tables for decoding later. */
  1394. sechdrs[symindex].sh_flags |= SHF_ALLOC;
  1395. sechdrs[strindex].sh_flags |= SHF_ALLOC;
  1396. #endif
  1397. /* Check module struct version now, before we try to use module. */
  1398. if (!check_modstruct_version(sechdrs, versindex, mod)) {
  1399. err = -ENOEXEC;
  1400. goto free_hdr;
  1401. }
  1402. modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
  1403. /* This is allowed: modprobe --force will invalidate it. */
  1404. if (!modmagic) {
  1405. tainted |= TAINT_FORCED_MODULE;
  1406. printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
  1407. mod->name);
  1408. } else if (!same_magic(modmagic, vermagic)) {
  1409. printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
  1410. mod->name, modmagic, vermagic);
  1411. err = -ENOEXEC;
  1412. goto free_hdr;
  1413. }
  1414. /* Now copy in args */
  1415. arglen = strlen_user(uargs);
  1416. if (!arglen) {
  1417. err = -EFAULT;
  1418. goto free_hdr;
  1419. }
  1420. args = kmalloc(arglen, GFP_KERNEL);
  1421. if (!args) {
  1422. err = -ENOMEM;
  1423. goto free_hdr;
  1424. }
  1425. if (copy_from_user(args, uargs, arglen) != 0) {
  1426. err = -EFAULT;
  1427. goto free_mod;
  1428. }
  1429. if (find_module(mod->name)) {
  1430. err = -EEXIST;
  1431. goto free_mod;
  1432. }
  1433. mod->state = MODULE_STATE_COMING;
  1434. /* Allow arches to frob section contents and sizes. */
  1435. err = module_frob_arch_sections(hdr, sechdrs, secstrings, mod);
  1436. if (err < 0)
  1437. goto free_mod;
  1438. if (pcpuindex) {
  1439. /* We have a special allocation for this section. */
  1440. percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
  1441. sechdrs[pcpuindex].sh_addralign,
  1442. mod->name);
  1443. if (!percpu) {
  1444. err = -ENOMEM;
  1445. goto free_mod;
  1446. }
  1447. sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
  1448. mod->percpu = percpu;
  1449. }
  1450. /* Determine total sizes, and put offsets in sh_entsize. For now
  1451. this is done generically; there doesn't appear to be any
  1452. special cases for the architectures. */
  1453. layout_sections(mod, hdr, sechdrs, secstrings);
  1454. /* Do the allocs. */
  1455. ptr = module_alloc(mod->core_size);
  1456. if (!ptr) {
  1457. err = -ENOMEM;
  1458. goto free_percpu;
  1459. }
  1460. memset(ptr, 0, mod->core_size);
  1461. mod->module_core = ptr;
  1462. ptr = module_alloc(mod->init_size);
  1463. if (!ptr && mod->init_size) {
  1464. err = -ENOMEM;
  1465. goto free_core;
  1466. }
  1467. memset(ptr, 0, mod->init_size);
  1468. mod->module_init = ptr;
  1469. /* Transfer each section which specifies SHF_ALLOC */
  1470. DEBUGP("final section addresses:\n");
  1471. for (i = 0; i < hdr->e_shnum; i++) {
  1472. void *dest;
  1473. if (!(sechdrs[i].sh_flags & SHF_ALLOC))
  1474. continue;
  1475. if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
  1476. dest = mod->module_init
  1477. + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
  1478. else
  1479. dest = mod->module_core + sechdrs[i].sh_entsize;
  1480. if (sechdrs[i].sh_type != SHT_NOBITS)
  1481. memcpy(dest, (void *)sechdrs[i].sh_addr,
  1482. sechdrs[i].sh_size);
  1483. /* Update sh_addr to point to copy in image. */
  1484. sechdrs[i].sh_addr = (unsigned long)dest;
  1485. DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
  1486. }
  1487. /* Module has been moved. */
  1488. mod = (void *)sechdrs[modindex].sh_addr;
  1489. /* Now we've moved module, initialize linked lists, etc. */
  1490. module_unload_init(mod);
  1491. /* Set up license info based on the info section */
  1492. set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
  1493. #ifdef CONFIG_MODULE_UNLOAD
  1494. /* Set up MODINFO_ATTR fields */
  1495. setup_modinfo(mod, sechdrs, infoindex);
  1496. #endif
  1497. /* Fix up syms, so that st_value is a pointer to location. */
  1498. err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
  1499. mod);
  1500. if (err < 0)
  1501. goto cleanup;
  1502. /* Set up EXPORTed & EXPORT_GPLed symbols (section 0 is 0 length) */
  1503. mod->num_syms = sechdrs[exportindex].sh_size / sizeof(*mod->syms);
  1504. mod->syms = (void *)sechdrs[exportindex].sh_addr;
  1505. if (crcindex)
  1506. mod->crcs = (void *)sechdrs[crcindex].sh_addr;
  1507. mod->num_gpl_syms = sechdrs[gplindex].sh_size / sizeof(*mod->gpl_syms);
  1508. mod->gpl_syms = (void *)sechdrs[gplindex].sh_addr;
  1509. if (gplcrcindex)
  1510. mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
  1511. #ifdef CONFIG_MODVERSIONS
  1512. if ((mod->num_syms && !crcindex) ||
  1513. (mod->num_gpl_syms && !gplcrcindex)) {
  1514. printk(KERN_WARNING "%s: No versions for exported symbols."
  1515. " Tainting kernel.\n", mod->name);
  1516. tainted |= TAINT_FORCED_MODULE;
  1517. }
  1518. #endif
  1519. /* Now do relocations. */
  1520. for (i = 1; i < hdr->e_shnum; i++) {
  1521. const char *strtab = (char *)sechdrs[strindex].sh_addr;
  1522. unsigned int info = sechdrs[i].sh_info;
  1523. /* Not a valid relocation section? */
  1524. if (info >= hdr->e_shnum)
  1525. continue;
  1526. /* Don't bother with non-allocated sections */
  1527. if (!(sechdrs[info].sh_flags & SHF_ALLOC))
  1528. continue;
  1529. if (sechdrs[i].sh_type == SHT_REL)
  1530. err = apply_relocate(sechdrs, strtab, symindex, i,mod);
  1531. else if (sechdrs[i].sh_type == SHT_RELA)
  1532. err = apply_relocate_add(sechdrs, strtab, symindex, i,
  1533. mod);
  1534. if (err < 0)
  1535. goto cleanup;
  1536. }
  1537. /* Set up and sort exception table */
  1538. mod->num_exentries = sechdrs[exindex].sh_size / sizeof(*mod->extable);
  1539. mod->extable = extable = (void *)sechdrs[exindex].sh_addr;
  1540. sort_extable(extable, extable + mod->num_exentries);
  1541. /* Finally, copy percpu area over. */
  1542. percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
  1543. sechdrs[pcpuindex].sh_size);
  1544. add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
  1545. err = module_finalize(hdr, sechdrs, mod);
  1546. if (err < 0)
  1547. goto cleanup;
  1548. /* flush the icache in correct context */
  1549. old_fs = get_fs();
  1550. set_fs(KERNEL_DS);
  1551. /*
  1552. * Flush the instruction cache, since we've played with text.
  1553. * Do it before processing of module parameters, so the module
  1554. * can provide parameter accessor functions of its own.
  1555. */
  1556. if (mod->module_init)
  1557. flush_icache_range((unsigned long)mod->module_init,
  1558. (unsigned long)mod->module_init
  1559. + mod->init_size);
  1560. flush_icache_range((unsigned long)mod->module_core,
  1561. (unsigned long)mod->module_core + mod->core_size);
  1562. set_fs(old_fs);
  1563. mod->args = args;
  1564. if (obsparmindex) {
  1565. err = obsolete_params(mod->name, mod->args,
  1566. (struct obsolete_modparm *)
  1567. sechdrs[obsparmindex].sh_addr,
  1568. sechdrs[obsparmindex].sh_size
  1569. / sizeof(struct obsolete_modparm),
  1570. sechdrs, symindex,
  1571. (char *)sechdrs[strindex].sh_addr);
  1572. if (setupindex)
  1573. printk(KERN_WARNING "%s: Ignoring new-style "
  1574. "parameters in presence of obsolete ones\n",
  1575. mod->name);
  1576. } else {
  1577. /* Size of section 0 is 0, so this works well if no params */
  1578. err = parse_args(mod->name, mod->args,
  1579. (struct kernel_param *)
  1580. sechdrs[setupindex].sh_addr,
  1581. sechdrs[setupindex].sh_size
  1582. / sizeof(struct kernel_param),
  1583. NULL);
  1584. }
  1585. if (err < 0)
  1586. goto arch_cleanup;
  1587. err = mod_sysfs_setup(mod,
  1588. (struct kernel_param *)
  1589. sechdrs[setupindex].sh_addr,
  1590. sechdrs[setupindex].sh_size
  1591. / sizeof(struct kernel_param));
  1592. if (err < 0)
  1593. goto arch_cleanup;
  1594. add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
  1595. /* Get rid of temporary copy */
  1596. vfree(hdr);
  1597. /* Done! */
  1598. return mod;
  1599. arch_cleanup:
  1600. module_arch_cleanup(mod);
  1601. cleanup:
  1602. module_unload_free(mod);
  1603. module_free(mod, mod->module_init);
  1604. free_core:
  1605. module_free(mod, mod->module_core);
  1606. free_percpu:
  1607. if (percpu)
  1608. percpu_modfree(percpu);
  1609. free_mod:
  1610. kfree(args);
  1611. free_hdr:
  1612. vfree(hdr);
  1613. if (err < 0) return ERR_PTR(err);
  1614. else return ptr;
  1615. truncated:
  1616. printk(KERN_ERR "Module len %lu truncated\n", len);
  1617. err = -ENOEXEC;
  1618. goto free_hdr;
  1619. }
  1620. /*
  1621. * link the module with the whole machine is stopped with interrupts off
  1622. * - this defends against kallsyms not taking locks
  1623. */
  1624. static int __link_module(void *_mod)
  1625. {
  1626. struct module *mod = _mod;
  1627. list_add(&mod->list, &modules);
  1628. return 0;
  1629. }
  1630. /* This is where the real work happens */
  1631. asmlinkage long
  1632. sys_init_module(void __user *umod,
  1633. unsigned long len,
  1634. const char __user *uargs)
  1635. {
  1636. struct module *mod;
  1637. int ret = 0;
  1638. /* Must have permission */
  1639. if (!capable(CAP_SYS_MODULE))
  1640. return -EPERM;
  1641. /* Only one module load at a time, please */
  1642. if (down_interruptible(&module_mutex) != 0)
  1643. return -EINTR;
  1644. /* Do all the hard work */
  1645. mod = load_module(umod, len, uargs);
  1646. if (IS_ERR(mod)) {
  1647. up(&module_mutex);
  1648. return PTR_ERR(mod);
  1649. }
  1650. /* Now sew it into the lists. They won't access us, since
  1651. strong_try_module_get() will fail. */
  1652. stop_machine_run(__link_module, mod, NR_CPUS);
  1653. /* Drop lock so they can recurse */
  1654. up(&module_mutex);
  1655. down(&notify_mutex);
  1656. notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod);
  1657. up(&notify_mutex);
  1658. /* Start the module */
  1659. if (mod->init != NULL)
  1660. ret = mod->init();
  1661. if (ret < 0) {
  1662. /* Init routine failed: abort. Try to protect us from
  1663. buggy refcounters. */
  1664. mod->state = MODULE_STATE_GOING;
  1665. synchronize_sched();
  1666. if (mod->unsafe)
  1667. printk(KERN_ERR "%s: module is now stuck!\n",
  1668. mod->name);
  1669. else {
  1670. module_put(mod);
  1671. down(&module_mutex);
  1672. free_module(mod);
  1673. up(&module_mutex);
  1674. }
  1675. return ret;
  1676. }
  1677. /* Now it's a first class citizen! */
  1678. down(&module_mutex);
  1679. mod->state = MODULE_STATE_LIVE;
  1680. /* Drop initial reference. */
  1681. module_put(mod);
  1682. module_free(mod, mod->module_init);
  1683. mod->module_init = NULL;
  1684. mod->init_size = 0;
  1685. mod->init_text_size = 0;
  1686. up(&module_mutex);
  1687. return 0;
  1688. }
  1689. static inline int within(unsigned long addr, void *start, unsigned long size)
  1690. {
  1691. return ((void *)addr >= start && (void *)addr < start + size);
  1692. }
  1693. #ifdef CONFIG_KALLSYMS
  1694. /*
  1695. * This ignores the intensely annoying "mapping symbols" found
  1696. * in ARM ELF files: $a, $t and $d.
  1697. */
  1698. static inline int is_arm_mapping_symbol(const char *str)
  1699. {
  1700. return str[0] == '$' && strchr("atd", str[1])
  1701. && (str[2] == '\0' || str[2] == '.');
  1702. }
  1703. static const char *get_ksymbol(struct module *mod,
  1704. unsigned long addr,
  1705. unsigned long *size,
  1706. unsigned long *offset)
  1707. {
  1708. unsigned int i, best = 0;
  1709. unsigned long nextval;
  1710. /* At worse, next value is at end of module */
  1711. if (within(addr, mod->module_init, mod->init_size))
  1712. nextval = (unsigned long)mod->module_init+mod->init_text_size;
  1713. else
  1714. nextval = (unsigned long)mod->module_core+mod->core_text_size;
  1715. /* Scan for closest preceeding symbol, and next symbol. (ELF
  1716. starts real symbols at 1). */
  1717. for (i = 1; i < mod->num_symtab; i++) {
  1718. if (mod->symtab[i].st_shndx == SHN_UNDEF)
  1719. continue;
  1720. /* We ignore unnamed symbols: they're uninformative
  1721. * and inserted at a whim. */
  1722. if (mod->symtab[i].st_value <= addr
  1723. && mod->symtab[i].st_value > mod->symtab[best].st_value
  1724. && *(mod->strtab + mod->symtab[i].st_name) != '\0'
  1725. && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
  1726. best = i;
  1727. if (mod->symtab[i].st_value > addr
  1728. && mod->symtab[i].st_value < nextval
  1729. && *(mod->strtab + mod->symtab[i].st_name) != '\0'
  1730. && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
  1731. nextval = mod->symtab[i].st_value;
  1732. }
  1733. if (!best)
  1734. return NULL;
  1735. *size = nextval - mod->symtab[best].st_value;
  1736. *offset = addr - mod->symtab[best].st_value;
  1737. return mod->strtab + mod->symtab[best].st_name;
  1738. }
  1739. /* For kallsyms to ask for address resolution. NULL means not found.
  1740. We don't lock, as this is used for oops resolution and races are a
  1741. lesser concern. */
  1742. const char *module_address_lookup(unsigned long addr,
  1743. unsigned long *size,
  1744. unsigned long *offset,
  1745. char **modname)
  1746. {
  1747. struct module *mod;
  1748. list_for_each_entry(mod, &modules, list) {
  1749. if (within(addr, mod->module_init, mod->init_size)
  1750. || within(addr, mod->module_core, mod->core_size)) {
  1751. *modname = mod->name;
  1752. return get_ksymbol(mod, addr, size, offset);
  1753. }
  1754. }
  1755. return NULL;
  1756. }
  1757. struct module *module_get_kallsym(unsigned int symnum,
  1758. unsigned long *value,
  1759. char *type,
  1760. char namebuf[128])
  1761. {
  1762. struct module *mod;
  1763. down(&module_mutex);
  1764. list_for_each_entry(mod, &modules, list) {
  1765. if (symnum < mod->num_symtab) {
  1766. *value = mod->symtab[symnum].st_value;
  1767. *type = mod->symtab[symnum].st_info;
  1768. strncpy(namebuf,
  1769. mod->strtab + mod->symtab[symnum].st_name,
  1770. 127);
  1771. up(&module_mutex);
  1772. return mod;
  1773. }
  1774. symnum -= mod->num_symtab;
  1775. }
  1776. up(&module_mutex);
  1777. return NULL;
  1778. }
  1779. static unsigned long mod_find_symname(struct module *mod, const char *name)
  1780. {
  1781. unsigned int i;
  1782. for (i = 0; i < mod->num_symtab; i++)
  1783. if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0)
  1784. return mod->symtab[i].st_value;
  1785. return 0;
  1786. }
  1787. /* Look for this name: can be of form module:name. */
  1788. unsigned long module_kallsyms_lookup_name(const char *name)
  1789. {
  1790. struct module *mod;
  1791. char *colon;
  1792. unsigned long ret = 0;
  1793. /* Don't lock: we're in enough trouble already. */
  1794. if ((colon = strchr(name, ':')) != NULL) {
  1795. *colon = '\0';
  1796. if ((mod = find_module(name)) != NULL)
  1797. ret = mod_find_symname(mod, colon+1);
  1798. *colon = ':';
  1799. } else {
  1800. list_for_each_entry(mod, &modules, list)
  1801. if ((ret = mod_find_symname(mod, name)) != 0)
  1802. break;
  1803. }
  1804. return ret;
  1805. }
  1806. #endif /* CONFIG_KALLSYMS */
  1807. /* Called by the /proc file system to return a list of modules. */
  1808. static void *m_start(struct seq_file *m, loff_t *pos)
  1809. {
  1810. struct list_head *i;
  1811. loff_t n = 0;
  1812. down(&module_mutex);
  1813. list_for_each(i, &modules) {
  1814. if (n++ == *pos)
  1815. break;
  1816. }
  1817. if (i == &modules)
  1818. return NULL;
  1819. return i;
  1820. }
  1821. static void *m_next(struct seq_file *m, void *p, loff_t *pos)
  1822. {
  1823. struct list_head *i = p;
  1824. (*pos)++;
  1825. if (i->next == &modules)
  1826. return NULL;
  1827. return i->next;
  1828. }
  1829. static void m_stop(struct seq_file *m, void *p)
  1830. {
  1831. up(&module_mutex);
  1832. }
  1833. static int m_show(struct seq_file *m, void *p)
  1834. {
  1835. struct module *mod = list_entry(p, struct module, list);
  1836. seq_printf(m, "%s %lu",
  1837. mod->name, mod->init_size + mod->core_size);
  1838. print_unload_info(m, mod);
  1839. /* Informative for users. */
  1840. seq_printf(m, " %s",
  1841. mod->state == MODULE_STATE_GOING ? "Unloading":
  1842. mod->state == MODULE_STATE_COMING ? "Loading":
  1843. "Live");
  1844. /* Used by oprofile and other similar tools. */
  1845. seq_printf(m, " 0x%p", mod->module_core);
  1846. seq_printf(m, "\n");
  1847. return 0;
  1848. }
  1849. /* Format: modulename size refcount deps address
  1850. Where refcount is a number or -, and deps is a comma-separated list
  1851. of depends or -.
  1852. */
  1853. struct seq_operations modules_op = {
  1854. .start = m_start,
  1855. .next = m_next,
  1856. .stop = m_stop,
  1857. .show = m_show
  1858. };
  1859. /* Given an address, look for it in the module exception tables. */
  1860. const struct exception_table_entry *search_module_extables(unsigned long addr)
  1861. {
  1862. unsigned long flags;
  1863. const struct exception_table_entry *e = NULL;
  1864. struct module *mod;
  1865. spin_lock_irqsave(&modlist_lock, flags);
  1866. list_for_each_entry(mod, &modules, list) {
  1867. if (mod->num_exentries == 0)
  1868. continue;
  1869. e = search_extable(mod->extable,
  1870. mod->extable + mod->num_exentries - 1,
  1871. addr);
  1872. if (e)
  1873. break;
  1874. }
  1875. spin_unlock_irqrestore(&modlist_lock, flags);
  1876. /* Now, if we found one, we are running inside it now, hence
  1877. we cannot unload the module, hence no refcnt needed. */
  1878. return e;
  1879. }
  1880. /* Is this a valid kernel address? We don't grab the lock: we are oopsing. */
  1881. struct module *__module_text_address(unsigned long addr)
  1882. {
  1883. struct module *mod;
  1884. list_for_each_entry(mod, &modules, list)
  1885. if (within(addr, mod->module_init, mod->init_text_size)
  1886. || within(addr, mod->module_core, mod->core_text_size))
  1887. return mod;
  1888. return NULL;
  1889. }
  1890. struct module *module_text_address(unsigned long addr)
  1891. {
  1892. struct module *mod;
  1893. unsigned long flags;
  1894. spin_lock_irqsave(&modlist_lock, flags);
  1895. mod = __module_text_address(addr);
  1896. spin_unlock_irqrestore(&modlist_lock, flags);
  1897. return mod;
  1898. }
  1899. /* Don't grab lock, we're oopsing. */
  1900. void print_modules(void)
  1901. {
  1902. struct module *mod;
  1903. printk("Modules linked in:");
  1904. list_for_each_entry(mod, &modules, list)
  1905. printk(" %s", mod->name);
  1906. printk("\n");
  1907. }
  1908. void module_add_driver(struct module *mod, struct device_driver *drv)
  1909. {
  1910. if (!mod || !drv)
  1911. return;
  1912. /* Don't check return code; this call is idempotent */
  1913. sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
  1914. }
  1915. EXPORT_SYMBOL(module_add_driver);
  1916. void module_remove_driver(struct device_driver *drv)
  1917. {
  1918. if (!drv)
  1919. return;
  1920. sysfs_remove_link(&drv->kobj, "module");
  1921. }
  1922. EXPORT_SYMBOL(module_remove_driver);
  1923. #ifdef CONFIG_MODVERSIONS
  1924. /* Generate the signature for struct module here, too, for modversions. */
  1925. void struct_module(struct module *mod) { return; }
  1926. EXPORT_SYMBOL(struct_module);
  1927. #endif