module.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126
  1. /* Rewritten by Rusty Russell, on the backs of many others...
  2. Copyright (C) 2002 Richard Henderson
  3. Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 2 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program; if not, write to the Free Software
  14. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  15. */
  16. #include <linux/config.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleloader.h>
  19. #include <linux/init.h>
  20. #include <linux/kernel.h>
  21. #include <linux/slab.h>
  22. #include <linux/vmalloc.h>
  23. #include <linux/elf.h>
  24. #include <linux/seq_file.h>
  25. #include <linux/syscalls.h>
  26. #include <linux/fcntl.h>
  27. #include <linux/rcupdate.h>
  28. #include <linux/capability.h>
  29. #include <linux/cpu.h>
  30. #include <linux/moduleparam.h>
  31. #include <linux/errno.h>
  32. #include <linux/err.h>
  33. #include <linux/vermagic.h>
  34. #include <linux/notifier.h>
  35. #include <linux/stop_machine.h>
  36. #include <linux/device.h>
  37. #include <linux/string.h>
  38. #include <linux/sched.h>
  39. #include <linux/mutex.h>
  40. #include <asm/uaccess.h>
  41. #include <asm/semaphore.h>
  42. #include <asm/cacheflush.h>
/* Compile-time debug switch: flip "#if 0" to "#if 1" to route DEBUGP
   through printk; otherwise it expands to nothing. */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , a...)
#endif

/* Arch may override the threshold below which sections are grouped. */
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/* Protects module list */
static DEFINE_SPINLOCK(modlist_lock);

/* List of modules, protected by module_mutex AND modlist_lock */
static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules);

/* Chain notified on module load/unload state changes. */
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
  59. int register_module_notifier(struct notifier_block * nb)
  60. {
  61. return blocking_notifier_chain_register(&module_notify_list, nb);
  62. }
  63. EXPORT_SYMBOL(register_module_notifier);
  64. int unregister_module_notifier(struct notifier_block * nb)
  65. {
  66. return blocking_notifier_chain_unregister(&module_notify_list, nb);
  67. }
  68. EXPORT_SYMBOL(unregister_module_notifier);
  69. /* We require a truly strong try_module_get() */
  70. static inline int strong_try_module_get(struct module *mod)
  71. {
  72. if (mod && mod->state == MODULE_STATE_COMING)
  73. return 0;
  74. return try_module_get(mod);
  75. }
  76. /* A thread that wants to hold a reference to a module only while it
  77. * is running can call ths to safely exit.
  78. * nfsd and lockd use this.
  79. */
  80. void __module_put_and_exit(struct module *mod, long code)
  81. {
  82. module_put(mod);
  83. do_exit(code);
  84. }
  85. EXPORT_SYMBOL(__module_put_and_exit);
  86. /* Find a module section: 0 means not found. */
  87. static unsigned int find_sec(Elf_Ehdr *hdr,
  88. Elf_Shdr *sechdrs,
  89. const char *secstrings,
  90. const char *name)
  91. {
  92. unsigned int i;
  93. for (i = 1; i < hdr->e_shnum; i++)
  94. /* Alloc bit cleared means "ignore it." */
  95. if ((sechdrs[i].sh_flags & SHF_ALLOC)
  96. && strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
  97. return i;
  98. return 0;
  99. }
/* Provided by the linker: boundaries of the built-in export tables. */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
/* CRC tables parallel to the symbol tables above (same indexing). */
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
/* Index into a crc table that may be absent (NULL base). */
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
  115. /* lookup symbol in given range of kernel_symbols */
  116. static const struct kernel_symbol *lookup_symbol(const char *name,
  117. const struct kernel_symbol *start,
  118. const struct kernel_symbol *stop)
  119. {
  120. const struct kernel_symbol *ks = start;
  121. for (; ks < stop; ks++)
  122. if (strcmp(ks->name, name) == 0)
  123. return ks;
  124. return NULL;
  125. }
/* Find a symbol, return value, crc and module which owns it.
 * Lookup order: core kernel exports (plain, then GPL-only if @gplok,
 * then gpl_future), then each loaded module's tables in the same order.
 * On success *owner is the exporting module (NULL for the core kernel)
 * and *crc points at its version CRC (or NULL).  Returns the symbol's
 * address, or 0 if not found.  Callers must hold the module list lock. */
static unsigned long __find_symbol(const char *name,
                                   struct module **owner,
                                   const unsigned long **crc,
                                   int gplok)
{
        struct module *mod;
        const struct kernel_symbol *ks;

        /* Core kernel first. */
        *owner = NULL;          /* NULL owner == symbol lives in the core kernel */
        ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
        if (ks) {
                /* crc table is parallel to the symbol table: same index. */
                *crc = symversion(__start___kcrctab, (ks - __start___ksymtab));
                return ks->value;
        }
        /* GPL-only exports are visible only to GPL-compatible modules. */
        if (gplok) {
                ks = lookup_symbol(name, __start___ksymtab_gpl,
                                   __stop___ksymtab_gpl);
                if (ks) {
                        *crc = symversion(__start___kcrctab_gpl,
                                          (ks - __start___ksymtab_gpl));
                        return ks->value;
                }
        }
        /* gpl_future exports: everyone may use them for now, but non-GPL
           users are warned that access will be withdrawn. */
        ks = lookup_symbol(name, __start___ksymtab_gpl_future,
                           __stop___ksymtab_gpl_future);
        if (ks) {
                if (!gplok) {
                        printk(KERN_WARNING "Symbol %s is being used "
                               "by a non-GPL module, which will not "
                               "be allowed in the future\n", name);
                        printk(KERN_WARNING "Please see the file "
                               "Documentation/feature-removal-schedule.txt "
                               "in the kernel source tree for more "
                               "details.\n");
                }
                *crc = symversion(__start___kcrctab_gpl_future,
                                  (ks - __start___ksymtab_gpl_future));
                return ks->value;
        }
        /* Now try modules. */
        list_for_each_entry(mod, &modules, list) {
                *owner = mod;
                ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
                if (ks) {
                        *crc = symversion(mod->crcs, (ks - mod->syms));
                        return ks->value;
                }
                if (gplok) {
                        ks = lookup_symbol(name, mod->gpl_syms,
                                           mod->gpl_syms + mod->num_gpl_syms);
                        if (ks) {
                                *crc = symversion(mod->gpl_crcs,
                                                  (ks - mod->gpl_syms));
                                return ks->value;
                        }
                }
                ks = lookup_symbol(name, mod->gpl_future_syms,
                                   (mod->gpl_future_syms +
                                    mod->num_gpl_future_syms));
                if (ks) {
                        if (!gplok) {
                                printk(KERN_WARNING "Symbol %s is being used "
                                       "by a non-GPL module, which will not "
                                       "be allowed in the future\n", name);
                                printk(KERN_WARNING "Please see the file "
                                       "Documentation/feature-removal-schedule.txt "
                                       "in the kernel source tree for more "
                                       "details.\n");
                        }
                        *crc = symversion(mod->gpl_future_crcs,
                                          (ks - mod->gpl_future_syms));
                        return ks->value;
                }
        }
        DEBUGP("Failed to find symbol %s\n", name);
        return 0;       /* 0 means "not found" */
}
  204. /* Search for module by name: must hold module_mutex. */
  205. static struct module *find_module(const char *name)
  206. {
  207. struct module *mod;
  208. list_for_each_entry(mod, &modules, list) {
  209. if (strcmp(mod->name, name) == 0)
  210. return mod;
  211. }
  212. return NULL;
  213. }
#ifdef CONFIG_SMP
/* Number of blocks used and allocated. */
static unsigned int pcpu_num_used, pcpu_num_allocated;
/* Size of each block. -ve means used.
   (pcpu_size[] describes consecutive blocks starting at __per_cpu_start;
   a negative entry is an allocated block, a positive one is free.) */
static int *pcpu_size;
  219. static int split_block(unsigned int i, unsigned short size)
  220. {
  221. /* Reallocation required? */
  222. if (pcpu_num_used + 1 > pcpu_num_allocated) {
  223. int *new = kmalloc(sizeof(new[0]) * pcpu_num_allocated*2,
  224. GFP_KERNEL);
  225. if (!new)
  226. return 0;
  227. memcpy(new, pcpu_size, sizeof(new[0])*pcpu_num_allocated);
  228. pcpu_num_allocated *= 2;
  229. kfree(pcpu_size);
  230. pcpu_size = new;
  231. }
  232. /* Insert a new subblock */
  233. memmove(&pcpu_size[i+1], &pcpu_size[i],
  234. sizeof(pcpu_size[0]) * (pcpu_num_used - i));
  235. pcpu_num_used++;
  236. pcpu_size[i+1] -= size;
  237. pcpu_size[i] = size;
  238. return 1;
  239. }
  240. static inline unsigned int block_size(int val)
  241. {
  242. if (val < 0)
  243. return -val;
  244. return val;
  245. }
/* Created by linker magic */
extern char __per_cpu_start[], __per_cpu_end[];

/*
 * First-fit allocator over the static per-cpu area.  Walks pcpu_size[]
 * (negative entry = in use, positive = free; see block_size()) looking
 * for a free block big enough for @size bytes at @align alignment.
 * Returns a pointer inside [__per_cpu_start, __per_cpu_end), or NULL.
 */
static void *percpu_modalloc(unsigned long size, unsigned long align,
                             const char *name)
{
        unsigned long extra;
        unsigned int i;
        void *ptr;

        /* Alignment beyond a cache line cannot be honoured: clamp it. */
        if (align > SMP_CACHE_BYTES) {
                printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
                       name, align, SMP_CACHE_BYTES);
                align = SMP_CACHE_BYTES;
        }

        ptr = __per_cpu_start;
        for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
                /* Extra for alignment requirement. */
                extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
                /* Block 0 (the kernel's own percpu data) starts cache
                   aligned, so it can never need padding. */
                BUG_ON(i == 0 && extra != 0);
                /* Skip blocks that are in use (negative) or too small. */
                if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
                        continue;

                /* Transfer extra to previous block: the neighbour keeps
                   its sign (used stays used, free stays free), it just
                   absorbs the padding bytes. */
                if (pcpu_size[i-1] < 0)
                        pcpu_size[i-1] -= extra;
                else
                        pcpu_size[i-1] += extra;
                pcpu_size[i] -= extra;
                ptr += extra;

                /* Split block if warranted */
                if (pcpu_size[i] - size > sizeof(unsigned long))
                        if (!split_block(i, size))
                                return NULL;

                /* Mark allocated by flipping the size negative. */
                pcpu_size[i] = -pcpu_size[i];
                return ptr;
        }

        printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
               size);
        return NULL;
}
/*
 * Free a block previously returned by percpu_modalloc(): find it by
 * address, mark it free, then coalesce with free neighbours.
 */
static void percpu_modfree(void *freeme)
{
        unsigned int i;
        void *ptr = __per_cpu_start + block_size(pcpu_size[0]);

        /* First entry is core kernel percpu data. */
        for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
                if (ptr == freeme) {
                        /* Flip the size positive: block is free again. */
                        pcpu_size[i] = -pcpu_size[i];
                        goto free;
                }
        }
        /* Address was never handed out by percpu_modalloc(). */
        BUG();

 free:
        /* Merge with previous? (>= 0 means the neighbour is free) */
        if (pcpu_size[i-1] >= 0) {
                pcpu_size[i-1] += pcpu_size[i];
                pcpu_num_used--;
                memmove(&pcpu_size[i], &pcpu_size[i+1],
                        (pcpu_num_used - i) * sizeof(pcpu_size[0]));
                i--;    /* our block is now the merged predecessor */
        }
        /* Merge with next? */
        if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
                pcpu_size[i] += pcpu_size[i+1];
                pcpu_num_used--;
                memmove(&pcpu_size[i+1], &pcpu_size[i+2],
                        (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
        }
}
  314. static unsigned int find_pcpusec(Elf_Ehdr *hdr,
  315. Elf_Shdr *sechdrs,
  316. const char *secstrings)
  317. {
  318. return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
  319. }
  320. static int percpu_modinit(void)
  321. {
  322. pcpu_num_used = 2;
  323. pcpu_num_allocated = 2;
  324. pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
  325. GFP_KERNEL);
  326. /* Static in-kernel percpu data (used). */
  327. pcpu_size[0] = -ALIGN(__per_cpu_end-__per_cpu_start, SMP_CACHE_BYTES);
  328. /* Free room. */
  329. pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
  330. if (pcpu_size[1] < 0) {
  331. printk(KERN_ERR "No per-cpu room for modules.\n");
  332. pcpu_num_used = 1;
  333. }
  334. return 0;
  335. }
  336. __initcall(percpu_modinit);
#else /* ... !CONFIG_SMP */

/* UP build: modules get no per-cpu data, so allocation always fails. */
static inline void *percpu_modalloc(unsigned long size, unsigned long align,
                                    const char *name)
{
        return NULL;
}
/* Nothing is ever allocated on UP, so any free is a bug. */
static inline void percpu_modfree(void *pcpuptr)
{
        BUG();
}
/* No per-cpu section exists on UP. */
static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
                                        Elf_Shdr *sechdrs,
                                        const char *secstrings)
{
        return 0;
}
static inline void percpu_modcopy(void *pcpudst, const void *src,
                                  unsigned long size)
{
        /* pcpusec should be 0, and size of that section should be 0. */
        BUG_ON(size != 0);
}
#endif /* CONFIG_SMP */
/*
 * MODINFO_ATTR(field) generates the boilerplate for exposing a string
 * .modinfo field (e.g. "version") as a read-only sysfs module attribute:
 * a setup function (kstrdup from the ELF .modinfo section), a show
 * function, an existence test, a free function, and the attribute itself.
 */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
	                struct module *mod, char *buffer)             \
{                                                                     \
	return sprintf(buffer, "%s\n", mod->field);                   \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444,           \
		  .owner = THIS_MODULE },                             \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
  389. #ifdef CONFIG_MODULE_UNLOAD
  390. /* Init the unload section of the module. */
  391. static void module_unload_init(struct module *mod)
  392. {
  393. unsigned int i;
  394. INIT_LIST_HEAD(&mod->modules_which_use_me);
  395. for (i = 0; i < NR_CPUS; i++)
  396. local_set(&mod->ref[i].count, 0);
  397. /* Hold reference count during initialization. */
  398. local_set(&mod->ref[raw_smp_processor_id()].count, 1);
  399. /* Backwards compatibility macros put refcount during init. */
  400. mod->waiter = current;
  401. }
/* modules using other modules */
struct module_use
{
	struct list_head list;            /* link in exporter's modules_which_use_me */
	struct module *module_which_uses; /* the importing module */
};
  408. /* Does a already use b? */
  409. static int already_uses(struct module *a, struct module *b)
  410. {
  411. struct module_use *use;
  412. list_for_each_entry(use, &b->modules_which_use_me, list) {
  413. if (use->module_which_uses == a) {
  414. DEBUGP("%s uses %s!\n", a->name, b->name);
  415. return 1;
  416. }
  417. }
  418. DEBUGP("%s does not use %s!\n", a->name, b->name);
  419. return 0;
  420. }
  421. /* Module a uses b */
  422. static int use_module(struct module *a, struct module *b)
  423. {
  424. struct module_use *use;
  425. if (b == NULL || already_uses(a, b)) return 1;
  426. if (!strong_try_module_get(b))
  427. return 0;
  428. DEBUGP("Allocating new usage for %s.\n", a->name);
  429. use = kmalloc(sizeof(*use), GFP_ATOMIC);
  430. if (!use) {
  431. printk("%s: out of memory loading\n", a->name);
  432. module_put(b);
  433. return 0;
  434. }
  435. use->module_which_uses = a;
  436. list_add(&use->list, &b->modules_which_use_me);
  437. return 1;
  438. }
  439. /* Clear the unload stuff of the module. */
  440. static void module_unload_free(struct module *mod)
  441. {
  442. struct module *i;
  443. list_for_each_entry(i, &modules, list) {
  444. struct module_use *use;
  445. list_for_each_entry(use, &i->modules_which_use_me, list) {
  446. if (use->module_which_uses == mod) {
  447. DEBUGP("%s unusing %s\n", mod->name, i->name);
  448. module_put(i);
  449. list_del(&use->list);
  450. kfree(use);
  451. /* There can be at most one match. */
  452. break;
  453. }
  454. }
  455. }
  456. }
#ifdef CONFIG_MODULE_FORCE_UNLOAD
/* Forced unload requested via O_TRUNC: allow it, but taint the kernel. */
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD);
	return ret;
}
#else
/* Forced unload not compiled in: never allow it. */
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
/* Arguments for __try_stop_module(), passed through stop_machine_run(). */
struct stopref
{
	struct module *mod;   /* module being removed */
	int flags;            /* O_NONBLOCK/O_TRUNC from delete_module(2) */
	int *forced;          /* out: set if removal was forced */
};
  477. /* Whole machine is stopped with interrupts off when this runs. */
  478. static int __try_stop_module(void *_sref)
  479. {
  480. struct stopref *sref = _sref;
  481. /* If it's not unused, quit unless we are told to block. */
  482. if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
  483. if (!(*sref->forced = try_force_unload(sref->flags)))
  484. return -EWOULDBLOCK;
  485. }
  486. /* Mark it as dying. */
  487. sref->mod->state = MODULE_STATE_GOING;
  488. return 0;
  489. }
  490. static int try_stop_module(struct module *mod, int flags, int *forced)
  491. {
  492. struct stopref sref = { mod, flags, forced };
  493. return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
  494. }
  495. unsigned int module_refcount(struct module *mod)
  496. {
  497. unsigned int i, total = 0;
  498. for (i = 0; i < NR_CPUS; i++)
  499. total += local_read(&mod->ref[i].count);
  500. return total;
  501. }
  502. EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);

/*
 * Sleep until @mod's reference count drops to zero.  Called with
 * module_mutex held; the mutex is dropped while waiting and retaken
 * before returning.  module_put() wakes us via mod->waiter.
 */
static void wait_for_zero_refcount(struct module *mod)
{
	/* Since we might sleep for some time, drop the semaphore first */
	mutex_unlock(&module_mutex);
	for (;;) {
		DEBUGP("Looking at refcount...\n");
		/* Set task state BEFORE testing the refcount, so a wake-up
		   from a concurrent module_put() cannot be lost between the
		   test and schedule(). */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (module_refcount(mod) == 0)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	mutex_lock(&module_mutex);
}
/*
 * delete_module(2): remove the named module.
 * Fails unless the caller has CAP_SYS_MODULE, the module exists, nothing
 * depends on it, it is fully live, and (unless forced) it is removable.
 * Blocks until the refcount hits zero unless O_NONBLOCK is set.
 */
asmlinkage long
sys_delete_module(const char __user *name_user, unsigned int flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE))
		return -EPERM;

	/* Copy the name from userspace; always NUL-terminate. */
	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->modules_which_use_me)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count and wake up
		   waiter --RR */
		DEBUGP("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if ((mod->init != NULL && mod->exit == NULL)
	    || mod->unsafe) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Set this up before setting mod->state */
	mod->waiter = current;

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Never wait if forced. */
	if (!forced && module_refcount(mod) != 0)
		wait_for_zero_refcount(mod);

	/* Final destruction now no one is using it. */
	if (mod->exit != NULL) {
		/* Drop the mutex: the exit routine may sleep or retake it. */
		mutex_unlock(&module_mutex);
		mod->exit();
		mutex_lock(&module_mutex);
	}
	free_module(mod);

 out:
	mutex_unlock(&module_mutex);
	return ret;
}
  580. static void print_unload_info(struct seq_file *m, struct module *mod)
  581. {
  582. struct module_use *use;
  583. int printed_something = 0;
  584. seq_printf(m, " %u ", module_refcount(mod));
  585. /* Always include a trailing , so userspace can differentiate
  586. between this and the old multi-field proc format. */
  587. list_for_each_entry(use, &mod->modules_which_use_me, list) {
  588. printed_something = 1;
  589. seq_printf(m, "%s,", use->module_which_uses->name);
  590. }
  591. if (mod->unsafe) {
  592. printed_something = 1;
  593. seq_printf(m, "[unsafe],");
  594. }
  595. if (mod->init != NULL && mod->exit == NULL) {
  596. printed_something = 1;
  597. seq_printf(m, "[permanent],");
  598. }
  599. if (!printed_something)
  600. seq_printf(m, "-");
  601. }
  602. void __symbol_put(const char *symbol)
  603. {
  604. struct module *owner;
  605. unsigned long flags;
  606. const unsigned long *crc;
  607. spin_lock_irqsave(&modlist_lock, flags);
  608. if (!__find_symbol(symbol, &owner, &crc, 1))
  609. BUG();
  610. module_put(owner);
  611. spin_unlock_irqrestore(&modlist_lock, flags);
  612. }
  613. EXPORT_SYMBOL(__symbol_put);
  614. void symbol_put_addr(void *addr)
  615. {
  616. unsigned long flags;
  617. spin_lock_irqsave(&modlist_lock, flags);
  618. if (!kernel_text_address((unsigned long)addr))
  619. BUG();
  620. module_put(module_text_address((unsigned long)addr));
  621. spin_unlock_irqrestore(&modlist_lock, flags);
  622. }
  623. EXPORT_SYMBOL_GPL(symbol_put_addr);
  624. static ssize_t show_refcnt(struct module_attribute *mattr,
  625. struct module *mod, char *buffer)
  626. {
  627. /* sysfs holds a reference */
  628. return sprintf(buffer, "%u\n", module_refcount(mod)-1);
  629. }
  630. static struct module_attribute refcnt = {
  631. .attr = { .name = "refcnt", .mode = 0444, .owner = THIS_MODULE },
  632. .show = show_refcnt,
  633. };
#else /* !CONFIG_MODULE_UNLOAD */
/* Unloading disabled: usage information is unknown. */
static void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}
static inline void module_unload_free(struct module *mod)
{
}
/* Without dependency tracking, "using" a module is just taking a ref. */
static inline int use_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
static inline void module_unload_init(struct module *mod)
{
}
#endif /* CONFIG_MODULE_UNLOAD */
/* Generic modinfo attributes published for every module in sysfs. */
static struct module_attribute *modinfo_attrs[] = {
	&modinfo_version,
	&modinfo_srcversion,
#ifdef CONFIG_MODULE_UNLOAD
	&refcnt,
#endif
	NULL,
};

/* Version magic this kernel expects to find in modules it loads. */
static const char vermagic[] = VERMAGIC_STRING;
#ifdef CONFIG_MODVERSIONS
/*
 * Check the CRC recorded in the loading module's __versions table for
 * @symname against @crc, the exporter's CRC.  Returns 1 if they agree
 * or cannot be compared (missing data merely taints), 0 on a genuine
 * mismatch.
 */
static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs? OK, we're already tainted. */
	if (!crc)
		return 1;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == *crc)
			return 1;

		printk("%s: disagrees about version of symbol %s\n",
		       mod->name, symname);
		DEBUGP("Found checksum %lX vs module %lX\n",
		       *crc, versions[i].crc);
		return 0;
	}
	/* Not in module's version table. OK, but that taints the kernel. */
	if (!(tainted & TAINT_FORCED_MODULE)) {
		printk("%s: no version for \"%s\" found: kernel tainted.\n",
		       mod->name, symname);
		add_taint(TAINT_FORCED_MODULE);
	}
	return 1;
}
  694. static inline int check_modstruct_version(Elf_Shdr *sechdrs,
  695. unsigned int versindex,
  696. struct module *mod)
  697. {
  698. const unsigned long *crc;
  699. struct module *owner;
  700. if (!__find_symbol("struct_module", &owner, &crc, 1))
  701. BUG();
  702. return check_version(sechdrs, versindex, "struct_module", mod,
  703. crc);
  704. }
  705. /* First part is kernel version, which we ignore. */
  706. static inline int same_magic(const char *amagic, const char *bmagic)
  707. {
  708. amagic += strcspn(amagic, " ");
  709. bmagic += strcspn(bmagic, " ");
  710. return strcmp(amagic, bmagic) == 0;
  711. }
  712. #else
/* Without CONFIG_MODVERSIONS there are no CRCs: accept every symbol. */
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc)
{
	return 1;
}
/* No modversions: trust that 'struct module' layouts match. */
static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}
  727. static inline int same_magic(const char *amagic, const char *bmagic)
  728. {
  729. return strcmp(amagic, bmagic) == 0;
  730. }
  731. #endif /* CONFIG_MODVERSIONS */
/* Resolve a symbol for this module.  I.e. if we find one, record usage.
   Must be holding module_mutex.
   Returns the symbol's address, or 0 if it is unknown, fails the CRC
   check, or the exporting module cannot be pinned (use_module). */
static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
				    unsigned int versindex,
				    const char *name,
				    struct module *mod)
{
	struct module *owner;
	unsigned long ret;
	const unsigned long *crc;

	/* GPL-incompatible modules may only see non-GPL-only exports. */
	ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
	if (ret) {
		/* use_module can fail due to OOM, or module unloading */
		if (!check_version(sechdrs, versindex, name, mod, crc) ||
		    !use_module(mod, owner))
			ret = 0;
	}
	return ret;
}
  751. /*
  752. * /sys/module/foo/sections stuff
  753. * J. Corbet <corbet@lwn.net>
  754. */
  755. #ifdef CONFIG_KALLSYMS
/* sysfs show() for one /sys/module/<name>/sections/<sect> file:
 * prints the section's load address in hex. */
static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module *mod, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%lx\n", sattr->address);
}
/* Create /sys/module/<name>/sections/ with one attribute file per
 * SHF_ALLOC section.  Failures are silent: sysfs entries are a
 * debugging nicety, not required for the module to load. */
static void add_sect_attrs(struct module *mod, unsigned int nsect,
		char *secstrings, Elf_Shdr *sechdrs)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < nsect; i++)
		if (sechdrs[i].sh_flags & SHF_ALLOC)
			nloaded++;
	/* One allocation holds both the sect_attrs (with its trailing
	 * array of nloaded module_sect_attr) and, after it at offset
	 * size[0], the NULL-terminated attribute pointer array for the
	 * sysfs group. */
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	if (! (sect_attrs = kmalloc(size[0] + size[1], GFP_KERNEL)))
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < nsect; i++) {
		if (! (sechdrs[i].sh_flags & SHF_ALLOC))
			continue;
		sattr->address = sechdrs[i].sh_addr;
		/* Section name is copied: secstrings lives in the
		 * temporary image that load_module vfree()s. */
		strlcpy(sattr->name, secstrings + sechdrs[i].sh_name,
			MODULE_SECT_NAME_LEN);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.owner = mod;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;	/* terminate the group's attribute array */

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
  out:
	kfree(sect_attrs);
}
/* Tear down the sections/ sysfs group created by add_sect_attrs().
 * Safe to call even if add_sect_attrs() failed (sect_attrs NULL). */
static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		kfree(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}
  817. #else
/* Without CONFIG_KALLSYMS no section attributes are exposed. */
static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
		char *sectstrings, Elf_Shdr *sechdrs)
{
}
/* Counterpart no-op for the !CONFIG_KALLSYMS case. */
static inline void remove_sect_attrs(struct module *mod)
{
}
  825. #endif /* CONFIG_KALLSYMS */
  826. static int module_add_modinfo_attrs(struct module *mod)
  827. {
  828. struct module_attribute *attr;
  829. struct module_attribute *temp_attr;
  830. int error = 0;
  831. int i;
  832. mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
  833. (ARRAY_SIZE(modinfo_attrs) + 1)),
  834. GFP_KERNEL);
  835. if (!mod->modinfo_attrs)
  836. return -ENOMEM;
  837. temp_attr = mod->modinfo_attrs;
  838. for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
  839. if (!attr->test ||
  840. (attr->test && attr->test(mod))) {
  841. memcpy(temp_attr, attr, sizeof(*temp_attr));
  842. temp_attr->attr.owner = mod;
  843. error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
  844. ++temp_attr;
  845. }
  846. }
  847. return error;
  848. }
/* Remove the sysfs files created by module_add_modinfo_attrs() and
 * free the per-module attribute array. */
static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	/* NB: &mod->modinfo_attrs[i] is never NULL, so this condition is
	 * always true; the loop terminates via the break below when it
	 * reaches the zeroed (kzalloc'd) sentinel entry. */
	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
/*
 * Register the module's kobject under /sys/module/<name>/ and populate
 * it with parameter files and modinfo attributes.
 *
 * On failure after kobject_register(), the kobject is unregistered;
 * earlier failures just return the error.  Returns 0 on success.
 */
static int mod_sysfs_setup(struct module *mod,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	err = kobject_set_name(&mod->mkobj.kobj, "%s", mod->name);
	if (err)
		goto out;
	kobj_set_kset_s(&mod->mkobj, module_subsys);
	mod->mkobj.mod = mod;
	err = kobject_register(&mod->mkobj.kobj);
	if (err)
		goto out;

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg;

	return 0;

out_unreg:
	kobject_unregister(&mod->mkobj.kobj);
out:
	return err;
}
/* Undo mod_sysfs_setup(): remove modinfo attrs, parameter files, and
 * the module's kobject itself. */
static void mod_kobject_remove(struct module *mod)
{
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_unregister(&mod->mkobj.kobj);
}
  895. /*
  896. * unlink the module with the whole machine is stopped with interrupts off
  897. * - this defends against kallsyms not taking locks
  898. */
  899. static int __unlink_module(void *_mod)
  900. {
  901. struct module *mod = _mod;
  902. list_del(&mod->list);
  903. return 0;
  904. }
/* Free a module, remove from lists, etc (must hold module mutex).
 * Order matters: the module is unlinked (under stop_machine) and its
 * sysfs presence removed before any memory is released, and the core
 * region goes last because struct module itself lives inside it. */
static void free_module(struct module *mod)
{
	/* Delete from various lists */
	stop_machine_run(__unlink_module, mod, NR_CPUS);
	remove_sect_attrs(mod);
	mod_kobject_remove(mod);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* This may be NULL, but that's OK */
	module_free(mod, mod->module_init);
	kfree(mod->args);
	if (mod->percpu)
		percpu_modfree(mod->percpu);

	/* Finally, free the core (containing the module structure) */
	module_free(mod, mod->module_core);
}
/* Look up an exported symbol by name and pin its owning module.
 * Returns the symbol's address, or NULL if it is unknown or the owner
 * is going away.  Caller must later drop the reference
 * (symbol_put). */
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	unsigned long value, flags;
	const unsigned long *crc;

	spin_lock_irqsave(&modlist_lock, flags);
	value = __find_symbol(symbol, &owner, &crc, 1);
	/* Pin the owner while we still hold the lock; a module that is
	 * unloading makes strong_try_module_get() fail. */
	if (value && !strong_try_module_get(owner))
		value = 0;
	spin_unlock_irqrestore(&modlist_lock, flags);

	return (void *)value;
}
EXPORT_SYMBOL_GPL(__symbol_get);
  937. /*
  938. * Ensure that an exported symbol [global namespace] does not already exist
  939. * in the Kernel or in some other modules exported symbol table.
  940. */
  941. static int verify_export_symbols(struct module *mod)
  942. {
  943. const char *name = NULL;
  944. unsigned long i, ret = 0;
  945. struct module *owner;
  946. const unsigned long *crc;
  947. for (i = 0; i < mod->num_syms; i++)
  948. if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
  949. name = mod->syms[i].name;
  950. ret = -ENOEXEC;
  951. goto dup;
  952. }
  953. for (i = 0; i < mod->num_gpl_syms; i++)
  954. if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
  955. name = mod->gpl_syms[i].name;
  956. ret = -ENOEXEC;
  957. goto dup;
  958. }
  959. dup:
  960. if (ret)
  961. printk(KERN_ERR "%s: exports duplicate symbol %s (owned by %s)\n",
  962. mod->name, name, module_name(owner));
  963. return ret;
  964. }
/* Change all symbols so that sh_value encodes the pointer directly.
 * Walks the module's symbol table: undefined symbols are resolved
 * against kernel/module exports, section-relative symbols are rebased
 * onto their section's final address (or the percpu area).  Returns 0
 * or a negative errno; symbol entry 0 is the ELF null symbol and is
 * skipped. */
static int simplify_symbols(Elf_Shdr *sechdrs,
			    unsigned int symindex,
			    const char *strtab,
			    unsigned int versindex,
			    unsigned int pcpuindex,
			    struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned long secbase;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
	int ret = 0;

	for (i = 1; i < n; i++) {
		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			DEBUGP("Common symbol: %s\n", strtab + sym[i].st_name);
			printk("%s: please compile with -fno-common\n",
			       mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			DEBUGP("Absolute symbol: 0x%08lx\n",
			       (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			sym[i].st_value
			  = resolve_symbol(sechdrs, versindex,
					   strtab + sym[i].st_name, mod);

			/* Ok if resolved.  */
			if (sym[i].st_value != 0)
				break;
			/* Ok if weak.  */
			if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       mod->name, strtab + sym[i].st_name);
			ret = -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == pcpuindex)
				secbase = (unsigned long)mod->percpu;
			else
				secbase = sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
  1018. /* Update size with this section: return offset. */
  1019. static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
  1020. {
  1021. long ret;
  1022. ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
  1023. *size = ret + sechdr->sh_size;
  1024. return ret;
  1025. }
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init.

   Sections named ".init*" are laid out into the init region (tallied
   in mod->init_size), everything else into the core region
   (mod->core_size).  After the executable pass (m == 0) the running
   total is snapshotted as *_text_size, so text ends up first in each
   region. */
static void layout_sections(struct module *mod,
			    const Elf_Ehdr *hdr,
			    Elf_Shdr *sechdrs,
			    const char *secstrings)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	/* ~0UL in sh_entsize marks "not yet placed". */
	for (i = 0; i < hdr->e_shnum; i++)
		sechdrs[i].sh_entsize = ~0UL;

	DEBUGP("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < hdr->e_shnum; ++i) {
			Elf_Shdr *s = &sechdrs[i];

			/* Section must have all masks[m][0] flags, none
			 * of masks[m][1], be unplaced, and not be an
			 * .init section. */
			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strncmp(secstrings + s->sh_name,
				       ".init", 5) == 0)
				continue;
			s->sh_entsize = get_offset(&mod->core_size, s);
			DEBUGP("\t%s\n", secstrings + s->sh_name);
		}
		if (m == 0)
			mod->core_text_size = mod->core_size;
	}

	DEBUGP("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < hdr->e_shnum; ++i) {
			Elf_Shdr *s = &sechdrs[i];

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strncmp(secstrings + s->sh_name,
				       ".init", 5) != 0)
				continue;
			s->sh_entsize = (get_offset(&mod->init_size, s)
					 | INIT_OFFSET_MASK);
			DEBUGP("\t%s\n", secstrings + s->sh_name);
		}
		if (m == 0)
			mod->init_text_size = mod->init_size;
	}
}
  1081. static inline int license_is_gpl_compatible(const char *license)
  1082. {
  1083. return (strcmp(license, "GPL") == 0
  1084. || strcmp(license, "GPL v2") == 0
  1085. || strcmp(license, "GPL and additional rights") == 0
  1086. || strcmp(license, "Dual BSD/GPL") == 0
  1087. || strcmp(license, "Dual MIT/GPL") == 0
  1088. || strcmp(license, "Dual MPL/GPL") == 0);
  1089. }
/* Record whether the module's license (from its .modinfo "license="
 * tag, or "unspecified" if absent) is GPL-compatible; a non-compatible
 * license taints the kernel once. */
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	mod->license_gplok = license_is_gpl_compatible(license);
	if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) {
		printk(KERN_WARNING "%s: module license '%s' taints kernel.\n",
		       mod->name, license);
		add_taint(TAINT_PROPRIETARY_MODULE);
	}
}
  1101. /* Parse tag=value strings from .modinfo section */
  1102. static char *next_string(char *string, unsigned long *secsize)
  1103. {
  1104. /* Skip non-zero chars */
  1105. while (string[0]) {
  1106. string++;
  1107. if ((*secsize)-- <= 1)
  1108. return NULL;
  1109. }
  1110. /* Skip any zero padding. */
  1111. while (!string[0]) {
  1112. string++;
  1113. if ((*secsize)-- <= 1)
  1114. return NULL;
  1115. }
  1116. return string;
  1117. }
/* Find the value of "tag=value" in the .modinfo section (index @info).
 * Returns a pointer to the value inside the section, or NULL if the
 * tag is absent. */
static char *get_modinfo(Elf_Shdr *sechdrs,
			 unsigned int info,
			 const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	unsigned long size = sechdrs[info].sh_size;

	/* .modinfo is a sequence of NUL-terminated "tag=value" strings. */
	for (p = (char *)sechdrs[info].sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}
/* Give each entry in modinfo_attrs[] a chance to parse its tag from
 * the .modinfo section via its ->setup hook (value may be NULL if the
 * tag is absent). */
static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
			  unsigned int infoindex)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod,
				    get_modinfo(sechdrs,
						infoindex,
						attr->attr.name));
	}
}
  1144. #ifdef CONFIG_KALLSYMS
  1145. int is_exported(const char *name, const struct module *mod)
  1146. {
  1147. if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
  1148. return 1;
  1149. else
  1150. if (lookup_symbol(name, mod->syms, mod->syms + mod->num_syms))
  1151. return 1;
  1152. else
  1153. return 0;
  1154. }
/* As per nm: classify a symbol into the single-character type code
 * that nm(1) prints ('t' text, 'd' data, 'b' bss, 'r' rodata, 'g'/'s'
 * small data/bss, 'w'/'v' weak, 'U' undefined, 'a' absolute, 'n'
 * debug, '?' unknown).  Used to overwrite st_info in add_kallsyms(). */
static char elf_type(const Elf_Sym *sym,
		     Elf_Shdr *sechdrs,
		     const char *secstrings,
		     struct module *mod)
{
	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';
		else
			return 'w';
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';
	if (sym->st_shndx == SHN_ABS)
		return 'a';
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';
	/* Allocated, with contents: data ('d'), rodata ('r'), or
	 * arch-specific small data ('g'). */
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';
		else
			return 'd';
	}
	/* NOBITS: bss ('b') or small bss ('s'). */
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';
		else
			return 'b';
	}
	if (strncmp(secstrings + sechdrs[sym->st_shndx].sh_name,
		    ".debug", strlen(".debug")) == 0)
		return 'n';
	return '?';
}
/* Point the module at its (kept-resident) symbol and string tables for
 * later kallsyms lookups, and repurpose each symbol's st_info byte to
 * hold its nm-style type character. */
static void add_kallsyms(struct module *mod,
			 Elf_Shdr *sechdrs,
			 unsigned int symindex,
			 unsigned int strindex,
			 const char *secstrings)
{
	unsigned int i;

	mod->symtab = (void *)sechdrs[symindex].sh_addr;
	mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
	mod->strtab = (void *)sechdrs[strindex].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->num_symtab; i++)
		mod->symtab[i].st_info
			= elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
}
  1210. #else
/* Without CONFIG_KALLSYMS no symbol tables are kept around. */
static inline void add_kallsyms(struct module *mod,
				Elf_Shdr *sechdrs,
				unsigned int symindex,
				unsigned int strindex,
				const char *secstrings)
{
}
  1218. #endif /* CONFIG_KALLSYMS */
/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections.

   Copies the ELF image from userspace, validates it, lays the
   SHF_ALLOC sections out into freshly allocated core/init regions,
   resolves symbols, applies relocations, parses parameters and
   registers sysfs entries.  Returns the new module (state COMING, not
   yet on the modules list) or an ERR_PTR.  Caller holds module_mutex.
   The error ladder at the bottom unwinds in reverse order of setup. */
static struct module *load_module(void __user *umod,
				  unsigned long len,
				  const char __user *uargs)
{
	Elf_Ehdr *hdr;
	Elf_Shdr *sechdrs;
	char *secstrings, *args, *modmagic, *strtab = NULL;
	unsigned int i, symindex = 0, strindex = 0, setupindex, exindex,
		exportindex, modindex, obsparmindex, infoindex, gplindex,
		crcindex, gplcrcindex, versindex, pcpuindex, gplfutureindex,
		gplfuturecrcindex;
	struct module *mod;
	long err = 0;
	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
	struct exception_table_entry *extable;
	mm_segment_t old_fs;

	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);
	if (len < sizeof(*hdr))
		return ERR_PTR(-ENOEXEC);

	/* Suck in entire file: we'll want most of it. */
	/* vmalloc barfs on "unusual" numbers.  Check here */
	if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(hdr, umod, len) != 0) {
		err = -EFAULT;
		goto free_hdr;
	}

	/* Sanity checks against insmoding binaries or wrong arch,
	   weird elf version */
	if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
	    || hdr->e_type != ET_REL
	    || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(*sechdrs)) {
		err = -ENOEXEC;
		goto free_hdr;
	}

	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr))
		goto truncated;

	/* Convenience variables */
	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	sechdrs[0].sh_addr = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_NOBITS
		    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size)
			goto truncated;

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		sechdrs[i].sh_addr = (size_t)hdr + sechdrs[i].sh_offset;

		/* Internal symbols and strings. */
		if (sechdrs[i].sh_type == SHT_SYMTAB) {
			symindex = i;
			strindex = sechdrs[i].sh_link;
			strtab = (char *)hdr + sechdrs[strindex].sh_offset;
		}
#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strncmp(secstrings+sechdrs[i].sh_name, ".exit", 5) == 0)
			sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* The compiler emitted a 'struct module' into this section. */
	modindex = find_sec(hdr, sechdrs, secstrings,
			    ".gnu.linkonce.this_module");
	if (!modindex) {
		printk(KERN_WARNING "No module found in object\n");
		err = -ENOEXEC;
		goto free_hdr;
	}
	mod = (void *)sechdrs[modindex].sh_addr;

	if (symindex == 0) {
		printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
		       mod->name);
		err = -ENOEXEC;
		goto free_hdr;
	}

	/* Optional sections */
	exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
	gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
	gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
	crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
	gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
	gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
	setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
	exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
	obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
	versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
	infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
	pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);

	/* Don't keep modinfo section */
	sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
#ifdef CONFIG_KALLSYMS
	/* Keep symbol and string tables for decoding later. */
	sechdrs[symindex].sh_flags |= SHF_ALLOC;
	sechdrs[strindex].sh_flags |= SHF_ALLOC;
#endif

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(sechdrs, versindex, mod)) {
		err = -ENOEXEC;
		goto free_hdr;
	}

	modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		add_taint(TAINT_FORCED_MODULE);
		printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
		       mod->name);
	} else if (!same_magic(modmagic, vermagic)) {
		printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		err = -ENOEXEC;
		goto free_hdr;
	}

	/* Now copy in args */
	args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(args)) {
		err = PTR_ERR(args);
		goto free_hdr;
	}

	if (find_module(mod->name)) {
		err = -EEXIST;
		goto free_mod;
	}

	mod->state = MODULE_STATE_COMING;

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(hdr, sechdrs, secstrings, mod);
	if (err < 0)
		goto free_mod;

	if (pcpuindex) {
		/* We have a special allocation for this section. */
		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
					 sechdrs[pcpuindex].sh_addralign,
					 mod->name);
		if (!percpu) {
			err = -ENOMEM;
			goto free_mod;
		}
		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
		mod->percpu = percpu;
	}

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, hdr, sechdrs, secstrings);

	/* Do the allocs. */
	ptr = module_alloc(mod->core_size);
	if (!ptr) {
		err = -ENOMEM;
		goto free_percpu;
	}
	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	ptr = module_alloc(mod->init_size);
	if (!ptr && mod->init_size) {
		err = -ENOMEM;
		goto free_core;
	}
	memset(ptr, 0, mod->init_size);
	mod->module_init = ptr;

	/* Transfer each section which specifies SHF_ALLOC */
	DEBUGP("final section addresses:\n");
	for (i = 0; i < hdr->e_shnum; i++) {
		void *dest;

		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		/* High bit of sh_entsize (set by layout_sections)
		   selects the init region. */
		if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + sechdrs[i].sh_entsize;

		if (sechdrs[i].sh_type != SHT_NOBITS)
			memcpy(dest, (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size);
		/* Update sh_addr to point to copy in image. */
		sechdrs[i].sh_addr = (unsigned long)dest;
		DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
	}
	/* Module has been moved. */
	mod = (void *)sechdrs[modindex].sh_addr;

	/* Now we've moved module, initialize linked lists, etc. */
	module_unload_init(mod);

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));

	/* Known wrappers around proprietary drivers: taint. */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE);
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE);

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, sechdrs, infoindex);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
			       mod);
	if (err < 0)
		goto cleanup;

	/* Set up EXPORTed & EXPORT_GPLed symbols (section 0 is 0 length) */
	mod->num_syms = sechdrs[exportindex].sh_size / sizeof(*mod->syms);
	mod->syms = (void *)sechdrs[exportindex].sh_addr;
	if (crcindex)
		mod->crcs = (void *)sechdrs[crcindex].sh_addr;
	mod->num_gpl_syms = sechdrs[gplindex].sh_size / sizeof(*mod->gpl_syms);
	mod->gpl_syms = (void *)sechdrs[gplindex].sh_addr;
	if (gplcrcindex)
		mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
	mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size /
					sizeof(*mod->gpl_future_syms);
	mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr;
	if (gplfuturecrcindex)
		mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr;

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !crcindex) ||
	    (mod->num_gpl_syms && !gplcrcindex) ||
	    (mod->num_gpl_future_syms && !gplfuturecrcindex)) {
		printk(KERN_WARNING "%s: No versions for exported symbols."
		       " Tainting kernel.\n", mod->name);
		add_taint(TAINT_FORCED_MODULE);
	}
#endif

	/* Now do relocations. */
	for (i = 1; i < hdr->e_shnum; i++) {
		const char *strtab = (char *)sechdrs[strindex].sh_addr;
		unsigned int info = sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (info >= hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(sechdrs[info].sh_flags & SHF_ALLOC))
			continue;

		if (sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(sechdrs, strtab, symindex, i,mod);
		else if (sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(sechdrs, strtab, symindex, i,
						 mod);
		if (err < 0)
			goto cleanup;
	}

	/* Find duplicate symbols */
	err = verify_export_symbols(mod);

	if (err < 0)
		goto cleanup;

	/* Set up and sort exception table */
	mod->num_exentries = sechdrs[exindex].sh_size / sizeof(*mod->extable);
	mod->extable = extable = (void *)sechdrs[exindex].sh_addr;
	sort_extable(extable, extable + mod->num_exentries);

	/* Finally, copy percpu area over. */
	percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
		       sechdrs[pcpuindex].sh_size);

	add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);

	err = module_finalize(hdr, sechdrs, mod);
	if (err < 0)
		goto cleanup;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);

	mod->args = args;
	if (obsparmindex)
		printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
		       mod->name);

	/* Size of section 0 is 0, so this works well if no params */
	err = parse_args(mod->name, mod->args,
			 (struct kernel_param *)
			 sechdrs[setupindex].sh_addr,
			 sechdrs[setupindex].sh_size
			 / sizeof(struct kernel_param),
			 NULL);
	if (err < 0)
		goto arch_cleanup;

	err = mod_sysfs_setup(mod,
			      (struct kernel_param *)
			      sechdrs[setupindex].sh_addr,
			      sechdrs[setupindex].sh_size
			      / sizeof(struct kernel_param));
	if (err < 0)
		goto arch_cleanup;
	add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);

	/* Get rid of temporary copy */
	vfree(hdr);

	/* Done! */
	return mod;

 arch_cleanup:
	module_arch_cleanup(mod);
 cleanup:
	module_unload_free(mod);
	module_free(mod, mod->module_init);
 free_core:
	module_free(mod, mod->module_core);
 free_percpu:
	if (percpu)
		percpu_modfree(percpu);
 free_mod:
	kfree(args);
 free_hdr:
	vfree(hdr);
	return ERR_PTR(err);

 truncated:
	printk(KERN_ERR "Module len %lu truncated\n", len);
	err = -ENOEXEC;
	goto free_hdr;
}
  1531. /*
  1532. * link the module with the whole machine is stopped with interrupts off
  1533. * - this defends against kallsyms not taking locks
  1534. */
  1535. static int __link_module(void *_mod)
  1536. {
  1537. struct module *mod = _mod;
  1538. list_add(&mod->list, &modules);
  1539. return 0;
  1540. }
/* This is where the real work happens.
 *
 * init_module(2): load the module image from userspace, link it into
 * the module list, run its init function, then free the init region.
 * A failed init unwinds and frees the module (unless leaked refcounts
 * make that unsafe). */
asmlinkage long
sys_init_module(void __user *umod,
		unsigned long len,
		const char __user *uargs)
{
	struct module *mod;
	int ret = 0;

	/* Must have permission */
	if (!capable(CAP_SYS_MODULE))
		return -EPERM;

	/* Only one module load at a time, please */
	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	/* Do all the hard work */
	mod = load_module(umod, len, uargs);
	if (IS_ERR(mod)) {
		mutex_unlock(&module_mutex);
		return PTR_ERR(mod);
	}

	/* Now sew it into the lists.  They won't access us, since
	   strong_try_module_get() will fail. */
	stop_machine_run(__link_module, mod, NR_CPUS);

	/* Drop lock so they can recurse */
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
			MODULE_STATE_COMING, mod);

	/* Start the module */
	if (mod->init != NULL)
		ret = mod->init();
	if (ret < 0) {
		/* Init routine failed: abort.  Try to protect us from
		   buggy refcounters. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		/* mod->unsafe means someone grabbed a reference we can
		   never get back: freeing would oops them later. */
		if (mod->unsafe)
			printk(KERN_ERR "%s: module is now stuck!\n",
			       mod->name);
		else {
			module_put(mod);
			mutex_lock(&module_mutex);
			free_module(mod);
			mutex_unlock(&module_mutex);
		}
		return ret;
	}

	/* Now it's a first class citizen! */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_LIVE;
	/* Drop initial reference. */
	module_put(mod);
	/* Init is done: the init text/data region is no longer needed. */
	module_free(mod, mod->module_init);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_text_size = 0;
	mutex_unlock(&module_mutex);

	return 0;
}
  1599. static inline int within(unsigned long addr, void *start, unsigned long size)
  1600. {
  1601. return ((void *)addr >= start && (void *)addr < start + size);
  1602. }
  1603. #ifdef CONFIG_KALLSYMS
  1604. /*
  1605. * This ignores the intensely annoying "mapping symbols" found
  1606. * in ARM ELF files: $a, $t and $d.
  1607. */
  1608. static inline int is_arm_mapping_symbol(const char *str)
  1609. {
  1610. return str[0] == '$' && strchr("atd", str[1])
  1611. && (str[2] == '\0' || str[2] == '.');
  1612. }
/* Resolve an address inside 'mod' to the name of the closest
 * preceding named symbol.  On success, *size is set to the symbol's
 * extent (distance to the next symbol or to the end of the text
 * section) and *offset to addr's offset within it.  Returns NULL if
 * no named symbol at or below addr exists.  No locking here; callers
 * run in oops/debug paths. */
static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;

	/* At worse, next value is at end of module */
	if (within(addr, mod->module_init, mod->init_size))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for closest preceeding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < mod->num_symtab; i++) {
		if (mod->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (mod->symtab[i].st_value <= addr
		    && mod->symtab[i].st_value > mod->symtab[best].st_value
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			best = i;
		/* Track the lowest symbol value above addr so *size can
		 * be computed as the gap to it. */
		if (mod->symtab[i].st_value > addr
		    && mod->symtab[i].st_value < nextval
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			nextval = mod->symtab[i].st_value;
	}

	/* best never moved off entry 0: no named symbol precedes addr. */
	if (!best)
		return NULL;

	*size = nextval - mod->symtab[best].st_value;
	*offset = addr - mod->symtab[best].st_value;
	return mod->strtab + mod->symtab[best].st_name;
}
  1649. /* For kallsyms to ask for address resolution. NULL means not found.
  1650. We don't lock, as this is used for oops resolution and races are a
  1651. lesser concern. */
  1652. const char *module_address_lookup(unsigned long addr,
  1653. unsigned long *size,
  1654. unsigned long *offset,
  1655. char **modname)
  1656. {
  1657. struct module *mod;
  1658. list_for_each_entry(mod, &modules, list) {
  1659. if (within(addr, mod->module_init, mod->init_size)
  1660. || within(addr, mod->module_core, mod->core_size)) {
  1661. *modname = mod->name;
  1662. return get_ksymbol(mod, addr, size, offset);
  1663. }
  1664. }
  1665. return NULL;
  1666. }
  1667. struct module *module_get_kallsym(unsigned int symnum,
  1668. unsigned long *value,
  1669. char *type,
  1670. char namebuf[128])
  1671. {
  1672. struct module *mod;
  1673. mutex_lock(&module_mutex);
  1674. list_for_each_entry(mod, &modules, list) {
  1675. if (symnum < mod->num_symtab) {
  1676. *value = mod->symtab[symnum].st_value;
  1677. *type = mod->symtab[symnum].st_info;
  1678. strncpy(namebuf,
  1679. mod->strtab + mod->symtab[symnum].st_name,
  1680. 127);
  1681. mutex_unlock(&module_mutex);
  1682. return mod;
  1683. }
  1684. symnum -= mod->num_symtab;
  1685. }
  1686. mutex_unlock(&module_mutex);
  1687. return NULL;
  1688. }
  1689. static unsigned long mod_find_symname(struct module *mod, const char *name)
  1690. {
  1691. unsigned int i;
  1692. for (i = 0; i < mod->num_symtab; i++)
  1693. if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
  1694. mod->symtab[i].st_info != 'U')
  1695. return mod->symtab[i].st_value;
  1696. return 0;
  1697. }
  1698. /* Look for this name: can be of form module:name. */
  1699. unsigned long module_kallsyms_lookup_name(const char *name)
  1700. {
  1701. struct module *mod;
  1702. char *colon;
  1703. unsigned long ret = 0;
  1704. /* Don't lock: we're in enough trouble already. */
  1705. if ((colon = strchr(name, ':')) != NULL) {
  1706. *colon = '\0';
  1707. if ((mod = find_module(name)) != NULL)
  1708. ret = mod_find_symname(mod, colon+1);
  1709. *colon = ':';
  1710. } else {
  1711. list_for_each_entry(mod, &modules, list)
  1712. if ((ret = mod_find_symname(mod, name)) != 0)
  1713. break;
  1714. }
  1715. return ret;
  1716. }
  1717. #endif /* CONFIG_KALLSYMS */
  1718. /* Called by the /proc file system to return a list of modules. */
  1719. static void *m_start(struct seq_file *m, loff_t *pos)
  1720. {
  1721. struct list_head *i;
  1722. loff_t n = 0;
  1723. mutex_lock(&module_mutex);
  1724. list_for_each(i, &modules) {
  1725. if (n++ == *pos)
  1726. break;
  1727. }
  1728. if (i == &modules)
  1729. return NULL;
  1730. return i;
  1731. }
  1732. static void *m_next(struct seq_file *m, void *p, loff_t *pos)
  1733. {
  1734. struct list_head *i = p;
  1735. (*pos)++;
  1736. if (i->next == &modules)
  1737. return NULL;
  1738. return i->next;
  1739. }
/* seq_file stop callback: releases the mutex taken in m_start(). */
static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}
  1744. static int m_show(struct seq_file *m, void *p)
  1745. {
  1746. struct module *mod = list_entry(p, struct module, list);
  1747. seq_printf(m, "%s %lu",
  1748. mod->name, mod->init_size + mod->core_size);
  1749. print_unload_info(m, mod);
  1750. /* Informative for users. */
  1751. seq_printf(m, " %s",
  1752. mod->state == MODULE_STATE_GOING ? "Unloading":
  1753. mod->state == MODULE_STATE_COMING ? "Loading":
  1754. "Live");
  1755. /* Used by oprofile and other similar tools. */
  1756. seq_printf(m, " 0x%p", mod->module_core);
  1757. seq_printf(m, "\n");
  1758. return 0;
  1759. }
/* Format: modulename size refcount deps address
   Where refcount is a number or -, and deps is a comma-separated list
   of depends or -.

   seq_file operations backing /proc/modules; the whole iteration runs
   under module_mutex (taken in m_start, dropped in m_stop). */
struct seq_operations modules_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = m_show
};
  1770. /* Given an address, look for it in the module exception tables. */
  1771. const struct exception_table_entry *search_module_extables(unsigned long addr)
  1772. {
  1773. unsigned long flags;
  1774. const struct exception_table_entry *e = NULL;
  1775. struct module *mod;
  1776. spin_lock_irqsave(&modlist_lock, flags);
  1777. list_for_each_entry(mod, &modules, list) {
  1778. if (mod->num_exentries == 0)
  1779. continue;
  1780. e = search_extable(mod->extable,
  1781. mod->extable + mod->num_exentries - 1,
  1782. addr);
  1783. if (e)
  1784. break;
  1785. }
  1786. spin_unlock_irqrestore(&modlist_lock, flags);
  1787. /* Now, if we found one, we are running inside it now, hence
  1788. we cannot unload the module, hence no refcnt needed. */
  1789. return e;
  1790. }
  1791. /* Is this a valid kernel address? We don't grab the lock: we are oopsing. */
  1792. struct module *__module_text_address(unsigned long addr)
  1793. {
  1794. struct module *mod;
  1795. list_for_each_entry(mod, &modules, list)
  1796. if (within(addr, mod->module_init, mod->init_text_size)
  1797. || within(addr, mod->module_core, mod->core_text_size))
  1798. return mod;
  1799. return NULL;
  1800. }
  1801. struct module *module_text_address(unsigned long addr)
  1802. {
  1803. struct module *mod;
  1804. unsigned long flags;
  1805. spin_lock_irqsave(&modlist_lock, flags);
  1806. mod = __module_text_address(addr);
  1807. spin_unlock_irqrestore(&modlist_lock, flags);
  1808. return mod;
  1809. }
  1810. /* Don't grab lock, we're oopsing. */
  1811. void print_modules(void)
  1812. {
  1813. struct module *mod;
  1814. printk("Modules linked in:");
  1815. list_for_each_entry(mod, &modules, list)
  1816. printk(" %s", mod->name);
  1817. printk("\n");
  1818. }
  1819. void module_add_driver(struct module *mod, struct device_driver *drv)
  1820. {
  1821. if (!mod || !drv)
  1822. return;
  1823. /* Don't check return code; this call is idempotent */
  1824. sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
  1825. }
  1826. EXPORT_SYMBOL(module_add_driver);
  1827. void module_remove_driver(struct device_driver *drv)
  1828. {
  1829. if (!drv)
  1830. return;
  1831. sysfs_remove_link(&drv->kobj, "module");
  1832. }
  1833. EXPORT_SYMBOL(module_remove_driver);
  1834. #ifdef CONFIG_MODVERSIONS
/* Generate the signature for struct module here, too, for modversions.
 * Never called at runtime: exporting it makes modversions compute a CRC
 * over struct module's layout, so loading a module built against a
 * different layout fails instead of corrupting memory. */
void struct_module(struct module *mod) { return; }
EXPORT_SYMBOL(struct_module);
  1838. #endif