  1. /*
  2. * linux/kernel/printk.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. *
  6. * Modified to make sys_syslog() more flexible: added commands to
  7. * return the last 4k of kernel messages, regardless of whether
  8. * they've been read or not. Added option to suppress kernel printk's
  9. * to the console. Added hook for sending the console messages
  10. * elsewhere, in preparation for a serial line console (someday).
  11. * Ted Ts'o, 2/11/93.
  12. * Modified for sysctl support, 1/8/97, Chris Horn.
  13. * Fixed SMP synchronization, 08/08/99, Manfred Spraul
  14. * manfred@colorfullife.com
  15. * Rewrote bits to get rid of console_lock
  16. * 01Mar01 Andrew Morton
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/mm.h>
  20. #include <linux/tty.h>
  21. #include <linux/tty_driver.h>
  22. #include <linux/console.h>
  23. #include <linux/init.h>
  24. #include <linux/jiffies.h>
  25. #include <linux/nmi.h>
  26. #include <linux/module.h>
  27. #include <linux/moduleparam.h>
  28. #include <linux/interrupt.h> /* For in_interrupt() */
  29. #include <linux/delay.h>
  30. #include <linux/smp.h>
  31. #include <linux/security.h>
  32. #include <linux/bootmem.h>
  33. #include <linux/memblock.h>
  34. #include <linux/syscalls.h>
  35. #include <linux/kexec.h>
  36. #include <linux/kdb.h>
  37. #include <linux/ratelimit.h>
  38. #include <linux/kmsg_dump.h>
  39. #include <linux/syslog.h>
  40. #include <linux/cpu.h>
  41. #include <linux/notifier.h>
  42. #include <linux/rculist.h>
  43. #include <linux/poll.h>
  44. #include <linux/irq_work.h>
  45. #include <asm/uaccess.h>
  46. #define CREATE_TRACE_POINTS
  47. #include <trace/events/printk.h>
  48. /*
  49. * Architectures can override it:
  50. */
  51. void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
  52. {
  53. }
  54. /* printk's without a loglevel use this.. */
  55. #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
  56. /* We show everything that is MORE important than this.. */
  57. #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
  58. #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
  59. int console_printk[4] = {
  60. DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
  61. DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
  62. MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
  63. DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
  64. };
  65. /*
  66. * Low level drivers may need that to know if they can schedule in
  67. * their unblank() callback or not. So let's export it.
  68. */
  69. int oops_in_progress;
  70. EXPORT_SYMBOL(oops_in_progress);
  71. /*
  72. * console_sem protects the console_drivers list, and also
  73. * provides serialisation for access to the entire console
  74. * driver system.
  75. */
  76. static DEFINE_SEMAPHORE(console_sem);
  77. struct console *console_drivers;
  78. EXPORT_SYMBOL_GPL(console_drivers);
  79. #ifdef CONFIG_LOCKDEP
  80. static struct lockdep_map console_lock_dep_map = {
  81. .name = "console_lock"
  82. };
  83. #endif
  84. /*
  85. * This is used for debugging the mess that is the VT code by
  86. * keeping track of whether we have the console semaphore held. It's
  87. * definitely not the perfect debug tool (we don't know if _WE_
  88. * hold it and are racing), but it helps tracking those weird code
  89. * paths in the console code where we end up in places I want
  90. * locked without the console semaphore held.
  91. */
  92. static int console_locked, console_suspended;
  93. /*
  94. * If exclusive_console is non-NULL then only this console is to be printed to.
  95. */
  96. static struct console *exclusive_console;
  97. /*
  98. * Array of consoles built from command line options (console=)
  99. */
  100. struct console_cmdline
  101. {
  102. char name[8]; /* Name of the driver */
  103. int index; /* Minor dev. to use */
  104. char *options; /* Options for the driver */
  105. #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
  106. char *brl_options; /* Options for braille driver */
  107. #endif
  108. };
  109. #define MAX_CMDLINECONSOLES 8
  110. static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
  111. static int selected_console = -1;
  112. static int preferred_console = -1;
  113. int console_set_on_cmdline;
  114. EXPORT_SYMBOL(console_set_on_cmdline);
  115. /* Flag: console code may call schedule() */
  116. static int console_may_schedule;
  117. /*
  118. * The printk log buffer consists of a chain of concatenated variable
  119. * length records. Every record starts with a record header, containing
  120. * the overall length of the record.
  121. *
  122. * The heads to the first and last entry in the buffer, as well as the
  123. * sequence numbers of both entries are maintained when messages
  124. * are stored.
  125. *
  126. * If the heads indicate available messages, the length in the header
  127. * tells where the next message starts. A length == 0 for the next message
  128. * indicates a wrap-around to the beginning of the buffer.
  129. *
  130. * Every record carries the monotonic timestamp in nanoseconds, as well as
  131. * the standard userspace syslog level and syslog facility. The usual
  132. * kernel messages use LOG_KERN; userspace-injected messages always carry
  133. * a matching syslog facility, by default LOG_USER. The origin of every
  134. * message can be reliably determined that way.
  135. *
  136. * The human readable log message directly follows the message header. The
  137. * length of the message text is stored in the header, the stored message
  138. * is not terminated.
  139. *
  140. * Optionally, a message can carry a dictionary of properties (key/value pairs),
  141. * to provide userspace with a machine-readable message context.
  142. *
  143. * Examples for well-defined, commonly used property names are:
  144. * DEVICE=b12:8 device identifier
  145. * b12:8 block dev_t
  146. * c127:3 char dev_t
  147. * n8 netdev ifindex
  148. * +sound:card0 subsystem:devname
  149. * SUBSYSTEM=pci driver-core subsystem name
  150. *
  151. * Valid characters in property names are [a-zA-Z0-9._-]. The plain text value
  152. * follows directly after a '=' character. Every property is terminated by
  153. * a '\0' character. The last property is not terminated.
  154. *
  155. * Example of a message structure:
  156. *   0000  ff 8f 00 00 00 00 00 00      monotonic time in nsec
  157. *   0008  34 00                        record is 52 bytes long
  158. *   000a        0b 00                  text is 11 bytes long
  159. *   000c              16 00            dictionary is 22 bytes long
  160. *   000e                    03 00      LOG_KERN (facility) LOG_ERR (level)
  161. *   0010  69 74 27 73 20 61 20 6c      "it's a l"
  162. *         69 6e 65                     "ine"
  163. *   001b        44 45 56 49 43         "DEVIC"
  164. *         45 3d 62 38 3a 32 00 44      "E=b8:2\0D"
  165. *         52 49 56 45 52 3d 62 75      "RIVER=bu"
  166. *         67                           "g"
  167. *   0031     00 00 00                  padding to next message header
  168. *
  169. * The 'struct log' buffer header must never be directly exported to
  170. * userspace, it is a kernel-private implementation detail that might
  171. * need to be changed in the future, when the requirements change.
  172. *
  173. * /dev/kmsg exports the structured data in the following line format:
  174. * "level,sequnum,timestamp;<message text>\n"
  175. *
  176. * The optional key/value pairs are attached as continuation lines starting
  177. * with a space character and terminated by a newline. All possible
  178. * non-printable characters are escaped in the "\xff" notation.
  179. *
  180. * Users of the export format should ignore possible additional values
  181. * separated by ',', and find the message after the ';' character.
  182. */
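/*
 * As a rough illustration of the export format described above, a minimal
 * userspace sketch (not kernel code; the helper name is made up) could split
 * one /dev/kmsg line into its prefix fields and message text like this:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void parse_kmsg_line(const char *line)
 *	{
 *		unsigned int prefix;
 *		unsigned long long seq, ts_usec;
 *		const char *text = strchr(line, ';');
 *
 *		if (!text || sscanf(line, "%u,%llu,%llu", &prefix, &seq, &ts_usec) < 3)
 *			return;	/* malformed or truncated line */
 *		/* lower 3 bits: syslog level, remaining bits: facility */
 *		printf("facility=%u level=%u seq=%llu usec=%llu text=%s",
 *		       prefix >> 3, prefix & 7, seq, ts_usec, text + 1);
 *	}
 *
 * Any additional comma-separated fields before the ';' are skipped by the
 * strchr(), and continuation lines starting with a space would carry the
 * optional dictionary.
 */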
  183. enum log_flags {
  184. LOG_NOCONS = 1, /* already flushed, do not print to console */
  185. LOG_NEWLINE = 2, /* text ended with a newline */
  186. LOG_PREFIX = 4, /* text started with a prefix */
  187. LOG_CONT = 8, /* text is a fragment of a continuation line */
  188. };
  189. struct log {
  190. u64 ts_nsec; /* timestamp in nanoseconds */
  191. u16 len; /* length of entire record */
  192. u16 text_len; /* length of text buffer */
  193. u16 dict_len; /* length of dictionary buffer */
  194. u8 facility; /* syslog facility */
  195. u8 flags:5; /* internal record flags */
  196. u8 level:3; /* syslog level */
  197. };
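/*
 * With the layout above, the record header is 8 + 2 + 2 + 2 + 1 + 1 = 16
 * bytes, which is why the text in the example dump in the comment at the top
 * of this file starts at offset 0x10.
 */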
  198. /*
  199. * The logbuf_lock protects kmsg buffer, indices, counters. It is also
  200. * used in interesting ways to provide interlocking in console_unlock();
  201. */
  202. static DEFINE_RAW_SPINLOCK(logbuf_lock);
  203. #ifdef CONFIG_PRINTK
  204. DECLARE_WAIT_QUEUE_HEAD(log_wait);
  205. /* the next printk record to read by syslog(READ) or /proc/kmsg */
  206. static u64 syslog_seq;
  207. static u32 syslog_idx;
  208. static enum log_flags syslog_prev;
  209. static size_t syslog_partial;
  210. /* index and sequence number of the first record stored in the buffer */
  211. static u64 log_first_seq;
  212. static u32 log_first_idx;
  213. /* index and sequence number of the next record to store in the buffer */
  214. static u64 log_next_seq;
  215. static u32 log_next_idx;
  216. /* the next printk record to write to the console */
  217. static u64 console_seq;
  218. static u32 console_idx;
  219. static enum log_flags console_prev;
  220. /* the next printk record to read after the last 'clear' command */
  221. static u64 clear_seq;
  222. static u32 clear_idx;
  223. #define PREFIX_MAX 32
  224. #define LOG_LINE_MAX (1024 - PREFIX_MAX)
  225. /* record buffer */
  226. #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  227. #define LOG_ALIGN 4
  228. #else
  229. #define LOG_ALIGN __alignof__(struct log)
  230. #endif
  231. #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
  232. static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
  233. static char *log_buf = __log_buf;
  234. static u32 log_buf_len = __LOG_BUF_LEN;
  235. /* cpu currently holding logbuf_lock */
  236. static volatile unsigned int logbuf_cpu = UINT_MAX;
  237. /* human readable text of the record */
  238. static char *log_text(const struct log *msg)
  239. {
  240. return (char *)msg + sizeof(struct log);
  241. }
  242. /* optional key/value pair dictionary attached to the record */
  243. static char *log_dict(const struct log *msg)
  244. {
  245. return (char *)msg + sizeof(struct log) + msg->text_len;
  246. }
  247. /* get record by index; idx must point to valid msg */
  248. static struct log *log_from_idx(u32 idx)
  249. {
  250. struct log *msg = (struct log *)(log_buf + idx);
  251. /*
  252. * A length == 0 record is the end of buffer marker. Wrap around and
  253. * read the message at the start of the buffer.
  254. */
  255. if (!msg->len)
  256. return (struct log *)log_buf;
  257. return msg;
  258. }
  259. /* get next record; idx must point to valid msg */
  260. static u32 log_next(u32 idx)
  261. {
  262. struct log *msg = (struct log *)(log_buf + idx);
  263. /* length == 0 indicates the end of the buffer; wrap */
  264. /*
  265. * A length == 0 record is the end of buffer marker. Wrap around and
  266. * read the message at the start of the buffer as *this* one, and
  267. * return the one after that.
  268. */
  269. if (!msg->len) {
  270. msg = (struct log *)log_buf;
  271. return msg->len;
  272. }
  273. return idx + msg->len;
  274. }
  275. /* insert record into the buffer, discard old ones, update heads */
  276. static void log_store(int facility, int level,
  277. enum log_flags flags, u64 ts_nsec,
  278. const char *dict, u16 dict_len,
  279. const char *text, u16 text_len)
  280. {
  281. struct log *msg;
  282. u32 size, pad_len;
  283. /* number of '\0' padding bytes to next message */
  284. size = sizeof(struct log) + text_len + dict_len;
  285. pad_len = (-size) & (LOG_ALIGN - 1);
  286. size += pad_len;
  287. while (log_first_seq < log_next_seq) {
  288. u32 free;
  289. if (log_next_idx > log_first_idx)
  290. free = max(log_buf_len - log_next_idx, log_first_idx);
  291. else
  292. free = log_first_idx - log_next_idx;
  293. if (free > size + sizeof(struct log))
  294. break;
  295. /* drop old messages until we have enough continuous space */
  296. log_first_idx = log_next(log_first_idx);
  297. log_first_seq++;
  298. }
  299. if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
  300. /*
  301. * This message + an additional empty header does not fit
  302. * at the end of the buffer. Add an empty header with len == 0
  303. * to signify a wrap around.
  304. */
  305. memset(log_buf + log_next_idx, 0, sizeof(struct log));
  306. log_next_idx = 0;
  307. }
  308. /* fill message */
  309. msg = (struct log *)(log_buf + log_next_idx);
  310. memcpy(log_text(msg), text, text_len);
  311. msg->text_len = text_len;
  312. memcpy(log_dict(msg), dict, dict_len);
  313. msg->dict_len = dict_len;
  314. msg->facility = facility;
  315. msg->level = level & 7;
  316. msg->flags = flags & 0x1f;
  317. if (ts_nsec > 0)
  318. msg->ts_nsec = ts_nsec;
  319. else
  320. msg->ts_nsec = local_clock();
  321. memset(log_dict(msg) + dict_len, 0, pad_len);
  322. msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
  323. /* insert message */
  324. log_next_idx += msg->len;
  325. log_next_seq++;
  326. }
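/*
 * Worked example of the wrap-around handling above (illustrative numbers,
 * assuming a 128-byte buffer and 16-byte headers): with log_next_idx == 100
 * and an incoming record of size 40, 100 + 40 + 16 >= 128, so a zero-length
 * header is written at offset 100 and log_next_idx is reset to 0. Readers
 * that see msg->len == 0 in log_from_idx()/log_next() treat it as the wrap
 * marker and continue at the start of the buffer.
 */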
  327. /* /dev/kmsg - userspace message inject/listen interface */
  328. struct devkmsg_user {
  329. u64 seq;
  330. u32 idx;
  331. enum log_flags prev;
  332. struct mutex lock;
  333. char buf[8192];
  334. };
  335. static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
  336. unsigned long count, loff_t pos)
  337. {
  338. char *buf, *line;
  339. int i;
  340. int level = default_message_loglevel;
  341. int facility = 1; /* LOG_USER */
  342. size_t len = iov_length(iv, count);
  343. ssize_t ret = len;
  344. if (len > LOG_LINE_MAX)
  345. return -EINVAL;
  346. buf = kmalloc(len+1, GFP_KERNEL);
  347. if (buf == NULL)
  348. return -ENOMEM;
  349. line = buf;
  350. for (i = 0; i < count; i++) {
  351. if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) {
  352. ret = -EFAULT;
  353. goto out;
  354. }
  355. line += iv[i].iov_len;
  356. }
  357. /*
  358. * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
  359. * the decimal value represents a 32-bit quantity; the lower 3 bits are the log
  360. * level, the rest is the log facility.
  361. *
  362. * If no prefix or no userspace facility is specified, we
  363. * enforce LOG_USER, to be able to reliably distinguish
  364. * kernel-generated messages from userspace-injected ones.
  365. */
  366. line = buf;
  367. if (line[0] == '<') {
  368. char *endp = NULL;
  369. i = simple_strtoul(line+1, &endp, 10);
  370. if (endp && endp[0] == '>') {
  371. level = i & 7;
  372. if (i >> 3)
  373. facility = i >> 3;
  374. endp++;
  375. len -= endp - line;
  376. line = endp;
  377. }
  378. }
  379. line[len] = '\0';
  380. printk_emit(facility, level, NULL, 0, "%s", line);
  381. out:
  382. kfree(buf);
  383. return ret;
  384. }
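/*
 * Userspace can inject a record through the path above simply by writing one
 * line with an optional <prefix> to /dev/kmsg. A minimal sketch (illustrative
 * userspace code, not part of the kernel):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int inject_kmsg(void)
 *	{
 *		/* <14> = facility LOG_USER (1) << 3 | level LOG_INFO (6) */
 *		static const char msg[] = "<14>hello from userspace\n";
 *		int fd = open("/dev/kmsg", O_WRONLY);
 *		int ret = 0;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, msg, strlen(msg)) < 0)
 *			ret = -1;
 *		close(fd);
 *		return ret;
 *	}
 *
 * Without a prefix, the record would be stored with facility LOG_USER and the
 * default message loglevel, keeping kernel and userspace records apart.
 */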
  385. static ssize_t devkmsg_read(struct file *file, char __user *buf,
  386. size_t count, loff_t *ppos)
  387. {
  388. struct devkmsg_user *user = file->private_data;
  389. struct log *msg;
  390. u64 ts_usec;
  391. size_t i;
  392. char cont = '-';
  393. size_t len;
  394. ssize_t ret;
  395. if (!user)
  396. return -EBADF;
  397. ret = mutex_lock_interruptible(&user->lock);
  398. if (ret)
  399. return ret;
  400. raw_spin_lock_irq(&logbuf_lock);
  401. while (user->seq == log_next_seq) {
  402. if (file->f_flags & O_NONBLOCK) {
  403. ret = -EAGAIN;
  404. raw_spin_unlock_irq(&logbuf_lock);
  405. goto out;
  406. }
  407. raw_spin_unlock_irq(&logbuf_lock);
  408. ret = wait_event_interruptible(log_wait,
  409. user->seq != log_next_seq);
  410. if (ret)
  411. goto out;
  412. raw_spin_lock_irq(&logbuf_lock);
  413. }
  414. if (user->seq < log_first_seq) {
  415. /* our last seen message is gone, return error and reset */
  416. user->idx = log_first_idx;
  417. user->seq = log_first_seq;
  418. ret = -EPIPE;
  419. raw_spin_unlock_irq(&logbuf_lock);
  420. goto out;
  421. }
  422. msg = log_from_idx(user->idx);
  423. ts_usec = msg->ts_nsec;
  424. do_div(ts_usec, 1000);
  425. /*
  426. * If we couldn't merge continuation line fragments during the print,
  427. * export the stored flags to allow an optional external merge of the
  428. * records. Merging the records isn't always necessarily correct, like
  429. * when we hit a race during printing. In most cases though, it produces
  430. * more readable output. 'c' in the record flags marks the first
  431. * fragment of a line, '+' the following ones.
  432. */
  433. if (msg->flags & LOG_CONT && !(user->prev & LOG_CONT))
  434. cont = 'c';
  435. else if ((msg->flags & LOG_CONT) ||
  436. ((user->prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
  437. cont = '+';
  438. len = sprintf(user->buf, "%u,%llu,%llu,%c;",
  439. (msg->facility << 3) | msg->level,
  440. user->seq, ts_usec, cont);
  441. user->prev = msg->flags;
  442. /* escape non-printable characters */
  443. for (i = 0; i < msg->text_len; i++) {
  444. unsigned char c = log_text(msg)[i];
  445. if (c < ' ' || c >= 127 || c == '\\')
  446. len += sprintf(user->buf + len, "\\x%02x", c);
  447. else
  448. user->buf[len++] = c;
  449. }
  450. user->buf[len++] = '\n';
  451. if (msg->dict_len) {
  452. bool line = true;
  453. for (i = 0; i < msg->dict_len; i++) {
  454. unsigned char c = log_dict(msg)[i];
  455. if (line) {
  456. user->buf[len++] = ' ';
  457. line = false;
  458. }
  459. if (c == '\0') {
  460. user->buf[len++] = '\n';
  461. line = true;
  462. continue;
  463. }
  464. if (c < ' ' || c >= 127 || c == '\\') {
  465. len += sprintf(user->buf + len, "\\x%02x", c);
  466. continue;
  467. }
  468. user->buf[len++] = c;
  469. }
  470. user->buf[len++] = '\n';
  471. }
  472. user->idx = log_next(user->idx);
  473. user->seq++;
  474. raw_spin_unlock_irq(&logbuf_lock);
  475. if (len > count) {
  476. ret = -EINVAL;
  477. goto out;
  478. }
  479. if (copy_to_user(buf, user->buf, len)) {
  480. ret = -EFAULT;
  481. goto out;
  482. }
  483. ret = len;
  484. out:
  485. mutex_unlock(&user->lock);
  486. return ret;
  487. }
  488. static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
  489. {
  490. struct devkmsg_user *user = file->private_data;
  491. loff_t ret = 0;
  492. if (!user)
  493. return -EBADF;
  494. if (offset)
  495. return -ESPIPE;
  496. raw_spin_lock_irq(&logbuf_lock);
  497. switch (whence) {
  498. case SEEK_SET:
  499. /* the first record */
  500. user->idx = log_first_idx;
  501. user->seq = log_first_seq;
  502. break;
  503. case SEEK_DATA:
  504. /*
  505. * The first record after the last SYSLOG_ACTION_CLEAR,
  506. * like issued by 'dmesg -c'. Reading /dev/kmsg itself
  507. * changes no global state, and does not clear anything.
  508. */
  509. user->idx = clear_idx;
  510. user->seq = clear_seq;
  511. break;
  512. case SEEK_END:
  513. /* after the last record */
  514. user->idx = log_next_idx;
  515. user->seq = log_next_seq;
  516. break;
  517. default:
  518. ret = -EINVAL;
  519. }
  520. raw_spin_unlock_irq(&logbuf_lock);
  521. return ret;
  522. }
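/*
 * Illustrative userspace usage (not kernel code) matching the cases above:
 *
 *	int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);
 *
 *	lseek(fd, 0, SEEK_SET);	 // oldest record still in the buffer
 *	lseek(fd, 0, SEEK_DATA); // first record after the last SYSLOG_ACTION_CLEAR
 *	lseek(fd, 0, SEEK_END);	 // after the newest record, i.e. only new messages
 */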
  523. static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
  524. {
  525. struct devkmsg_user *user = file->private_data;
  526. int ret = 0;
  527. if (!user)
  528. return POLLERR|POLLNVAL;
  529. poll_wait(file, &log_wait, wait);
  530. raw_spin_lock_irq(&logbuf_lock);
  531. if (user->seq < log_next_seq) {
  532. /* return error when data has vanished underneath us */
  533. if (user->seq < log_first_seq)
  534. ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
  535. else ret = POLLIN|POLLRDNORM;
  536. }
  537. raw_spin_unlock_irq(&logbuf_lock);
  538. return ret;
  539. }
  540. static int devkmsg_open(struct inode *inode, struct file *file)
  541. {
  542. struct devkmsg_user *user;
  543. int err;
  544. /* write-only does not need any file context */
  545. if ((file->f_flags & O_ACCMODE) == O_WRONLY)
  546. return 0;
  547. err = security_syslog(SYSLOG_ACTION_READ_ALL);
  548. if (err)
  549. return err;
  550. user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
  551. if (!user)
  552. return -ENOMEM;
  553. mutex_init(&user->lock);
  554. raw_spin_lock_irq(&logbuf_lock);
  555. user->idx = log_first_idx;
  556. user->seq = log_first_seq;
  557. raw_spin_unlock_irq(&logbuf_lock);
  558. file->private_data = user;
  559. return 0;
  560. }
  561. static int devkmsg_release(struct inode *inode, struct file *file)
  562. {
  563. struct devkmsg_user *user = file->private_data;
  564. if (!user)
  565. return 0;
  566. mutex_destroy(&user->lock);
  567. kfree(user);
  568. return 0;
  569. }
  570. const struct file_operations kmsg_fops = {
  571. .open = devkmsg_open,
  572. .read = devkmsg_read,
  573. .aio_write = devkmsg_writev,
  574. .llseek = devkmsg_llseek,
  575. .poll = devkmsg_poll,
  576. .release = devkmsg_release,
  577. };
  578. #ifdef CONFIG_KEXEC
  579. /*
  580. * This appends the listed symbols to /proc/vmcoreinfo
  581. *
  582. * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile, to
  583. * obtain access to symbols that are otherwise very difficult to locate. These
  584. * symbols are specifically used so that utilities can access and extract the
  585. * dmesg log from a vmcore file after a crash.
  586. */
  587. void log_buf_kexec_setup(void)
  588. {
  589. VMCOREINFO_SYMBOL(log_buf);
  590. VMCOREINFO_SYMBOL(log_buf_len);
  591. VMCOREINFO_SYMBOL(log_first_idx);
  592. VMCOREINFO_SYMBOL(log_next_idx);
  593. /*
  594. * Export struct log size and field offsets. User space tools can
  595. * parse it and detect any changes to the structure down the line.
  596. */
  597. VMCOREINFO_STRUCT_SIZE(log);
  598. VMCOREINFO_OFFSET(log, ts_nsec);
  599. VMCOREINFO_OFFSET(log, len);
  600. VMCOREINFO_OFFSET(log, text_len);
  601. VMCOREINFO_OFFSET(log, dict_len);
  602. }
  603. #endif
  604. /* requested log_buf_len from kernel cmdline */
  605. static unsigned long __initdata new_log_buf_len;
  606. /* save requested log_buf_len since it's too early to process it */
  607. static int __init log_buf_len_setup(char *str)
  608. {
  609. unsigned size = memparse(str, &str);
  610. if (size)
  611. size = roundup_pow_of_two(size);
  612. if (size > log_buf_len)
  613. new_log_buf_len = size;
  614. return 0;
  615. }
  616. early_param("log_buf_len", log_buf_len_setup);
  617. void __init setup_log_buf(int early)
  618. {
  619. unsigned long flags;
  620. char *new_log_buf;
  621. int free;
  622. if (!new_log_buf_len)
  623. return;
  624. if (early) {
  625. unsigned long mem;
  626. mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
  627. if (!mem)
  628. return;
  629. new_log_buf = __va(mem);
  630. } else {
  631. new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
  632. }
  633. if (unlikely(!new_log_buf)) {
  634. pr_err("log_buf_len: %ld bytes not available\n",
  635. new_log_buf_len);
  636. return;
  637. }
  638. raw_spin_lock_irqsave(&logbuf_lock, flags);
  639. log_buf_len = new_log_buf_len;
  640. log_buf = new_log_buf;
  641. new_log_buf_len = 0;
  642. free = __LOG_BUF_LEN - log_next_idx;
  643. memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
  644. raw_spin_unlock_irqrestore(&logbuf_lock, flags);
  645. pr_info("log_buf_len: %d\n", log_buf_len);
  646. pr_info("early log buf free: %d(%d%%)\n",
  647. free, (free * 100) / __LOG_BUF_LEN);
  648. }
  649. static bool __read_mostly ignore_loglevel;
  650. static int __init ignore_loglevel_setup(char *str)
  651. {
  652. ignore_loglevel = 1;
  653. printk(KERN_INFO "debug: ignoring loglevel setting.\n");
  654. return 0;
  655. }
  656. early_param("ignore_loglevel", ignore_loglevel_setup);
  657. module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
  658. MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to "
  659. "print all kernel messages to the console.");
  660. #ifdef CONFIG_BOOT_PRINTK_DELAY
  661. static int boot_delay; /* msecs delay after each printk during bootup */
  662. static unsigned long long loops_per_msec; /* based on boot_delay */
  663. static int __init boot_delay_setup(char *str)
  664. {
  665. unsigned long lpj;
  666. lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
  667. loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
  668. get_option(&str, &boot_delay);
  669. if (boot_delay > 10 * 1000)
  670. boot_delay = 0;
  671. pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
  672. "HZ: %d, loops_per_msec: %llu\n",
  673. boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
  674. return 1;
  675. }
  676. __setup("boot_delay=", boot_delay_setup);
  677. static void boot_delay_msec(int level)
  678. {
  679. unsigned long long k;
  680. unsigned long timeout;
  681. if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
  682. || (level >= console_loglevel && !ignore_loglevel)) {
  683. return;
  684. }
  685. k = (unsigned long long)loops_per_msec * boot_delay;
  686. timeout = jiffies + msecs_to_jiffies(boot_delay);
  687. while (k) {
  688. k--;
  689. cpu_relax();
  690. /*
  691. * use (volatile) jiffies to prevent
  692. * compiler reduction; loop termination via jiffies
  693. * is secondary and may or may not happen.
  694. */
  695. if (time_after(jiffies, timeout))
  696. break;
  697. touch_nmi_watchdog();
  698. }
  699. }
  700. #else
  701. static inline void boot_delay_msec(int level)
  702. {
  703. }
  704. #endif
  705. #ifdef CONFIG_SECURITY_DMESG_RESTRICT
  706. int dmesg_restrict = 1;
  707. #else
  708. int dmesg_restrict;
  709. #endif
  710. static int syslog_action_restricted(int type)
  711. {
  712. if (dmesg_restrict)
  713. return 1;
  714. /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
  715. return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
  716. }
  717. static int check_syslog_permissions(int type, bool from_file)
  718. {
  719. /*
  720. * If this is from /proc/kmsg and we've already opened it, then we've
  721. * already done the capabilities checks at open time.
  722. */
  723. if (from_file && type != SYSLOG_ACTION_OPEN)
  724. return 0;
  725. if (syslog_action_restricted(type)) {
  726. if (capable(CAP_SYSLOG))
  727. return 0;
  728. /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
  729. if (capable(CAP_SYS_ADMIN)) {
  730. printk_once(KERN_WARNING "%s (%d): "
  731. "Attempt to access syslog with CAP_SYS_ADMIN "
  732. "but no CAP_SYSLOG (deprecated).\n",
  733. current->comm, task_pid_nr(current));
  734. return 0;
  735. }
  736. return -EPERM;
  737. }
  738. return 0;
  739. }
  740. #if defined(CONFIG_PRINTK_TIME)
  741. static bool printk_time = 1;
  742. #else
  743. static bool printk_time;
  744. #endif
  745. module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
  746. static size_t print_time(u64 ts, char *buf)
  747. {
  748. unsigned long rem_nsec;
  749. if (!printk_time)
  750. return 0;
  751. rem_nsec = do_div(ts, 1000000000);
  752. if (!buf)
  753. return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
  754. return sprintf(buf, "[%5lu.%06lu] ",
  755. (unsigned long)ts, rem_nsec / 1000);
  756. }
  757. static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
  758. {
  759. size_t len = 0;
  760. unsigned int prefix = (msg->facility << 3) | msg->level;
  761. if (syslog) {
  762. if (buf) {
  763. len += sprintf(buf, "<%u>", prefix);
  764. } else {
  765. len += 3;
  766. if (prefix > 999)
  767. len += 3;
  768. else if (prefix > 99)
  769. len += 2;
  770. else if (prefix > 9)
  771. len++;
  772. }
  773. }
  774. len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
  775. return len;
  776. }
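/*
 * Worked example of the syslog prefix computed above: a kernel message
 * (facility LOG_KERN = 0) at KERN_ERR (level 3) gives (0 << 3) | 3 and is
 * printed as "<3>"; a userspace record with facility LOG_USER (1) at level 6
 * gives (1 << 3) | 6 and is printed as "<14>".
 */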
  777. static size_t msg_print_text(const struct log *msg, enum log_flags prev,
  778. bool syslog, char *buf, size_t size)
  779. {
  780. const char *text = log_text(msg);
  781. size_t text_size = msg->text_len;
  782. bool prefix = true;
  783. bool newline = true;
  784. size_t len = 0;
  785. if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
  786. prefix = false;
  787. if (msg->flags & LOG_CONT) {
  788. if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
  789. prefix = false;
  790. if (!(msg->flags & LOG_NEWLINE))
  791. newline = false;
  792. }
  793. do {
  794. const char *next = memchr(text, '\n', text_size);
  795. size_t text_len;
  796. if (next) {
  797. text_len = next - text;
  798. next++;
  799. text_size -= next - text;
  800. } else {
  801. text_len = text_size;
  802. }
  803. if (buf) {
  804. if (print_prefix(msg, syslog, NULL) +
  805. text_len + 1 >= size - len)
  806. break;
  807. if (prefix)
  808. len += print_prefix(msg, syslog, buf + len);
  809. memcpy(buf + len, text, text_len);
  810. len += text_len;
  811. if (next || newline)
  812. buf[len++] = '\n';
  813. } else {
  814. /* SYSLOG_ACTION_* buffer size only calculation */
  815. if (prefix)
  816. len += print_prefix(msg, syslog, NULL);
  817. len += text_len;
  818. if (next || newline)
  819. len++;
  820. }
  821. prefix = true;
  822. text = next;
  823. } while (text);
  824. return len;
  825. }
  826. static int syslog_print(char __user *buf, int size)
  827. {
  828. char *text;
  829. struct log *msg;
  830. int len = 0;
  831. text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
  832. if (!text)
  833. return -ENOMEM;
  834. while (size > 0) {
  835. size_t n;
  836. size_t skip;
  837. raw_spin_lock_irq(&logbuf_lock);
  838. if (syslog_seq < log_first_seq) {
  839. /* messages are gone, move to first one */
  840. syslog_seq = log_first_seq;
  841. syslog_idx = log_first_idx;
  842. syslog_prev = 0;
  843. syslog_partial = 0;
  844. }
  845. if (syslog_seq == log_next_seq) {
  846. raw_spin_unlock_irq(&logbuf_lock);
  847. break;
  848. }
  849. skip = syslog_partial;
  850. msg = log_from_idx(syslog_idx);
  851. n = msg_print_text(msg, syslog_prev, true, text,
  852. LOG_LINE_MAX + PREFIX_MAX);
  853. if (n - syslog_partial <= size) {
  854. /* message fits into buffer, move forward */
  855. syslog_idx = log_next(syslog_idx);
  856. syslog_seq++;
  857. syslog_prev = msg->flags;
  858. n -= syslog_partial;
  859. syslog_partial = 0;
  860. } else if (!len) {
  861. /* partial read(), remember position */
  862. n = size;
  863. syslog_partial += n;
  864. } else
  865. n = 0;
  866. raw_spin_unlock_irq(&logbuf_lock);
  867. if (!n)
  868. break;
  869. if (copy_to_user(buf, text + skip, n)) {
  870. if (!len)
  871. len = -EFAULT;
  872. break;
  873. }
  874. len += n;
  875. size -= n;
  876. buf += n;
  877. }
  878. kfree(text);
  879. return len;
  880. }
  881. static int syslog_print_all(char __user *buf, int size, bool clear)
  882. {
  883. char *text;
  884. int len = 0;
  885. text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
  886. if (!text)
  887. return -ENOMEM;
  888. raw_spin_lock_irq(&logbuf_lock);
  889. if (buf) {
  890. u64 next_seq;
  891. u64 seq;
  892. u32 idx;
  893. enum log_flags prev;
  894. if (clear_seq < log_first_seq) {
  895. /* messages are gone, move to first available one */
  896. clear_seq = log_first_seq;
  897. clear_idx = log_first_idx;
  898. }
  899. /*
  900. * Find first record that fits, including all following records,
  901. * into the user-provided buffer for this dump.
  902. */
  903. seq = clear_seq;
  904. idx = clear_idx;
  905. prev = 0;
  906. while (seq < log_next_seq) {
  907. struct log *msg = log_from_idx(idx);
  908. len += msg_print_text(msg, prev, true, NULL, 0);
  909. prev = msg->flags;
  910. idx = log_next(idx);
  911. seq++;
  912. }
  913. /* move first record forward until length fits into the buffer */
  914. seq = clear_seq;
  915. idx = clear_idx;
  916. prev = 0;
  917. while (len > size && seq < log_next_seq) {
  918. struct log *msg = log_from_idx(idx);
  919. len -= msg_print_text(msg, prev, true, NULL, 0);
  920. prev = msg->flags;
  921. idx = log_next(idx);
  922. seq++;
  923. }
  924. /* last message fitting into this dump */
  925. next_seq = log_next_seq;
  926. len = 0;
  927. prev = 0;
  928. while (len >= 0 && seq < next_seq) {
  929. struct log *msg = log_from_idx(idx);
  930. int textlen;
  931. textlen = msg_print_text(msg, prev, true, text,
  932. LOG_LINE_MAX + PREFIX_MAX);
  933. if (textlen < 0) {
  934. len = textlen;
  935. break;
  936. }
  937. idx = log_next(idx);
  938. seq++;
  939. prev = msg->flags;
  940. raw_spin_unlock_irq(&logbuf_lock);
  941. if (copy_to_user(buf + len, text, textlen))
  942. len = -EFAULT;
  943. else
  944. len += textlen;
  945. raw_spin_lock_irq(&logbuf_lock);
  946. if (seq < log_first_seq) {
  947. /* messages are gone, move to next one */
  948. seq = log_first_seq;
  949. idx = log_first_idx;
  950. prev = 0;
  951. }
  952. }
  953. }
  954. if (clear) {
  955. clear_seq = log_next_seq;
  956. clear_idx = log_next_idx;
  957. }
  958. raw_spin_unlock_irq(&logbuf_lock);
  959. kfree(text);
  960. return len;
  961. }
  962. int do_syslog(int type, char __user *buf, int len, bool from_file)
  963. {
  964. bool clear = false;
  965. static int saved_console_loglevel = -1;
  966. int error;
  967. error = check_syslog_permissions(type, from_file);
  968. if (error)
  969. goto out;
  970. error = security_syslog(type);
  971. if (error)
  972. return error;
  973. switch (type) {
  974. case SYSLOG_ACTION_CLOSE: /* Close log */
  975. break;
  976. case SYSLOG_ACTION_OPEN: /* Open log */
  977. break;
  978. case SYSLOG_ACTION_READ: /* Read from log */
  979. error = -EINVAL;
  980. if (!buf || len < 0)
  981. goto out;
  982. error = 0;
  983. if (!len)
  984. goto out;
  985. if (!access_ok(VERIFY_WRITE, buf, len)) {
  986. error = -EFAULT;
  987. goto out;
  988. }
  989. error = wait_event_interruptible(log_wait,
  990. syslog_seq != log_next_seq);
  991. if (error)
  992. goto out;
  993. error = syslog_print(buf, len);
  994. break;
  995. /* Read/clear last kernel messages */
  996. case SYSLOG_ACTION_READ_CLEAR:
  997. clear = true;
  998. /* FALL THRU */
  999. /* Read last kernel messages */
  1000. case SYSLOG_ACTION_READ_ALL:
  1001. error = -EINVAL;
  1002. if (!buf || len < 0)
  1003. goto out;
  1004. error = 0;
  1005. if (!len)
  1006. goto out;
  1007. if (!access_ok(VERIFY_WRITE, buf, len)) {
  1008. error = -EFAULT;
  1009. goto out;
  1010. }
  1011. error = syslog_print_all(buf, len, clear);
  1012. break;
  1013. /* Clear ring buffer */
  1014. case SYSLOG_ACTION_CLEAR:
  1015. syslog_print_all(NULL, 0, true);
  1016. break;
  1017. /* Disable logging to console */
  1018. case SYSLOG_ACTION_CONSOLE_OFF:
  1019. if (saved_console_loglevel == -1)
  1020. saved_console_loglevel = console_loglevel;
  1021. console_loglevel = minimum_console_loglevel;
  1022. break;
  1023. /* Enable logging to console */
  1024. case SYSLOG_ACTION_CONSOLE_ON:
  1025. if (saved_console_loglevel != -1) {
  1026. console_loglevel = saved_console_loglevel;
  1027. saved_console_loglevel = -1;
  1028. }
  1029. break;
  1030. /* Set level of messages printed to console */
  1031. case SYSLOG_ACTION_CONSOLE_LEVEL:
  1032. error = -EINVAL;
  1033. if (len < 1 || len > 8)
  1034. goto out;
  1035. if (len < minimum_console_loglevel)
  1036. len = minimum_console_loglevel;
  1037. console_loglevel = len;
  1038. /* Implicitly re-enable logging to console */
  1039. saved_console_loglevel = -1;
  1040. error = 0;
  1041. break;
  1042. /* Number of chars in the log buffer */
  1043. case SYSLOG_ACTION_SIZE_UNREAD:
  1044. raw_spin_lock_irq(&logbuf_lock);
  1045. if (syslog_seq < log_first_seq) {
  1046. /* messages are gone, move to first one */
  1047. syslog_seq = log_first_seq;
  1048. syslog_idx = log_first_idx;
  1049. syslog_prev = 0;
  1050. syslog_partial = 0;
  1051. }
  1052. if (from_file) {
  1053. /*
  1054. * Short-cut for poll("/proc/kmsg") which simply checks
  1055. * for pending data, not the size; return the count of
  1056. * records, not the length.
  1057. */
  1058. error = log_next_idx - syslog_idx;
  1059. } else {
  1060. u64 seq = syslog_seq;
  1061. u32 idx = syslog_idx;
  1062. enum log_flags prev = syslog_prev;
  1063. error = 0;
  1064. while (seq < log_next_seq) {
  1065. struct log *msg = log_from_idx(idx);
  1066. error += msg_print_text(msg, prev, true, NULL, 0);
  1067. idx = log_next(idx);
  1068. seq++;
  1069. prev = msg->flags;
  1070. }
  1071. error -= syslog_partial;
  1072. }
  1073. raw_spin_unlock_irq(&logbuf_lock);
  1074. break;
  1075. /* Size of the log buffer */
  1076. case SYSLOG_ACTION_SIZE_BUFFER:
  1077. error = log_buf_len;
  1078. break;
  1079. default:
  1080. error = -EINVAL;
  1081. break;
  1082. }
  1083. out:
  1084. return error;
  1085. }
  1086. SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
  1087. {
  1088. return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
  1089. }
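/*
 * Illustrative userspace access to this syscall via glibc's klogctl() (a
 * sketch, not kernel code; the numeric action values correspond to the
 * SYSLOG_ACTION_* constants used above):
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/klog.h>
 *
 *	int dump_log(void)
 *	{
 *		int size = klogctl(10, NULL, 0);	/* SYSLOG_ACTION_SIZE_BUFFER */
 *		char *buf;
 *		int len;
 *
 *		if (size <= 0)
 *			return -1;
 *		buf = malloc(size);
 *		if (!buf)
 *			return -1;
 *		/* non-destructive read of the whole buffer, like 'dmesg' */
 *		len = klogctl(3, buf, size);		/* SYSLOG_ACTION_READ_ALL */
 *		if (len > 0)
 *			fwrite(buf, 1, len, stdout);
 *		free(buf);
 *		return len < 0 ? -1 : 0;
 *	}
 */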
  1090. /*
  1091. * Call the console drivers, asking them to write out
  1092. * the given text of the given length.
  1093. * The console_lock must be held.
  1094. */
  1095. static void call_console_drivers(int level, const char *text, size_t len)
  1096. {
  1097. struct console *con;
  1098. trace_console(text, 0, len, len);
  1099. if (level >= console_loglevel && !ignore_loglevel)
  1100. return;
  1101. if (!console_drivers)
  1102. return;
  1103. for_each_console(con) {
  1104. if (exclusive_console && con != exclusive_console)
  1105. continue;
  1106. if (!(con->flags & CON_ENABLED))
  1107. continue;
  1108. if (!con->write)
  1109. continue;
  1110. if (!cpu_online(smp_processor_id()) &&
  1111. !(con->flags & CON_ANYTIME))
  1112. continue;
  1113. con->write(con, text, len);
  1114. }
  1115. }
  1116. /*
  1117. * Zap console related locks when oopsing. Only zap at most once
  1118. * every 30 seconds, to leave time for slow consoles to print a
  1119. * full oops.
  1120. */
  1121. static void zap_locks(void)
  1122. {
  1123. static unsigned long oops_timestamp;
  1124. if (time_after_eq(jiffies, oops_timestamp) &&
  1125. !time_after(jiffies, oops_timestamp + 30 * HZ))
  1126. return;
  1127. oops_timestamp = jiffies;
  1128. debug_locks_off();
  1129. /* If a crash is occurring, make sure we can't deadlock */
  1130. raw_spin_lock_init(&logbuf_lock);
  1131. /* And make sure that we print immediately */
  1132. sema_init(&console_sem, 1);
  1133. }
  1134. /* Check if we have any console registered that can be called early in boot. */
  1135. static int have_callable_console(void)
  1136. {
  1137. struct console *con;
  1138. for_each_console(con)
  1139. if (con->flags & CON_ANYTIME)
  1140. return 1;
  1141. return 0;
  1142. }
  1143. /*
  1144. * Can we actually use the console at this time on this cpu?
  1145. *
  1146. * Console drivers may assume that per-cpu resources have
  1147. * been allocated. So unless they're explicitly marked as
  1148. * being able to cope (CON_ANYTIME) don't call them until
  1149. * this CPU is officially up.
  1150. */
  1151. static inline int can_use_console(unsigned int cpu)
  1152. {
  1153. return cpu_online(cpu) || have_callable_console();
  1154. }
  1155. /*
  1156. * Try to get console ownership to actually show the kernel
  1157. * messages from a 'printk'. Return true (and with the
  1158. * console_lock held, and 'console_locked' set) if it
  1159. * is successful, false otherwise.
  1160. *
  1161. * This gets called with the 'logbuf_lock' spinlock held and
  1162. * interrupts disabled. It should return with 'logbuf_lock'
  1163. * released but interrupts still disabled.
  1164. */
  1165. static int console_trylock_for_printk(unsigned int cpu)
  1166. __releases(&logbuf_lock)
  1167. {
  1168. int retval = 0, wake = 0;
  1169. if (console_trylock()) {
  1170. retval = 1;
  1171. /*
  1172. * If we can't use the console, we need to release
  1173. * the console semaphore by hand to avoid flushing
  1174. * the buffer. We need to hold the console semaphore
  1175. * in order to do this test safely.
  1176. */
  1177. if (!can_use_console(cpu)) {
  1178. console_locked = 0;
  1179. wake = 1;
  1180. retval = 0;
  1181. }
  1182. }
  1183. logbuf_cpu = UINT_MAX;
  1184. if (wake)
  1185. up(&console_sem);
  1186. raw_spin_unlock(&logbuf_lock);
  1187. return retval;
  1188. }
  1189. int printk_delay_msec __read_mostly;
  1190. static inline void printk_delay(void)
  1191. {
  1192. if (unlikely(printk_delay_msec)) {
  1193. int m = printk_delay_msec;
  1194. while (m--) {
  1195. mdelay(1);
  1196. touch_nmi_watchdog();
  1197. }
  1198. }
  1199. }
  1200. /*
  1201. * Continuation lines are buffered, and not committed to the record buffer
  1202. * until the line is complete, or a race forces it. The line fragments
  1203. * though, are printed immediately to the consoles to ensure everything has
  1204. * reached the console in case of a kernel crash.
  1205. */
  1206. static struct cont {
  1207. char buf[LOG_LINE_MAX];
  1208. size_t len; /* length == 0 means unused buffer */
  1209. size_t cons; /* bytes written to console */
  1210. struct task_struct *owner; /* task of first print */
  1211. u64 ts_nsec; /* time of first print */
  1212. u8 level; /* log level of first message */
  1213. u8 facility; /* log facility of first message */
  1214. enum log_flags flags; /* prefix, newline flags */
  1215. bool flushed:1; /* buffer sealed and committed */
  1216. } cont;
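/*
 * For illustration, a sequence of calls such as
 *
 *	printk(KERN_INFO "checking feature:");
 *	printk(KERN_CONT " step one");
 *	printk(KERN_CONT " done\n");
 *
 * is collected here fragment by fragment and normally ends up as one record
 * once the trailing newline arrives; if another task interleaves its own
 * output or the buffer gets too full, cont_flush() commits what has been
 * gathered so far as a separate record instead.
 */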
  1217. static void cont_flush(enum log_flags flags)
  1218. {
  1219. if (cont.flushed)
  1220. return;
  1221. if (cont.len == 0)
  1222. return;
  1223. if (cont.cons) {
  1224. /*
  1225. * If a fragment of this line was directly flushed to the
  1226. * console, wait for the console to pick up the rest of the
  1227. * line. LOG_NOCONS suppresses a duplicated output.
  1228. */
  1229. log_store(cont.facility, cont.level, flags | LOG_NOCONS,
  1230. cont.ts_nsec, NULL, 0, cont.buf, cont.len);
  1231. cont.flags = flags;
  1232. cont.flushed = true;
  1233. } else {
  1234. /*
  1235. * If no fragment of this line ever reached the console,
  1236. * just submit it to the store and free the buffer.
  1237. */
  1238. log_store(cont.facility, cont.level, flags, 0,
  1239. NULL, 0, cont.buf, cont.len);
  1240. cont.len = 0;
  1241. }
  1242. }
  1243. static bool cont_add(int facility, int level, const char *text, size_t len)
  1244. {
  1245. if (cont.len && cont.flushed)
  1246. return false;
  1247. if (cont.len + len > sizeof(cont.buf)) {
  1248. /* the line gets too long, split it up in separate records */
  1249. cont_flush(LOG_CONT);
  1250. return false;
  1251. }
  1252. if (!cont.len) {
  1253. cont.facility = facility;
  1254. cont.level = level;
  1255. cont.owner = current;
  1256. cont.ts_nsec = local_clock();
  1257. cont.flags = 0;
  1258. cont.cons = 0;
  1259. cont.flushed = false;
  1260. }
  1261. memcpy(cont.buf + cont.len, text, len);
  1262. cont.len += len;
  1263. if (cont.len > (sizeof(cont.buf) * 80) / 100)
  1264. cont_flush(LOG_CONT);
  1265. return true;
  1266. }
  1267. static size_t cont_print_text(char *text, size_t size)
  1268. {
  1269. size_t textlen = 0;
  1270. size_t len;
  1271. if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
  1272. textlen += print_time(cont.ts_nsec, text);
  1273. size -= textlen;
  1274. }
  1275. len = cont.len - cont.cons;
  1276. if (len > 0) {
  1277. if (len+1 > size)
  1278. len = size-1;
  1279. memcpy(text + textlen, cont.buf + cont.cons, len);
  1280. textlen += len;
  1281. cont.cons = cont.len;
  1282. }
  1283. if (cont.flushed) {
  1284. if (cont.flags & LOG_NEWLINE)
  1285. text[textlen++] = '\n';
  1286. /* got everything, release buffer */
  1287. cont.len = 0;
  1288. }
  1289. return textlen;
  1290. }
  1291. asmlinkage int vprintk_emit(int facility, int level,
  1292. const char *dict, size_t dictlen,
  1293. const char *fmt, va_list args)
  1294. {
  1295. static int recursion_bug;
  1296. static char textbuf[LOG_LINE_MAX];
  1297. char *text = textbuf;
  1298. size_t text_len;
  1299. enum log_flags lflags = 0;
  1300. unsigned long flags;
  1301. int this_cpu;
  1302. int printed_len = 0;
  1303. boot_delay_msec(level);
  1304. printk_delay();
  1305. /* This stops the holder of console_sem just where we want him */
  1306. local_irq_save(flags);
  1307. this_cpu = smp_processor_id();
  1308. /*
  1309. * Ouch, printk recursed into itself!
  1310. */
  1311. if (unlikely(logbuf_cpu == this_cpu)) {
  1312. /*
  1313. * If a crash is occurring during printk() on this CPU,
  1314. * then try to get the crash message out but make sure
  1315. * we can't deadlock. Otherwise just return to avoid the
  1316. * recursion - but flag the recursion so that
  1317. * it can be printed at the next appropriate moment:
  1318. */
  1319. if (!oops_in_progress && !lockdep_recursing(current)) {
  1320. recursion_bug = 1;
  1321. goto out_restore_irqs;
  1322. }
  1323. zap_locks();
  1324. }
  1325. lockdep_off();
  1326. raw_spin_lock(&logbuf_lock);
  1327. logbuf_cpu = this_cpu;
  1328. if (recursion_bug) {
  1329. static const char recursion_msg[] =
  1330. "BUG: recent printk recursion!";
  1331. recursion_bug = 0;
  1332. printed_len += strlen(recursion_msg);
  1333. /* emit KERN_CRIT message */
  1334. log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
  1335. NULL, 0, recursion_msg, printed_len);
  1336. }
  1337. /*
  1338. * The printf needs to come first; we need the syslog
  1339. * prefix which might be passed-in as a parameter.
  1340. */
  1341. text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
  1342. /* mark and strip a trailing newline */
  1343. if (text_len && text[text_len-1] == '\n') {
  1344. text_len--;
  1345. lflags |= LOG_NEWLINE;
  1346. }
  1347. /* strip kernel syslog prefix and extract log level or control flags */
  1348. if (facility == 0) {
  1349. int kern_level = printk_get_level(text);
  1350. if (kern_level) {
  1351. const char *end_of_header = printk_skip_level(text);
  1352. switch (kern_level) {
  1353. case '0' ... '7':
  1354. if (level == -1)
  1355. level = kern_level - '0';
  1356. case 'd': /* KERN_DEFAULT */
  1357. lflags |= LOG_PREFIX;
  1358. case 'c': /* KERN_CONT */
  1359. break;
  1360. }
  1361. text_len -= end_of_header - text;
  1362. text = (char *)end_of_header;
  1363. }
  1364. }
  1365. if (level == -1)
  1366. level = default_message_loglevel;
  1367. if (dict)
  1368. lflags |= LOG_PREFIX|LOG_NEWLINE;
  1369. if (!(lflags & LOG_NEWLINE)) {
  1370. /*
  1371. * Flush the conflicting buffer. An earlier newline was missing,
  1372. * or another task also prints continuation lines.
  1373. */
  1374. if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
  1375. cont_flush(LOG_NEWLINE);
  1376. /* buffer line if possible, otherwise store it right away */
  1377. if (!cont_add(facility, level, text, text_len))
  1378. log_store(facility, level, lflags | LOG_CONT, 0,
  1379. dict, dictlen, text, text_len);
  1380. } else {
  1381. bool stored = false;
  1382. /*
  1383. * If an earlier newline was missing and it was the same task,
  1384. * either merge it with the current buffer and flush, or if
  1385. * there was a race with interrupts (prefix == true) then just
  1386. * flush it out and store this line separately.
  1387. */
  1388. if (cont.len && cont.owner == current) {
  1389. if (!(lflags & LOG_PREFIX))
  1390. stored = cont_add(facility, level, text, text_len);
  1391. cont_flush(LOG_NEWLINE);
  1392. }
  1393. if (!stored)
  1394. log_store(facility, level, lflags, 0,
  1395. dict, dictlen, text, text_len);
  1396. }
  1397. printed_len += text_len;
  1398. /*
  1399. * Try to acquire and then immediately release the console semaphore.
  1400. * The release will print out buffers and wake up /dev/kmsg and syslog()
  1401. * users.
  1402. *
  1403. * The console_trylock_for_printk() function will release 'logbuf_lock'
  1404. * regardless of whether it actually gets the console semaphore or not.
  1405. */
  1406. if (console_trylock_for_printk(this_cpu))
  1407. console_unlock();
  1408. lockdep_on();
  1409. out_restore_irqs:
  1410. local_irq_restore(flags);
  1411. return printed_len;
  1412. }
  1413. EXPORT_SYMBOL(vprintk_emit);
asmlinkage int vprintk(const char *fmt, va_list args)
{
	return vprintk_emit(0, -1, NULL, 0, fmt, args);
}
EXPORT_SYMBOL(vprintk);

asmlinkage int printk_emit(int facility, int level,
			   const char *dict, size_t dictlen,
			   const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(printk_emit);
/**
 * printk - print a kernel message
 * @fmt: format string
 *
 * This is printk(). It can be called from any context. We want it to work.
 *
 * We try to grab the console_lock. If we succeed, it's easy - we log the
 * output and call the console drivers. If we fail to get the semaphore, we
 * place the output into the log buffer and return. The current holder of
 * the console_sem will notice the new output in console_unlock() and will
 * send it to the consoles before releasing the lock.
 *
 * One effect of this deferred printing is that code which calls printk() and
 * then changes console_loglevel may break. This is because console_loglevel
 * is inspected when the actual printing occurs.
 *
 * See also:
 * printf(3)
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
asmlinkage int printk(const char *fmt, ...)
{
	va_list args;
	int r;

#ifdef CONFIG_KGDB_KDB
	if (unlikely(kdb_trap_printk)) {
		va_start(args, fmt);
		r = vkdb_printf(fmt, args);
		va_end(args);
		return r;
	}
#endif
	va_start(args, fmt);
	r = vprintk_emit(0, -1, NULL, 0, fmt, args);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(printk);
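
/*
 * Usage sketch (illustrative only, not part of this file's API): callers
 * normally prepend one of the KERN_* level macros to the format string;
 * KERN_CONT continues the previous line, and a message without a level
 * falls back to default_message_loglevel in vprintk_emit() above.
 * "id" below is just a placeholder variable:
 *
 *	printk(KERN_INFO "device %d ready\n", id);
 *	printk(KERN_DEBUG "probing ");
 *	printk(KERN_CONT "done\n");
 */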
#else /* CONFIG_PRINTK */

#define LOG_LINE_MAX 0
#define PREFIX_MAX 0

static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
static enum log_flags syslog_prev;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
static enum log_flags console_prev;
static struct cont {
	size_t len;
	size_t cons;
	u8 level;
	bool flushed:1;
} cont;
static struct log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static void call_console_drivers(int level, const char *text, size_t len) {}
static size_t msg_print_text(const struct log *msg, enum log_flags prev,
			     bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }

#endif /* CONFIG_PRINTK */
static int __add_preferred_console(char *name, int idx, char *options,
				   char *brl_options)
{
	struct console_cmdline *c;
	int i;

	/*
	 * See if this tty is not yet registered, and
	 * if we have a slot free.
	 */
	for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
		if (strcmp(console_cmdline[i].name, name) == 0 &&
		    console_cmdline[i].index == idx) {
			if (!brl_options)
				selected_console = i;
			return 0;
		}
	if (i == MAX_CMDLINECONSOLES)
		return -E2BIG;
	if (!brl_options)
		selected_console = i;
	c = &console_cmdline[i];
	strlcpy(c->name, name, sizeof(c->name));
	c->options = options;
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
	c->brl_options = brl_options;
#endif
	c->index = idx;
	return 0;
}

/*
 * Set up a list of consoles.  Called from init/main.c
 */
static int __init console_setup(char *str)
{
	char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
	char *s, *options, *brl_options = NULL;
	int idx;

#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
	if (!memcmp(str, "brl,", 4)) {
		brl_options = "";
		str += 4;
	} else if (!memcmp(str, "brl=", 4)) {
		brl_options = str + 4;
		str = strchr(brl_options, ',');
		if (!str) {
			printk(KERN_ERR "need port name after brl=\n");
			return 1;
		}
		*(str++) = 0;
	}
#endif

	/*
	 * Decode str into name, index, options.
	 */
	if (str[0] >= '0' && str[0] <= '9') {
		strcpy(buf, "ttyS");
		strncpy(buf + 4, str, sizeof(buf) - 5);
	} else {
		strncpy(buf, str, sizeof(buf) - 1);
	}
	buf[sizeof(buf) - 1] = 0;
	if ((options = strchr(str, ',')) != NULL)
		*(options++) = 0;
#ifdef __sparc__
	if (!strcmp(str, "ttya"))
		strcpy(buf, "ttyS0");
	if (!strcmp(str, "ttyb"))
		strcpy(buf, "ttyS1");
#endif
	for (s = buf; *s; s++)
		if ((*s >= '0' && *s <= '9') || *s == ',')
			break;
	idx = simple_strtoul(s, NULL, 10);
	*s = 0;

	__add_preferred_console(buf, idx, options, brl_options);
	console_set_on_cmdline = 1;
	return 1;
}
__setup("console=", console_setup);
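
/*
 * Illustrative examples of the command-line syntax parsed above (the
 * devices and baud rates are arbitrary):
 *
 *	console=ttyS0,115200n8	serial port 0, options passed to the driver
 *	console=tty1		a VT console, no options
 *	console=1,9600		bare index, expanded to "ttyS1" above
 *
 * Several console= parameters may be given; the last one processed becomes
 * the preferred console (see add_preferred_console() below).
 */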
/**
 * add_preferred_console - add a device to the list of preferred consoles.
 * @name: device name
 * @idx: device index
 * @options: options for this console
 *
 * The last preferred console added will be used for kernel messages
 * and stdin/out/err for init. Normally this is used by console_setup
 * above to handle user-supplied console arguments; however it can also
 * be used by arch-specific code either to override the user or more
 * commonly to provide a default console (e.g. from PROM variables) when
 * the user has not supplied one.
 */
int add_preferred_console(char *name, int idx, char *options)
{
	return __add_preferred_console(name, idx, options, NULL);
}
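
/*
 * Sketch of a typical arch-side caller (illustrative only): platform setup
 * code that found a firmware-selected UART might provide a default console
 * when the user passed no console= argument, e.g.
 *
 *	if (!console_set_on_cmdline)
 *		add_preferred_console("ttyS", 0, "115200n8");
 */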
int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
{
	struct console_cmdline *c;
	int i;

	for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
		if (strcmp(console_cmdline[i].name, name) == 0 &&
		    console_cmdline[i].index == idx) {
			c = &console_cmdline[i];
			strlcpy(c->name, name_new, sizeof(c->name));
			c->name[sizeof(c->name) - 1] = 0;
			c->options = options;
			c->index = idx_new;
			return i;
		}
	/* not found */
	return -1;
}

bool console_suspend_enabled = 1;
EXPORT_SYMBOL(console_suspend_enabled);

static int __init console_suspend_disable(char *str)
{
	console_suspend_enabled = 0;
	return 1;
}
__setup("no_console_suspend", console_suspend_disable);
module_param_named(console_suspend, console_suspend_enabled,
		bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
	" and hibernate operations");

/**
 * suspend_console - suspend the console subsystem
 *
 * This disables printk() while we go into suspend states
 */
void suspend_console(void)
{
	if (!console_suspend_enabled)
		return;
	printk("Suspending console(s) (use no_console_suspend to debug)\n");
	console_lock();
	console_suspended = 1;
	up(&console_sem);
}

void resume_console(void)
{
	if (!console_suspend_enabled)
		return;
	down(&console_sem);
	console_suspended = 0;
	console_unlock();
}

/**
 * console_cpu_notify - print deferred console messages after CPU hotplug
 * @self: notifier struct
 * @action: CPU hotplug event
 * @hcpu: unused
 *
 * If printk() is called from a CPU that is not online yet, the messages
 * will be spooled but will not show up on the console.  This function is
 * called when a new CPU comes online (or fails to come up), and ensures
 * that any such output gets printed.
 */
static int __cpuinit console_cpu_notify(struct notifier_block *self,
	unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_DOWN_FAILED:
	case CPU_UP_CANCELED:
		console_lock();
		console_unlock();
	}
	return NOTIFY_OK;
}

/**
 * console_lock - lock the console system for exclusive use.
 *
 * Acquires a lock which guarantees that the caller has
 * exclusive access to the console system and the console_drivers list.
 *
 * Can sleep, returns nothing.
 */
void console_lock(void)
{
	might_sleep();

	down(&console_sem);
	if (console_suspended)
		return;
	console_locked = 1;
	console_may_schedule = 1;
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
}
EXPORT_SYMBOL(console_lock);
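
/*
 * Usage sketch (illustrative): code that walks or modifies the
 * console_drivers list must hold this lock across the traversal, e.g.
 *
 *	struct console *con;
 *
 *	console_lock();
 *	for_each_console(con)
 *		pr_info("registered console: %s%d\n", con->name, con->index);
 *	console_unlock();
 *
 * console_unlock() also flushes any records appended to the log buffer
 * while the lock was held, as described below.
 */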
/**
 * console_trylock - try to lock the console system for exclusive use.
 *
 * Tries to acquire a lock which guarantees that the caller has
 * exclusive access to the console system and the console_drivers list.
 *
 * returns 1 on success, and 0 on failure to acquire the lock.
 */
int console_trylock(void)
{
	if (down_trylock(&console_sem))
		return 0;
	if (console_suspended) {
		up(&console_sem);
		return 0;
	}
	console_locked = 1;
	console_may_schedule = 0;
	mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
	return 1;
}
EXPORT_SYMBOL(console_trylock);
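
/*
 * Usage sketch (illustrative): unlike console_lock(), this variant never
 * sleeps, so it is the one to use from atomic context:
 *
 *	if (console_trylock()) {
 *		... touch console state ...
 *		console_unlock();
 *	} else {
 *		... defer the work; somebody else holds the lock ...
 *	}
 */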
int is_console_locked(void)
{
	return console_locked;
}
static void console_cont_flush(char *text, size_t size)
{
	unsigned long flags;
	size_t len;

	raw_spin_lock_irqsave(&logbuf_lock, flags);

	if (!cont.len)
		goto out;

	/*
	 * We still queue earlier records, likely because the console was
	 * busy. The earlier ones need to be printed before this one; we
	 * have not flushed any fragment so far, so just let it queue up.
	 */
	if (console_seq < log_next_seq && !cont.cons)
		goto out;

	len = cont_print_text(text, size);
	raw_spin_unlock(&logbuf_lock);
	stop_critical_timings();
	call_console_drivers(cont.level, text, len);
	start_critical_timings();
	local_irq_restore(flags);
	return;
out:
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
/**
 * console_unlock - unlock the console system
 *
 * Releases the console_lock which the caller holds on the console system
 * and the console driver list.
 *
 * While the console_lock was held, console output may have been buffered
 * by printk().  If this is the case, console_unlock() emits
 * the output prior to releasing the lock.
 *
 * If there is output waiting, we wake /dev/kmsg and syslog() users.
 *
 * console_unlock() may be called from any context.
 */
void console_unlock(void)
{
	static char text[LOG_LINE_MAX + PREFIX_MAX];
	static u64 seen_seq;
	unsigned long flags;
	bool wake_klogd = false;
	bool retry;

	if (console_suspended) {
		up(&console_sem);
		return;
	}

	console_may_schedule = 0;

	/* flush buffered message fragment immediately to console */
	console_cont_flush(text, sizeof(text));
again:
	for (;;) {
		struct log *msg;
		size_t len;
		int level;

		raw_spin_lock_irqsave(&logbuf_lock, flags);
		if (seen_seq != log_next_seq) {
			wake_klogd = true;
			seen_seq = log_next_seq;
		}

		if (console_seq < log_first_seq) {
			/* messages are gone, move to first one */
			console_seq = log_first_seq;
			console_idx = log_first_idx;
			console_prev = 0;
		}
skip:
		if (console_seq == log_next_seq)
			break;

		msg = log_from_idx(console_idx);
		if (msg->flags & LOG_NOCONS) {
			/*
			 * Skip record we have buffered and already printed
			 * directly to the console when we received it.
			 */
			console_idx = log_next(console_idx);
			console_seq++;
			/*
			 * We will get here again when we register a new
			 * CON_PRINTBUFFER console. Clear the flag so we
			 * will properly dump everything later.
			 */
			msg->flags &= ~LOG_NOCONS;
			console_prev = msg->flags;
			goto skip;
		}

		level = msg->level;
		len = msg_print_text(msg, console_prev, false,
				     text, sizeof(text));
		console_idx = log_next(console_idx);
		console_seq++;
		console_prev = msg->flags;
		raw_spin_unlock(&logbuf_lock);

		stop_critical_timings();	/* don't trace print latency */
		call_console_drivers(level, text, len);
		start_critical_timings();
		local_irq_restore(flags);
	}
	console_locked = 0;
	mutex_release(&console_lock_dep_map, 1, _RET_IP_);

	/* Release the exclusive_console once it is used */
	if (unlikely(exclusive_console))
		exclusive_console = NULL;

	raw_spin_unlock(&logbuf_lock);

	up(&console_sem);

	/*
	 * Someone could have filled up the buffer again, so re-check if there's
	 * something to flush. In case we cannot trylock the console_sem again,
	 * there's a new owner and the console_unlock() from them will do the
	 * flush, no worries.
	 */
	raw_spin_lock(&logbuf_lock);
	retry = console_seq != log_next_seq;
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	if (retry && console_trylock())
		goto again;

	if (wake_klogd)
		wake_up_klogd();
}
EXPORT_SYMBOL(console_unlock);
/**
 * console_conditional_schedule - yield the CPU if required
 *
 * If the console code is currently allowed to sleep, and
 * if this CPU should yield the CPU to another task, do
 * so here.
 *
 * Must be called while holding the console_lock.
 */
void __sched console_conditional_schedule(void)
{
	if (console_may_schedule)
		cond_resched();
}
EXPORT_SYMBOL(console_conditional_schedule);
void console_unblank(void)
{
	struct console *c;

	/*
	 * console_unblank can no longer be called in interrupt context unless
	 * oops_in_progress is set to 1.
	 */
	if (oops_in_progress) {
		if (down_trylock(&console_sem) != 0)
			return;
	} else
		console_lock();

	console_locked = 1;
	console_may_schedule = 0;
	for_each_console(c)
		if ((c->flags & CON_ENABLED) && c->unblank)
			c->unblank();
	console_unlock();
}
/*
 * Return the console tty driver structure and its associated index
 */
struct tty_driver *console_device(int *index)
{
	struct console *c;
	struct tty_driver *driver = NULL;

	console_lock();
	for_each_console(c) {
		if (!c->device)
			continue;
		driver = c->device(c, index);
		if (driver)
			break;
	}
	console_unlock();
	return driver;
}

/*
 * Prevent further output on the passed console device so that (for example)
 * serial drivers can disable console output before suspending a port, and can
 * re-enable output afterwards.
 */
void console_stop(struct console *console)
{
	console_lock();
	console->flags &= ~CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_stop);

void console_start(struct console *console)
{
	console_lock();
	console->flags |= CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_start);

static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	printk(KERN_INFO "debug: skip boot console de-registration.\n");

	return 0;
}
early_param("keep_bootcon", keep_bootcon_setup);
/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 *  - Any number of bootconsoles can be registered at any time.
 *  - As soon as a "real" console is registered, all bootconsoles
 *    will be unregistered automatically.
 *  - Once a "real" console is registered, any attempt to register a
 *    bootconsole will be rejected.
 */
void register_console(struct console *newcon)
{
	int i;
	unsigned long flags;
	struct console *bcon = NULL;

	/*
	 * before we register a new CON_BOOT console, make sure we don't
	 * already have a valid console
	 */
	if (console_drivers && newcon->flags & CON_BOOT) {
		/* find the last or real console */
		for_each_console(bcon) {
			if (!(bcon->flags & CON_BOOT)) {
				printk(KERN_INFO "Too late to register bootconsole %s%d\n",
					newcon->name, newcon->index);
				return;
			}
		}
	}

	if (console_drivers && console_drivers->flags & CON_BOOT)
		bcon = console_drivers;

	if (preferred_console < 0 || bcon || !console_drivers)
		preferred_console = selected_console;

	if (newcon->early_setup)
		newcon->early_setup();

	/*
	 * See if we want to use this console driver. If we
	 * didn't select a console we take the first one
	 * that registers here.
	 */
	if (preferred_console < 0) {
		if (newcon->index < 0)
			newcon->index = 0;
		if (newcon->setup == NULL ||
		    newcon->setup(newcon, NULL) == 0) {
			newcon->flags |= CON_ENABLED;
			if (newcon->device) {
				newcon->flags |= CON_CONSDEV;
				preferred_console = 0;
			}
		}
	}

	/*
	 * See if this console matches one we selected on
	 * the command line.
	 */
	for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
			i++) {
		if (strcmp(console_cmdline[i].name, newcon->name) != 0)
			continue;
		if (newcon->index >= 0 &&
		    newcon->index != console_cmdline[i].index)
			continue;
		if (newcon->index < 0)
			newcon->index = console_cmdline[i].index;
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
		if (console_cmdline[i].brl_options) {
			newcon->flags |= CON_BRL;
			braille_register_console(newcon,
					console_cmdline[i].index,
					console_cmdline[i].options,
					console_cmdline[i].brl_options);
			return;
		}
#endif
		if (newcon->setup &&
		    newcon->setup(newcon, console_cmdline[i].options) != 0)
			break;
		newcon->flags |= CON_ENABLED;
		newcon->index = console_cmdline[i].index;
		if (i == selected_console) {
			newcon->flags |= CON_CONSDEV;
			preferred_console = selected_console;
		}
		break;
	}

	if (!(newcon->flags & CON_ENABLED))
		return;

	/*
	 * If we have a bootconsole, and are switching to a real console,
	 * don't print everything out again, since when the boot console and
	 * the real console are the same physical device, it's annoying to
	 * see the beginning boot messages twice.
	 */
	if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
		newcon->flags &= ~CON_PRINTBUFFER;

	/*
	 * Put this console in the list - keep the
	 * preferred driver at the head of the list.
	 */
	console_lock();
	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
		newcon->next = console_drivers;
		console_drivers = newcon;
		if (newcon->next)
			newcon->next->flags &= ~CON_CONSDEV;
	} else {
		newcon->next = console_drivers->next;
		console_drivers->next = newcon;
	}
	if (newcon->flags & CON_PRINTBUFFER) {
		/*
		 * console_unlock() will print out the buffered messages
		 * for us.
		 */
		raw_spin_lock_irqsave(&logbuf_lock, flags);
		console_seq = syslog_seq;
		console_idx = syslog_idx;
		console_prev = syslog_prev;
		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
		/*
		 * We're about to replay the log buffer.  Only do this to the
		 * just-registered console to avoid excessive message spam to
		 * the already-registered consoles.
		 */
		exclusive_console = newcon;
	}
	console_unlock();
	console_sysfs_notify();

	/*
	 * By unregistering the bootconsoles after we enable the real console
	 * we get the "console xxx enabled" message on all the consoles -
	 * boot consoles, real consoles, etc - this is to ensure that end
	 * users know there might be something in the kernel's log buffer that
	 * went to the bootconsole (that they do not see on the real console).
	 */
	if (bcon &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
	    !keep_bootcon) {
		/* we need to iterate through twice, to make sure we print
		 * everything out, before we unregister the console(s)
		 */
		printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
			newcon->name, newcon->index);
		for_each_console(bcon)
			if (bcon->flags & CON_BOOT)
				unregister_console(bcon);
	} else {
		printk(KERN_INFO "%sconsole [%s%d] enabled\n",
			(newcon->flags & CON_BOOT) ? "boot" : "",
			newcon->name, newcon->index);
	}
}
EXPORT_SYMBOL(register_console);
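
/*
 * Registration sketch (illustrative; my_con_write() and my_con are made-up
 * names and the callback body is a placeholder): a console driver typically
 * declares a struct console and hands it over once its hardware can accept
 * output, e.g.
 *
 *	static void my_con_write(struct console *con, const char *s,
 *				 unsigned int n)
 *	{
 *		... push n bytes of s to the hardware ...
 *	}
 *
 *	static struct console my_con = {
 *		.name	= "mycon",
 *		.write	= my_con_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&my_con);
 *
 * CON_PRINTBUFFER requests a replay of the already-buffered log records,
 * handled above via the exclusive_console mechanism.
 */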
int unregister_console(struct console *console)
{
	struct console *a, *b;
	int res = 1;

#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
	if (console->flags & CON_BRL)
		return braille_unregister_console(console);
#endif

	console_lock();
	if (console_drivers == console) {
		console_drivers = console->next;
		res = 0;
	} else if (console_drivers) {
		for (a = console_drivers->next, b = console_drivers;
		     a; b = a, a = b->next) {
			if (a == console) {
				b->next = a->next;
				res = 0;
				break;
			}
		}
	}

	/*
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 */
	if (console_drivers != NULL && console->flags & CON_CONSDEV)
		console_drivers->flags |= CON_CONSDEV;

	console_unlock();
	console_sysfs_notify();
	return res;
}
EXPORT_SYMBOL(unregister_console);
static int __init printk_late_init(void)
{
	struct console *con;

	for_each_console(con) {
		if (!keep_bootcon && con->flags & CON_BOOT) {
			printk(KERN_INFO "turn off boot console %s%d\n",
				con->name, con->index);
			unregister_console(con);
		}
	}
	hotcpu_notifier(console_cpu_notify, 0);
	return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_BUF_SIZE		512

#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_SCHED	0x02

static DEFINE_PER_CPU(int, printk_pending);
static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = __this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_SCHED) {
		char *buf = __get_cpu_var(printk_sched_buf);
		printk(KERN_WARNING "[sched_delayed] %s", buf);
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
	.func = wake_up_klogd_work_func,
	.flags = IRQ_WORK_LAZY,
};

void wake_up_klogd(void)
{
	preempt_disable();
	if (waitqueue_active(&log_wait)) {
		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
	}
	preempt_enable();
}

int printk_sched(const char *fmt, ...)
{
	unsigned long flags;
	va_list args;
	char *buf;
	int r;

	local_irq_save(flags);
	buf = __get_cpu_var(printk_sched_buf);

	va_start(args, fmt);
	r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
	va_end(args);

	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
	irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
	local_irq_restore(flags);

	return r;
}
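
/*
 * Usage sketch (illustrative; "cpu" is a placeholder variable): scheduler
 * code that must not take the console path while holding the runqueue lock
 * can defer the message, which is emitted later from irq_work with a
 * "[sched_delayed]" prefix:
 *
 *	printk_sched("CPU %d: runtime overrun\n", cpu);
 *
 * Note that the per-CPU buffer holds a single message, so a second call on
 * the same CPU before the irq_work runs overwrites the first.
 */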
/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s to make a denial-of-service attack impossible.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
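
/*
 * Usage sketch (illustrative): callers normally go through the
 * printk_ratelimit() wrapper from <linux/printk.h>, which passes __func__
 * to __printk_ratelimit():
 *
 *	if (printk_ratelimit())
 *		printk(KERN_NOTICE "dropping packet\n");
 *
 * When the limit is exceeded the message is simply skipped and the
 * ratelimit code later reports how many callbacks were suppressed.
 */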
/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies: pointer to caller's state
 * @interval_msecs: minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
			unsigned int interval_msecs)
{
	if (*caller_jiffies == 0
			|| !time_in_range(jiffies, *caller_jiffies,
					*caller_jiffies
					+ msecs_to_jiffies(interval_msecs))) {
		*caller_jiffies = jiffies;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
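
/*
 * Usage sketch (illustrative): the caller keeps the state, typically in a
 * static variable, and asks for at most one message per interval:
 *
 *	static unsigned long last;
 *
 *	if (printk_timed_ratelimit(&last, 5000))
 *		printk(KERN_WARNING "device still not responding\n");
 *
 * Here 5000 means "no more than one line every five seconds".
 */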
static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);

/**
 * kmsg_dump_register - register a kernel log dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Adds a kernel log dumper to the system. The dump callback in the
 * structure will be called when the kernel oopses or panics and must be
 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
 */
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EBUSY;

	/* The dump callback needs to be set */
	if (!dumper->dump)
		return -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	/* Don't allow registering multiple times */
	if (!dumper->registered) {
		dumper->registered = 1;
		list_add_tail_rcu(&dumper->list, &dump_list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
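
/*
 * Registration sketch (illustrative; my_dump() and my_dumper are made-up
 * names): a crash-log backend registers a callback that will be invoked
 * from kmsg_dump() below:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    enum kmsg_dump_reason reason)
 *	{
 *		... pull records with kmsg_dump_get_buffer() ...
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump = my_dump,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */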
/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	if (dumper->registered) {
		dumper->registered = 0;
		list_del_rcu(&dumper->list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

/**
 * kmsg_dump - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 *
 * Call each of the registered dumpers' dump() callbacks, which can
 * retrieve the kmsg records with kmsg_dump_get_line() or
 * kmsg_dump_get_buffer().
 */
void kmsg_dump(enum kmsg_dump_reason reason)
{
	struct kmsg_dumper *dumper;
	unsigned long flags;

	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		if (dumper->max_reason && reason > dumper->max_reason)
			continue;

		/* initialize iterator with data about the stored records */
		dumper->active = true;

		raw_spin_lock_irqsave(&logbuf_lock, flags);
		dumper->cur_seq = clear_seq;
		dumper->cur_idx = clear_idx;
		dumper->next_seq = log_next_seq;
		dumper->next_idx = log_next_idx;
		raw_spin_unlock_irqrestore(&logbuf_lock, flags);

		/* invoke dumper which will iterate over records */
		dumper->dump(dumper, reason);

		/* reset iterator */
		dumper->active = false;
	}
	rcu_read_unlock();
}
/**
 * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 *
 * The function is similar to kmsg_dump_get_line(), but grabs no locks.
 */
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
			       char *line, size_t size, size_t *len)
{
	struct log *msg;
	size_t l = 0;
	bool ret = false;

	if (!dumper->active)
		goto out;

	if (dumper->cur_seq < log_first_seq) {
		/* messages are gone, move to first available one */
		dumper->cur_seq = log_first_seq;
		dumper->cur_idx = log_first_idx;
	}

	/* last entry */
	if (dumper->cur_seq >= log_next_seq)
		goto out;

	msg = log_from_idx(dumper->cur_idx);
	l = msg_print_text(msg, 0, syslog, line, size);

	dumper->cur_idx = log_next(dumper->cur_idx);
	dumper->cur_seq++;
	ret = true;
out:
	if (len)
		*len = l;
	return ret;
}

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
			char *line, size_t size, size_t *len)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
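
/*
 * Usage sketch (illustrative; buf is a caller-provided scratch buffer and
 * write_to_backend() stands in for whatever persistent store the dumper
 * feeds): a dump() callback walks the records oldest-first like this:
 *
 *	char buf[256];
 *	size_t len;
 *
 *	while (kmsg_dump_get_line(dumper, true, buf, sizeof(buf), &len))
 *		write_to_backend(buf, len);
 */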
/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records that fit into it.
 * If the buffer is large enough, all available kmsg records will be
 * copied with a single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
			  char *buf, size_t size, size_t *len)
{
	unsigned long flags;
	u64 seq;
	u32 idx;
	u64 next_seq;
	u32 next_idx;
	enum log_flags prev;
	size_t l = 0;
	bool ret = false;

	if (!dumper->active)
		goto out;

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	if (dumper->cur_seq < log_first_seq) {
		/* messages are gone, move to first available one */
		dumper->cur_seq = log_first_seq;
		dumper->cur_idx = log_first_idx;
	}

	/* last entry */
	if (dumper->cur_seq >= dumper->next_seq) {
		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
		goto out;
	}

	/* calculate length of entire buffer */
	seq = dumper->cur_seq;
	idx = dumper->cur_idx;
	prev = 0;
	while (seq < dumper->next_seq) {
		struct log *msg = log_from_idx(idx);

		l += msg_print_text(msg, prev, true, NULL, 0);
		idx = log_next(idx);
		seq++;
		prev = msg->flags;
	}

	/* move first record forward until length fits into the buffer */
	seq = dumper->cur_seq;
	idx = dumper->cur_idx;
	prev = 0;
	while (l > size && seq < dumper->next_seq) {
		struct log *msg = log_from_idx(idx);

		l -= msg_print_text(msg, prev, true, NULL, 0);
		idx = log_next(idx);
		seq++;
		prev = msg->flags;
	}

	/* last message in next iteration */
	next_seq = seq;
	next_idx = idx;

	l = 0;
	prev = 0;
	while (seq < dumper->next_seq) {
		struct log *msg = log_from_idx(idx);

		l += msg_print_text(msg, prev, syslog, buf + l, size - l);
		idx = log_next(idx);
		seq++;
		prev = msg->flags;
	}

	dumper->next_seq = next_seq;
	dumper->next_idx = next_idx;
	ret = true;
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
out:
	if (len)
		*len = l;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
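
/*
 * Usage sketch (illustrative; dump_buf and write_to_backend() are
 * placeholders for the dumper's own storage): pstore-style dumpers grab the
 * youngest chunk of the log in one go, rewinding first if they dump more
 * than once per callback:
 *
 *	static char dump_buf[4096];
 *	size_t len;
 *
 *	kmsg_dump_rewind(dumper);
 *	if (kmsg_dump_get_buffer(dumper, false, dump_buf,
 *				 sizeof(dump_buf), &len))
 *		write_to_backend(dump_buf, len);
 */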
/**
 * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
 * @dumper: registered kmsg dumper
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 *
 * The function is similar to kmsg_dump_rewind(), but grabs no locks.
 */
void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
{
	dumper->cur_seq = clear_seq;
	dumper->cur_idx = clear_idx;
	dumper->next_seq = log_next_seq;
	dumper->next_idx = log_next_idx;
}

/**
 * kmsg_dump_rewind - reset the iterator
 * @dumper: registered kmsg dumper
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	kmsg_dump_rewind_nolock(dumper);
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

#endif