ppp_generic.c

/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
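
/*
 * Note: with long sequence numbers the multilink header is the 2-byte
 * PPP_MP protocol field plus a 4-byte begin/end + 24-bit sequence word
 * (6 bytes); with short sequence numbers the begin/end bits and the
 * 12-bit sequence number fit in 2 bytes, giving 4 bytes in total.
 * This matches the headers built in ppp_mp_explode() below.
 */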

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	} kind;
	struct sk_buff_head xq;	/* pppd transmit queue */
	struct sk_buff_head rq;	/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t refcnt;	/* # refs (incl /dev/ppp attached) */
	int	hdrlen;		/* space to leave for headers */
	int	index;		/* interface unit / channel number */
	int	dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
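
/*
 * PF_TO_X() recovers the enclosing struct ppp or struct channel from an
 * embedded ppp_file pointer via container_of(); file->private_data always
 * points at the ppp_file member, never at the outer structure directly.
 */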

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;	/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */
static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and the atomicity of finding
	 * a channel and updating its file.refcnt field.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])
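/* The protocol number sits in the first two bytes of the frame,
   most significant byte first, so this reads it straight from the skb data. */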

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
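/*
 * The signed 32-bit subtraction makes these comparisons robust against
 * sequence-number wraparound: a is "before" b whenever (a - b) is negative
 * when interpreted as an s32, even if the unsigned values have wrapped.
 */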

/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				 struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);

static struct class *ppp_class;

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)
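/*
 * ppp_lock() takes the transmit lock before the receive lock and
 * ppp_unlock() releases them in the opposite order, consistent with the
 * lock ordering described in the SMP locking comment above.
 */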

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;
	struct iovec iov;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);
			if (ppp->n_channels == 0 &&
			    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
				break;
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	iov.iov_base = buf;
	iov.iov_len = count;
	if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}

	skb_queue_tail(&pf->xq, skb);

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf));
		break;
	case CHANNEL:
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static unsigned int ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	unsigned int mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = POLLOUT | POLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= POLLIN | POLLRDNORM;
	if (pf->dead)
		mask |= POLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);
		if (ppp->n_channels == 0 &&
		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code = NULL;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	if (!uprog.len) {
		*p = NULL;
		return 0;
	}

	len = uprog.len * sizeof(struct sock_filter);
	code = memdup_user(uprog.filter, len);
	if (IS_ERR(code))
		return PTR_ERR(code);

	err = sk_chk_filter(code, uprog.len);
	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_PPP_FILTER */

static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle idle;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	if (!pf)
		return ppp_unattached_ioctl(current->nsproxy->net_ns,
					pf, file, cmd, arg);

	if (cmd == PPPIOCDETACH) {
		/*
		 * We have to be careful here... if the file descriptor
		 * has been dup'd, we could have another process in the
		 * middle of a poll using the same file *, so we had
		 * better not free the interface data structures -
		 * instead we fail the ioctl.  Even in this case, we
		 * shut down the interface if we are the owner of it.
		 * Actually, we should get rid of PPPIOCDETACH, userland
		 * (i.e. pppd) could achieve the same effect by closing
		 * this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		mutex_lock(&ppp_mutex);
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_long_read(&file->f_count) <= 2) {
			ppp_release(NULL, file);
			err = 0;
		} else
			printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
			       atomic_long_read(&file->f_count));
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		mutex_lock(&ppp_mutex);
		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		printk(KERN_ERR "PPP: not interface or channel??\n");
		return -EINVAL;
	}

	mutex_lock(&ppp_mutex);
	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (!vj) {
			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->pass_filter);
			ppp->pass_filter = code;
			ppp->pass_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->active_filter);
			ppp->active_filter = code;
			ppp->active_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	mutex_lock(&ppp_mutex);
	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		ppp = ppp_create_interface(net, unit, &err);
		if (!ppp)
			break;
		file->private_data = &ppp->file;
		ppp->owner = file;
		err = -EFAULT;
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			atomic_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			atomic_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
	.open		= ppp_open,
	.release	= ppp_release
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_destroy(&pn->units_idr);
}

static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
	.id   = &ppp_net_id,
	.size = sizeof(struct ppp_net),
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_device(&ppp_net_ops);
	if (err) {
		printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		printk(KERN_ERR "failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	ppp_class = class_create(THIS_MODULE, "ppp");
	if (IS_ERR(ppp_class)) {
		err = PTR_ERR(ppp_class);
		goto out_chrdev;
	}

	/* not a big deal if we fail here :-) */
	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_device(&ppp_net_ops);
out:
	return err;
}

/*
 * Network interface unit routines.
 */
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;
	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	pp[0] = proto >> 8;
	pp[1] = proto;

	netif_stop_queue(dev);
	skb_queue_tail(&ppp->file.xq, skb);
	ppp_xmit_process(ppp);
	return NETDEV_TX_OK;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return NETDEV_TX_OK;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static const struct net_device_ops ppp_netdev_ops = {
	.ndo_start_xmit = ppp_start_xmit,
	.ndo_do_ioctl   = ppp_net_ioctl,
};

static void ppp_setup(struct net_device *dev)
{
	dev->netdev_ops = &ppp_netdev_ops;
	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MTU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/*
 * Transmit-side routines.
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;

	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			printk(KERN_ERR "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		kfree_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		kfree_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			printk(KERN_ERR "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		kfree_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*skb_push(skb, 2) = 1;
		if (ppp->pass_filter &&
		    sk_run_filter(skb, ppp->pass_filter,
				  ppp->pass_len) == 0) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter &&
		      sk_run_filter(skb, ppp->active_filter,
				    ppp->active_len) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->dev->stats.tx_packets;
	ppp->dev->stats.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (!new_skb) {
			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			kfree_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			kfree_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
	    proto != PPP_LCP && proto != PPP_CCP) {
		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
			if (net_ratelimit())
				printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
			goto drop;
		}
		skb = pad_compress_skb(ppp, skb);
		if (!skb)
			goto drop;
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->dev->stats.tx_errors;
}

/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 */
static void
ppp_push(struct ppp *ppp)
{
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *skb = ppp->xmit_pending;

	if (!skb)
		return;

	list = &ppp->channels;
	if (list_empty(list)) {
		/* nowhere to send the packet, just drop it */
		ppp->xmit_pending = NULL;
		kfree_skb(skb);
		return;
	}

	if ((ppp->flags & SC_MULTILINK) == 0) {
		/* not doing multilink: send it down the first channel */
		list = list->next;
		pch = list_entry(list, struct channel, clist);

		spin_lock_bh(&pch->downl);
		if (pch->chan) {
			if (pch->chan->ops->start_xmit(pch->chan, skb))
				ppp->xmit_pending = NULL;
		} else {
			/* channel got unregistered */
			kfree_skb(skb);
			ppp->xmit_pending = NULL;
		}
		spin_unlock_bh(&pch->downl);
		return;
	}

#ifdef CONFIG_PPP_MULTILINK
	/* Multilink: fragment the packet over as many links
	   as can take the packet at the moment. */
	if (!ppp_mp_explode(ppp, skb))
		return;
#endif /* CONFIG_PPP_MULTILINK */

	ppp->xmit_pending = NULL;
	kfree_skb(skb);
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 */
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
	int len, totlen;
	int i, bits, hdrlen, mtu;
	int flen;
	int navail, nfree, nzero;
	int nbigger;
	int totspeed;
	int totfree;
	unsigned char *p, *q;
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *frag;
	struct ppp_channel *chan;

	totspeed = 0;	/* total bitrate of the bundle */
	nfree = 0;	/* # channels which have no packet already queued */
	navail = 0;	/* total # of usable channels (not deregistered) */
	nzero = 0;	/* number of channels with zero speed associated */
	totfree = 0;	/* total # of channels available and
			   having no queued packets before
			   starting the fragmentation */

	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
	i = 0;
	list_for_each_entry(pch, &ppp->channels, clist) {
		if (pch->chan) {
			pch->avail = 1;
			navail++;
			pch->speed = pch->chan->speed;
		} else {
			pch->avail = 0;
		}
		if (pch->avail) {
			if (skb_queue_empty(&pch->file.xq) ||
			    !pch->had_frag) {
				if (pch->speed == 0)
					nzero++;
				else
					totspeed += pch->speed;

				pch->avail = 2;
				++nfree;
				++totfree;
			}
			if (!pch->had_frag && i < ppp->nxchan)
				ppp->nxchan = i;
		}
		++i;
	}
	/*
	 * Don't start sending this packet unless at least half of
	 * the channels are free.  This gives much better TCP
	 * performance if we have a lot of channels.
	 */
	if (nfree == 0 || nfree < navail / 2)
		return 0;	/* can't take now, leave it in xmit_pending */

	/* Do protocol field compression (XXX this should be optional) */
	p = skb->data;
	len = skb->len;
	if (*p == 0) {
		++p;
		--len;
	}

	totlen = len;
	nbigger = len % nfree;

	/* skip to the channel after the one we last used
	   and start at that one */
	list = &ppp->channels;
	for (i = 0; i < ppp->nxchan; ++i) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			break;
		}
	}

	/* create a fragment for each channel */
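	/*
	 * The first fragment of the packet carries the B (begin) bit; the
	 * fragment that consumes the remaining data gets the E (end) bit
	 * (see flen == len below), and bits is cleared after each fragment
	 * so intermediate fragments carry neither.
	 */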
	bits = B;
	while (len > 0) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			continue;
		}
		pch = list_entry(list, struct channel, clist);
		++i;
		if (!pch->avail)
			continue;

		/*
		 * Skip this channel if it has a fragment pending already and
		 * we haven't given a fragment to all of the free channels.
		 */
		if (pch->avail == 1) {
			if (nfree > 0)
				continue;
		} else {
			pch->avail = 1;
		}

		/* check the channel's mtu and whether it is still attached. */
		spin_lock_bh(&pch->downl);
		if (pch->chan == NULL) {
			/* can't use this channel, it's being deregistered */
			if (pch->speed == 0)
				nzero--;
			else
				totspeed -= pch->speed;

			spin_unlock_bh(&pch->downl);
			pch->avail = 0;
			totlen = len;
			totfree--;
			nfree--;
			if (--navail == 0)
				break;
			continue;
		}

		/*
		 * if the channel speed is not set divide
		 * the packet evenly among the free channels;
		 * otherwise divide it according to the speed
		 * of the channel we are going to transmit on
		 */
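		/*
		 * When speeds are known, the fragment length below is made
		 * roughly proportional to this channel's share of the total
		 * bundle speed (pch->speed / totspeed), applied to the data
		 * plus the per-fragment header overhead; the nbigger
		 * leftover bytes are handed out in proportion to channel
		 * speed as well.
		 */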
		flen = len;
		if (nfree > 0) {
			if (pch->speed == 0) {
				flen = len/nfree;
				if (nbigger > 0) {
					flen++;
					nbigger--;
				}
			} else {
				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
					((totspeed*totfree)/pch->speed)) - hdrlen;
				if (nbigger > 0) {
					flen += ((totfree - nzero)*pch->speed)/totspeed;
					nbigger -= ((totfree - nzero)*pch->speed)/
						   totspeed;
				}
			}
			nfree--;
		}

		/*
		 * check if we are on the last channel or
		 * we exceeded the length of the data to
		 * fragment
		 */
		if ((nfree <= 0) || (flen > len))
			flen = len;

		/*
		 * it is not worth transmitting on slow channels:
		 * in that case the flen resulting from the above
		 * formula will be less than or equal to zero.
		 * Skip the channel in this case.
		 */
		if (flen <= 0) {
			pch->avail = 2;
			spin_unlock_bh(&pch->downl);
			continue;
		}

		mtu = pch->chan->mtu - hdrlen;
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		q[0] = PPP_MP >> 8;
		q[1] = PPP_MP;
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		printk(KERN_ERR "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
 */

/* misuse a few fields of the skb for MP reconstruction */
#define sequence priority
#define BEbits cb[0]
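/*
 * During multilink reconstruction the skb->priority field is reused to hold
 * the fragment's sequence number and cb[0] holds its B/E bits; neither field
 * is needed for its normal purpose while the fragment sits on the
 * reconstruction queue.
 */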
  1394. static inline void
  1395. ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
  1396. {
  1397. ppp_recv_lock(ppp);
  1398. if (!ppp->closing)
  1399. ppp_receive_frame(ppp, skb, pch);
  1400. else
  1401. kfree_skb(skb);
  1402. ppp_recv_unlock(ppp);
  1403. }
void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct channel *pch = chan->ppp;
	int proto;

	if (!pch) {
		kfree_skb(skb);
		return;
	}

	read_lock_bh(&pch->upl);
	if (!pskb_may_pull(skb, 2)) {
		kfree_skb(skb);
		if (pch->ppp) {
			++pch->ppp->dev->stats.rx_length_errors;
			ppp_receive_error(pch->ppp);
		}
		goto done;
	}

	proto = PPP_PROTO(skb);
	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
		/* put it on the channel queue */
		skb_queue_tail(&pch->file.rq, skb);
		/* drop old frames if queue too long */
		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
		       (skb = skb_dequeue(&pch->file.rq)))
			kfree_skb(skb);
		wake_up_interruptible(&pch->file.rwait);
	} else {
		ppp_do_recv(pch->ppp, skb, pch);
	}

done:
	read_unlock_bh(&pch->upl);
}

/* Put a 0-length skb in the receive queue as an error indication */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
	struct channel *pch = chan->ppp;
	struct sk_buff *skb;

	if (!pch)
		return;

	read_lock_bh(&pch->upl);
	if (pch->ppp) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb) {
			skb->len = 0;		/* probably unnecessary */
			skb->cb[0] = code;
			ppp_do_recv(pch->ppp, skb, pch);
		}
	}
	read_unlock_bh(&pch->upl);
}

/*
 * We come in here to process a received frame.
 * The receive side of the ppp unit is locked.
 */
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	/* note: a 0-length skb is used as an error indication */
	if (skb->len > 0) {
#ifdef CONFIG_PPP_MULTILINK
		/* XXX do channel-level decompression here */
		if (PPP_PROTO(skb) == PPP_MP)
			ppp_receive_mp_frame(ppp, skb, pch);
		else
#endif /* CONFIG_PPP_MULTILINK */
			ppp_receive_nonmp_frame(ppp, skb);
	} else {
		kfree_skb(skb);
		ppp_receive_error(ppp);
	}
}

static void
ppp_receive_error(struct ppp *ppp)
{
	++ppp->dev->stats.rx_errors;
	if (ppp->vj)
		slhc_toss(ppp->vj);
}

static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *ns;
	int proto, len, npi;

	/*
	 * Decompress the frame, if compressed.
	 * Note that some decompressors need to see uncompressed frames
	 * that come in as well as compressed frames.
	 */
	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
		skb = ppp_decompress_frame(ppp, skb);

	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
		goto err;

	proto = PPP_PROTO(skb);
	switch (proto) {
	case PPP_VJC_COMP:
		/* decompress VJ compressed packets */
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;
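
		/*
		 * slhc_uncompress() rebuilds the full TCP/IP header in
		 * place, so the packet can grow; if the skb is cloned or
		 * lacks tailroom, copy it into a roomier buffer first.
		 */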
		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
			/* copy to a new sk_buff with more tailroom */
			ns = dev_alloc_skb(skb->len + 128);
			if (!ns) {
				printk(KERN_ERR "PPP: no memory (VJ decomp)\n");
				goto err;
			}
			skb_reserve(ns, 2);
			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
			kfree_skb(skb);
			skb = ns;
		}
		else
			skb->ip_summed = CHECKSUM_NONE;

		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
		if (len <= 0) {
			printk(KERN_DEBUG "PPP: VJ decompression error\n");
			goto err;
		}
		len += 2;
		if (len > skb->len)
			skb_put(skb, len - skb->len);
		else if (len < skb->len)
			skb_trim(skb, len);
		proto = PPP_IP;
		break;

	case PPP_VJC_UNCOMP:
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		/* Until we fix the decompressor, we need to make sure
		 * the data portion is linear.
		 */
		if (!pskb_may_pull(skb, skb->len))
			goto err;

		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
			printk(KERN_ERR "PPP: VJ uncompressed error\n");
			goto err;
		}
		proto = PPP_IP;
		break;

	case PPP_CCP:
		ppp_ccp_peek(ppp, skb, 1);
		break;
	}

	++ppp->dev->stats.rx_packets;
	ppp->dev->stats.rx_bytes += skb->len - 2;

	npi = proto_to_npindex(proto);
	if (npi < 0) {
		/* control or unknown frame - pass it to pppd */
		skb_queue_tail(&ppp->file.rq, skb);
		/* limit queue length by dropping old frames */
		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
		       (skb = skb_dequeue(&ppp->file.rq)))
			kfree_skb(skb);
		/* wake up any process polling or blocking on read */
		wake_up_interruptible(&ppp->file.rwait);

	} else {
		/* network protocol frame - give it to the kernel */

#ifdef CONFIG_PPP_FILTER
		/* check if the packet passes the pass and active filters */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		if (ppp->pass_filter || ppp->active_filter) {
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto err;

			*skb_push(skb, 2) = 0;
			if (ppp->pass_filter &&
			    sk_run_filter(skb, ppp->pass_filter,
					  ppp->pass_len) == 0) {
				if (ppp->debug & 1)
					printk(KERN_DEBUG "PPP: inbound frame "
					       "not passed\n");
				kfree_skb(skb);
				return;
			}
			if (!(ppp->active_filter &&
			      sk_run_filter(skb, ppp->active_filter,
					    ppp->active_len) == 0))
				ppp->last_recv = jiffies;
			__skb_pull(skb, 2);
		} else
#endif /* CONFIG_PPP_FILTER */
			ppp->last_recv = jiffies;

		if ((ppp->dev->flags & IFF_UP) == 0 ||
		    ppp->npmode[npi] != NPMODE_PASS) {
			kfree_skb(skb);
		} else {
			/* chop off protocol */
			skb_pull_rcsum(skb, 2);
			skb->dev = ppp->dev;
			skb->protocol = htons(npindex_to_ethertype[npi]);
			skb_reset_mac_header(skb);
			netif_rx(skb);
		}
	}
	return;

err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *ns;
	int len;

	/* Until we fix all the decompressors, we need to make sure
	 * the data portion is linear.
	 */
	if (!pskb_may_pull(skb, skb->len))
		goto err;

	if (proto == PPP_COMP) {
		int obuff_size;

		switch (ppp->rcomp->compress_proto) {
		case CI_MPPE:
			obuff_size = ppp->mru + PPP_HDRLEN + 1;
			break;
		default:
			obuff_size = ppp->mru + PPP_HDRLEN;
			break;
		}

		ns = dev_alloc_skb(obuff_size);
		if (!ns) {
			printk(KERN_ERR "ppp_decompress_frame: no memory\n");
			goto err;
		}
		/* the decompressor still expects the A/C bytes in the hdr */
		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
				skb->len + 2, ns->data, obuff_size);
		if (len < 0) {
			/* Pass the compressed frame to pppd as an
			   error indication. */
			if (len == DECOMP_FATALERROR)
				ppp->rstate |= SC_DC_FERROR;
			kfree_skb(ns);
			goto err;
		}

		kfree_skb(skb);
		skb = ns;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off the A/C bytes */

	} else {
		/* Uncompressed frame - pass to decompressor so it
		   can update its dictionary if necessary. */
		if (ppp->rcomp->incomp)
			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
					   skb->len + 2);
	}

	return skb;

err:
	ppp->rstate |= SC_DC_ERROR;
	ppp_receive_error(ppp);
	return skb;
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Receive a multilink frame.
 * We put it on the reconstruction queue and then pull off
 * as many completed frames as we can.
 */
static void
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	u32 mask, seq;
	struct channel *ch;
	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;

	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
		goto err;		/* no good, throw it away */

	/* Decode sequence number and begin/end bits */
	if (ppp->flags & SC_MP_SHORTSEQ) {
		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
		mask = 0xfff;
	} else {
		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
		mask = 0xffffff;
	}
	skb->BEbits = skb->data[2];
	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */

	/*
	 * Do protocol ID decompression on the first fragment of each packet.
	 */
	if ((skb->BEbits & B) && (skb->data[0] & 1))
		*skb_push(skb, 1) = 0;

	/*
	 * Expand sequence number to 32 bits, making it as close
	 * as possible to ppp->minseq.
	 */
	seq |= ppp->minseq & ~mask;
	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
		seq += mask + 1;
	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
		seq -= mask + 1;	/* should never happen */
	skb->sequence = seq;
	pch->lastseq = seq;

	/*
	 * If this packet comes before the next one we were expecting,
	 * drop it.
	 */
	if (seq_before(seq, ppp->nextseq)) {
		kfree_skb(skb);
		++ppp->dev->stats.rx_dropped;
		ppp_receive_error(ppp);
		return;
	}

	/*
	 * Reevaluate minseq, the minimum over all channels of the
	 * last sequence number received on each channel.  Because of
	 * the increasing sequence number rule, we know that any fragment
	 * before `minseq' which hasn't arrived is never going to arrive.
	 * The list of channels can't change because we have the receive
	 * side of the ppp unit locked.
	 */
	list_for_each_entry(ch, &ppp->channels, clist) {
		if (seq_before(ch->lastseq, seq))
			seq = ch->lastseq;
	}
	if (seq_before(ppp->minseq, seq))
		ppp->minseq = seq;

	/* Put the fragment on the reconstruction queue */
	ppp_mp_insert(ppp, skb);

	/* If the queue is getting long, don't wait any longer for packets
	   before the start of the queue. */
	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
		struct sk_buff *mskb = skb_peek(&ppp->mrq);
		if (seq_before(ppp->minseq, mskb->sequence))
			ppp->minseq = mskb->sequence;
	}

	/* Pull completed packets off the queue and receive them. */
	while ((skb = ppp_mp_reconstruct(ppp))) {
		if (pskb_may_pull(skb, 2))
			ppp_receive_nonmp_frame(ppp, skb);
		else {
			++ppp->dev->stats.rx_length_errors;
			kfree_skb(skb);
			ppp_receive_error(ppp);
		}
	}
	return;

err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

/*
 * Insert a fragment on the MP reconstruction queue.
 * The queue is ordered by increasing sequence number.
 */
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct sk_buff_head *list = &ppp->mrq;
	u32 seq = skb->sequence;

	/* N.B. we don't need to lock the list lock because we have the
	   ppp unit receive-side lock. */
	skb_queue_walk(list, p) {
		if (seq_before(seq, p->sequence))
			break;
	}
	__skb_queue_before(list, p, skb);
}

/*
 * Reconstruct a packet from the MP fragment queue.
 * We go through increasing sequence numbers until we find a
 * complete packet, or we get to the sequence number for a fragment
 * which hasn't arrived but might still do so.
 */
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
	u32 seq = ppp->nextseq;
	u32 minseq = ppp->minseq;
	struct sk_buff_head *list = &ppp->mrq;
	struct sk_buff *p, *next;
	struct sk_buff *head, *tail;
	struct sk_buff *skb = NULL;
	int lost = 0, len = 0;
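
	/*
	 * head/tail bracket the fragments of the packet currently being
	 * assembled, lost records that a fragment known to be missing
	 * (at or before minseq) was skipped, and len accumulates the
	 * reconstructed packet length.
	 */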

	if (ppp->mrru == 0)	/* do nothing until mrru is set */
		return NULL;
	head = list->next;
	tail = NULL;
	for (p = head; p != (struct sk_buff *) list; p = next) {
		next = p->next;
		if (seq_before(p->sequence, seq)) {
			/* this can't happen, anyway ignore the skb */
			printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
			       p->sequence, seq);
			head = next;
			continue;
		}
		if (p->sequence != seq) {
			/* Fragment `seq' is missing.  If it is after
			   minseq, it might arrive later, so stop here. */
			if (seq_after(seq, minseq))
				break;
			/* Fragment `seq' is lost, keep going. */
			lost = 1;
			seq = seq_before(minseq, p->sequence) ?
				minseq + 1 : p->sequence;
			next = p;
			continue;
		}

		/*
		 * At this point we know that all the fragments from
		 * ppp->nextseq to seq are either present or lost.
		 * Also, there are no complete packets in the queue
		 * that have no missing fragments and end before this
		 * fragment.
		 */

		/* B bit set indicates this fragment starts a packet */
		if (p->BEbits & B) {
			head = p;
			lost = 0;
			len = 0;
		}

		len += p->len;

		/* Got a complete packet yet? */
		if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) {
			if (len > ppp->mrru + 2) {
				++ppp->dev->stats.rx_length_errors;
				printk(KERN_DEBUG "PPP: reconstructed packet"
				       " is too long (%d)\n", len);
			} else if (p == head) {
				/* fragment is complete packet - reuse skb */
				tail = p;
				skb = skb_get(p);
				break;
			} else if ((skb = dev_alloc_skb(len)) == NULL) {
				++ppp->dev->stats.rx_missed_errors;
				printk(KERN_DEBUG "PPP: no memory for "
				       "reconstructed packet");
			} else {
				tail = p;
				break;
			}
			ppp->nextseq = seq + 1;
		}

		/*
		 * If this is the ending fragment of a packet,
		 * and we haven't found a complete valid packet yet,
		 * we can discard up to and including this fragment.
		 */
		if (p->BEbits & E)
			head = next;

		++seq;
	}

	/* If we have a complete packet, copy it all into one skb. */
	if (tail != NULL) {
		/* If we have discarded any fragments,
		   signal a receive error. */
		if (head->sequence != ppp->nextseq) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG " missed pkts %u..%u\n",
				       ppp->nextseq, head->sequence-1);
			++ppp->dev->stats.rx_dropped;
			ppp_receive_error(ppp);
		}

		if (head != tail)
			/* copy to a single skb */
			for (p = head; p != tail->next; p = p->next)
				skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
		ppp->nextseq = tail->sequence + 1;
		head = tail->next;
	}

	/* Discard all the skbuffs that we have copied the data out of
	   or that we can't use. */
	while ((p = list->next) != head) {
		__skb_unlink(p, list);
		kfree_skb(p);
	}

	return skb;
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Channel interface.
 */

/* Create a new, unattached ppp channel. */
int ppp_register_channel(struct ppp_channel *chan)
{
	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
}

/* Create a new, unattached ppp channel for specified net. */
int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
{
	struct channel *pch;
	struct ppp_net *pn;

	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
	if (!pch)
		return -ENOMEM;

	pn = ppp_pernet(net);

	pch->ppp = NULL;
	pch->chan = chan;
	pch->chan_net = net;
	chan->ppp = pch;
	init_ppp_file(&pch->file, CHANNEL);
	pch->file.hdrlen = chan->hdrlen;
#ifdef CONFIG_PPP_MULTILINK
	pch->lastseq = -1;
#endif /* CONFIG_PPP_MULTILINK */
	init_rwsem(&pch->chan_sem);
	spin_lock_init(&pch->downl);
	rwlock_init(&pch->upl);

	spin_lock_bh(&pn->all_channels_lock);
	pch->file.index = ++pn->last_channel_index;
	list_add(&pch->list, &pn->new_channels);
	atomic_inc(&channel_count);
	spin_unlock_bh(&pn->all_channels_lock);

	return 0;
}

/*
 * Return the index of a channel.
 */
int ppp_channel_index(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (pch)
		return pch->file.index;
	return -1;
}

/*
 * Return the PPP unit number to which a channel is connected.
 */
int ppp_unit_number(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	int unit = -1;

	if (pch) {
		read_lock_bh(&pch->upl);
		if (pch->ppp)
			unit = pch->ppp->file.index;
		read_unlock_bh(&pch->upl);
	}
	return unit;
}

/*
 * Return the PPP device interface name of a channel.
 */
char *ppp_dev_name(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	char *name = NULL;

	if (pch) {
		read_lock_bh(&pch->upl);
		if (pch->ppp && pch->ppp->dev)
			name = pch->ppp->dev->name;
		read_unlock_bh(&pch->upl);
	}
	return name;
}

/*
 * Disconnect a channel from the generic layer.
 * This must be called in process context.
 */
void
ppp_unregister_channel(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	struct ppp_net *pn;

	if (!pch)
		return;		/* should never happen */

	chan->ppp = NULL;

	/*
	 * This ensures that we have returned from any calls into
	 * the channel's start_xmit or ioctl routine before we proceed.
	 */
	down_write(&pch->chan_sem);
	spin_lock_bh(&pch->downl);
	pch->chan = NULL;
	spin_unlock_bh(&pch->downl);
	up_write(&pch->chan_sem);
	ppp_disconnect_channel(pch);

	pn = ppp_pernet(pch->chan_net);
	spin_lock_bh(&pn->all_channels_lock);
	list_del(&pch->list);
	spin_unlock_bh(&pn->all_channels_lock);

	pch->file.dead = 1;
	wake_up_interruptible(&pch->file.rwait);
	if (atomic_dec_and_test(&pch->file.refcnt))
		ppp_destroy_channel(pch);
}

/*
 * Callback from a channel when it can accept more to transmit.
 * This should be called at BH/softirq level, not interrupt level.
 */
void
ppp_output_wakeup(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (!pch)
		return;
	ppp_channel_push(pch);
}

/*
 * Compression control.
 */

/* Process the PPPIOCSCOMPRESS ioctl. */
static int
ppp_set_compress(struct ppp *ppp, unsigned long arg)
{
	int err;
	struct compressor *cp, *ocomp;
	struct ppp_option_data data;
	void *state, *ostate;
	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];

	err = -EFAULT;
	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
	    (data.length <= CCP_MAX_OPTION_LENGTH &&
	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
		goto out;
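
	/*
	 * ccp_option[0] is the CCP option type (the compressor's protocol
	 * number) and ccp_option[1] is the option length; sanity-check
	 * them before looking up a matching compressor.
	 */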
	err = -EINVAL;
	if (data.length > CCP_MAX_OPTION_LENGTH ||
	    ccp_option[1] < 2 || ccp_option[1] > data.length)
		goto out;

	cp = try_then_request_module(
		find_compressor(ccp_option[0]),
		"ppp-compress-%d", ccp_option[0]);
	if (!cp)
		goto out;

	err = -ENOBUFS;
	if (data.transmit) {
		state = cp->comp_alloc(ccp_option, data.length);
		if (state) {
			ppp_xmit_lock(ppp);
			ppp->xstate &= ~SC_COMP_RUN;
			ocomp = ppp->xcomp;
			ostate = ppp->xc_state;
			ppp->xcomp = cp;
			ppp->xc_state = state;
			ppp_xmit_unlock(ppp);
			if (ostate) {
				ocomp->comp_free(ostate);
				module_put(ocomp->owner);
			}
			err = 0;
		} else
			module_put(cp->owner);

	} else {
		state = cp->decomp_alloc(ccp_option, data.length);
		if (state) {
			ppp_recv_lock(ppp);
			ppp->rstate &= ~SC_DECOMP_RUN;
			ocomp = ppp->rcomp;
			ostate = ppp->rc_state;
			ppp->rcomp = cp;
			ppp->rc_state = state;
			ppp_recv_unlock(ppp);
			if (ostate) {
				ocomp->decomp_free(ostate);
				module_put(ocomp->owner);
			}
			err = 0;
		} else
			module_put(cp->owner);
	}

out:
	return err;
}

/*
 * Look at a CCP packet and update our state accordingly.
 * We assume the caller has the xmit or recv path locked.
 */
static void
ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
{
	unsigned char *dp;
	int len;

	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
		return;	/* no header */
	dp = skb->data + 2;

	switch (CCP_CODE(dp)) {
	case CCP_CONFREQ:

		/* A ConfReq starts negotiation of compression
		 * in one direction of transmission,
		 * and hence brings it down...but which way?
		 *
		 * Remember:
		 * A ConfReq indicates what the sender would like to receive
		 */
		if (inbound)
			/* He is proposing what I should send */
			ppp->xstate &= ~SC_COMP_RUN;
		else
			/* I am proposing what he should send */
			ppp->rstate &= ~SC_DECOMP_RUN;

		break;

	case CCP_TERMREQ:
	case CCP_TERMACK:
		/*
		 * CCP is going down, both directions of transmission
		 */
		ppp->rstate &= ~SC_DECOMP_RUN;
		ppp->xstate &= ~SC_COMP_RUN;
		break;
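
	/*
	 * A ConfAck completes the negotiation: one seen on the receive
	 * path means the peer has accepted our ConfReq, so the receive
	 * decompressor can be (re)initialized; one we are about to send
	 * accepts the peer's request, so set up the transmit compressor.
	 */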
	case CCP_CONFACK:
		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
			break;
		len = CCP_LENGTH(dp);
		if (!pskb_may_pull(skb, len + 2))
			return;		/* too short */
		dp += CCP_HDRLEN;
		len -= CCP_HDRLEN;
		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
			break;
		if (inbound) {
			/* we will start receiving compressed packets */
			if (!ppp->rc_state)
				break;
			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
					ppp->file.index, 0, ppp->mru, ppp->debug)) {
				ppp->rstate |= SC_DECOMP_RUN;
				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
			}
		} else {
			/* we will soon start sending compressed packets */
			if (!ppp->xc_state)
				break;
			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
					ppp->file.index, 0, ppp->debug))
				ppp->xstate |= SC_COMP_RUN;
		}
		break;

	case CCP_RESETACK:
		/* reset the [de]compressor */
		if ((ppp->flags & SC_CCP_UP) == 0)
			break;
		if (inbound) {
			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
				ppp->rcomp->decomp_reset(ppp->rc_state);
				ppp->rstate &= ~SC_DC_ERROR;
			}
		} else {
			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
				ppp->xcomp->comp_reset(ppp->xc_state);
		}
		break;
	}
}

/* Free up compression resources. */
static void
ppp_ccp_closed(struct ppp *ppp)
{
	void *xstate, *rstate;
	struct compressor *xcomp, *rcomp;

	ppp_lock(ppp);
	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
	ppp->xstate = 0;
	xcomp = ppp->xcomp;
	xstate = ppp->xc_state;
	ppp->xc_state = NULL;
	ppp->rstate = 0;
	rcomp = ppp->rcomp;
	rstate = ppp->rc_state;
	ppp->rc_state = NULL;
	ppp_unlock(ppp);

	if (xstate) {
		xcomp->comp_free(xstate);
		module_put(xcomp->owner);
	}
	if (rstate) {
		rcomp->decomp_free(rstate);
		module_put(rcomp->owner);
	}
}

/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);

struct compressor_entry {
	struct list_head list;
	struct compressor *comp;
};

static struct compressor_entry *
find_comp_entry(int proto)
{
	struct compressor_entry *ce;

	list_for_each_entry(ce, &compressor_list, list) {
		if (ce->comp->compress_proto == proto)
			return ce;
	}
	return NULL;
}

/* Register a compressor */
int
ppp_register_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;
	int ret;

	spin_lock(&compressor_list_lock);
	ret = -EEXIST;
	if (find_comp_entry(cp->compress_proto))
		goto out;
	ret = -ENOMEM;
	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
	if (!ce)
		goto out;
	ret = 0;
	ce->comp = cp;
	list_add(&ce->list, &compressor_list);
out:
	spin_unlock(&compressor_list_lock);
	return ret;
}

/* Unregister a compressor */
void
ppp_unregister_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(cp->compress_proto);
	if (ce && ce->comp == cp) {
		list_del(&ce->list);
		kfree(ce);
	}
	spin_unlock(&compressor_list_lock);
}

/* Find a compressor. */
static struct compressor *
find_compressor(int type)
{
	struct compressor_entry *ce;
	struct compressor *cp = NULL;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(type);
	if (ce) {
		cp = ce->comp;
		if (!try_module_get(cp->owner))
			cp = NULL;
	}
	spin_unlock(&compressor_list_lock);
	return cp;
}

/*
 * Miscellaneous stuff.
 */

static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
	struct slcompress *vj = ppp->vj;

	memset(st, 0, sizeof(*st));
	st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
	st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
	st->p.ppp_opackets = ppp->dev->stats.tx_packets;
	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
	st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
	if (!vj)
		return;
	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
	st->vj.vjs_compressed = vj->sls_o_compressed;
	st->vj.vjs_searches = vj->sls_o_searches;
	st->vj.vjs_misses = vj->sls_o_misses;
	st->vj.vjs_errorin = vj->sls_i_error;
	st->vj.vjs_tossed = vj->sls_i_tossed;
	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
	st->vj.vjs_compressedin = vj->sls_i_compressed;
}

/*
 * Stuff for handling the lists of ppp units and channels
 * and for initialization.
 */

/*
 * Create a new ppp interface unit.  Fails if it can't allocate memory
 * or if there is already a unit with the requested number.
 * unit == -1 means allocate a new number.
 */
static struct ppp *
ppp_create_interface(struct net *net, int unit, int *retp)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	struct net_device *dev = NULL;
	int ret = -ENOMEM;
	int i;

	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
	if (!dev)
		goto out1;

	pn = ppp_pernet(net);

	ppp = netdev_priv(dev);
	ppp->dev = dev;
	ppp->mru = PPP_MRU;
	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);
#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */

	/*
	 * drum roll: don't forget to set the net namespace
	 * this device belongs to
	 */
	dev_net_set(dev, net);

	ret = -EEXIST;
	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		unit = unit_get(&pn->units_idr, ppp);
		if (unit < 0) {
			*retp = unit;
			goto out2;
		}
	} else {
		if (unit_find(&pn->units_idr, unit))
			goto out2; /* unit already exists */
		/*
		 * If the caller asked for a specific unit number, try to
		 * satisfy the request; otherwise the caller should ask
		 * for a new unit number.
		 *
		 * NOTE: returning -EEXIST here is not ideal, but pppd
		 * will then ask us to allocate a new unit, so the user
		 * still ends up with a working interface.
		 */
		unit = unit_set(&pn->units_idr, ppp, unit);
		if (unit < 0)
			goto out2;
	}

	/* Initialize the new ppp unit */
	ppp->file.index = unit;
	sprintf(dev->name, "ppp%d", unit);

	ret = register_netdev(dev);
	if (ret != 0) {
		unit_put(&pn->units_idr, unit);
		printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
		       dev->name, ret);
		goto out2;
	}

	ppp->ppp_net = net;

	atomic_inc(&ppp_unit_count);
	mutex_unlock(&pn->all_ppp_mutex);

	*retp = 0;
	return ppp;

out2:
	mutex_unlock(&pn->all_ppp_mutex);
	free_netdev(dev);
out1:
	*retp = ret;
	return NULL;
}

/*
 * Initialize a ppp_file structure.
 */
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
	pf->kind = kind;
	skb_queue_head_init(&pf->xq);
	skb_queue_head_init(&pf->rq);
	atomic_set(&pf->refcnt, 1);
	init_waitqueue_head(&pf->rwait);
}

/*
 * Take down a ppp interface unit - called when the owning file
 * (the one that created the unit) is closed or detached.
 */
static void ppp_shutdown_interface(struct ppp *ppp)
{
	struct ppp_net *pn;

	pn = ppp_pernet(ppp->ppp_net);
	mutex_lock(&pn->all_ppp_mutex);

	/* This will call dev_close() for us. */
	ppp_lock(ppp);
	if (!ppp->closing) {
		ppp->closing = 1;
		ppp_unlock(ppp);
		unregister_netdev(ppp->dev);
	} else
		ppp_unlock(ppp);

	unit_put(&pn->units_idr, ppp->file.index);
	ppp->file.dead = 1;
	ppp->owner = NULL;
	wake_up_interruptible(&ppp->file.rwait);

	mutex_unlock(&pn->all_ppp_mutex);
}

/*
 * Free the memory used by a ppp unit.  This is only called once
 * there are no channels connected to the unit and no file structs
 * that reference the unit.
 */
static void ppp_destroy_interface(struct ppp *ppp)
{
	atomic_dec(&ppp_unit_count);

	if (!ppp->file.dead || ppp->n_channels) {
		/* "can't happen" */
		printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
		       "n_channels=%d !\n", ppp, ppp->file.dead,
		       ppp->n_channels);
		return;
	}

	ppp_ccp_closed(ppp);
	if (ppp->vj) {
		slhc_free(ppp->vj);
		ppp->vj = NULL;
	}
	skb_queue_purge(&ppp->file.xq);
	skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
	skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	kfree(ppp->pass_filter);
	ppp->pass_filter = NULL;
	kfree(ppp->active_filter);
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	kfree_skb(ppp->xmit_pending);

	free_netdev(ppp->dev);
}

/*
 * Locate an existing ppp unit.
 * The caller should have locked the all_ppp_mutex.
 */
static struct ppp *
ppp_find_unit(struct ppp_net *pn, int unit)
{
	return unit_find(&pn->units_idr, unit);
}

/*
 * Locate an existing ppp channel.
 * The caller should have locked the all_channels_lock.
 * First we look in the new_channels list, then in the
 * all_channels list.  If found in the new_channels list,
 * we move it to the all_channels list.  This is for speed
 * when we have a lot of channels in use.
 */
static struct channel *
ppp_find_channel(struct ppp_net *pn, int unit)
{
	struct channel *pch;

	list_for_each_entry(pch, &pn->new_channels, list) {
		if (pch->file.index == unit) {
			list_move(&pch->list, &pn->all_channels);
			return pch;
		}
	}

	list_for_each_entry(pch, &pn->all_channels, list) {
		if (pch->file.index == unit)
			return pch;
	}

	return NULL;
}

/*
 * Connect a PPP channel to a PPP interface unit.
 */
static int
ppp_connect_channel(struct channel *pch, int unit)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	int ret = -ENXIO;
	int hdrlen;

	pn = ppp_pernet(pch->chan_net);

	mutex_lock(&pn->all_ppp_mutex);
	ppp = ppp_find_unit(pn, unit);
	if (!ppp)
		goto out;
	write_lock_bh(&pch->upl);
	ret = -EINVAL;
	if (pch->ppp)
		goto outl;

	ppp_lock(ppp);
	if (pch->file.hdrlen > ppp->file.hdrlen)
		ppp->file.hdrlen = pch->file.hdrlen;
	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
	if (hdrlen > ppp->dev->hard_header_len)
		ppp->dev->hard_header_len = hdrlen;
	list_add_tail(&pch->clist, &ppp->channels);
	++ppp->n_channels;
	pch->ppp = ppp;
	atomic_inc(&ppp->file.refcnt);
	ppp_unlock(ppp);
	ret = 0;

outl:
	write_unlock_bh(&pch->upl);
out:
	mutex_unlock(&pn->all_ppp_mutex);
	return ret;
}

/*
 * Disconnect a channel from its ppp unit.
 */
static int
ppp_disconnect_channel(struct channel *pch)
{
	struct ppp *ppp;
	int err = -EINVAL;

	write_lock_bh(&pch->upl);
	ppp = pch->ppp;
	pch->ppp = NULL;
	write_unlock_bh(&pch->upl);
	if (ppp) {
		/* remove it from the ppp unit's list */
		ppp_lock(ppp);
		list_del(&pch->clist);
		if (--ppp->n_channels == 0)
			wake_up_interruptible(&ppp->file.rwait);
		ppp_unlock(ppp);
		if (atomic_dec_and_test(&ppp->file.refcnt))
			ppp_destroy_interface(ppp);
		err = 0;
	}
	return err;
}

/*
 * Free up the resources used by a ppp channel.
 */
static void ppp_destroy_channel(struct channel *pch)
{
	atomic_dec(&channel_count);

	if (!pch->file.dead) {
		/* "can't happen" */
		printk(KERN_ERR "ppp: destroying undead channel %p !\n",
		       pch);
		return;
	}
	skb_queue_purge(&pch->file.xq);
	skb_queue_purge(&pch->file.rq);
	kfree(pch);
}

static void __exit ppp_cleanup(void)
{
	/* should never happen */
	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
		printk(KERN_ERR "PPP: removing module but units remain!\n");
	unregister_chrdev(PPP_MAJOR, "ppp");
	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
	class_destroy(ppp_class);
	unregister_pernet_device(&ppp_net_ops);
}

/*
 * Units handling. Caller must protect concurrent access
 * by holding all_ppp_mutex
 */

/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
	int unit, err;
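
	/*
	 * idr_pre_get() preallocates the memory that idr_get_new_above()
	 * may need; if the insert still comes back -EAGAIN, preallocate
	 * again and retry.
	 */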
again:
	if (!idr_pre_get(p, GFP_KERNEL)) {
		printk(KERN_ERR "PPP: No free memory for idr\n");
		return -ENOMEM;
	}

	err = idr_get_new_above(p, ptr, n, &unit);
	if (err == -EAGAIN)
		goto again;

	if (unit != n) {
		idr_remove(p, unit);
		return -EINVAL;
	}

	return unit;
}

/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
	int unit, err;

again:
	if (!idr_pre_get(p, GFP_KERNEL)) {
		printk(KERN_ERR "PPP: No free memory for idr\n");
		return -ENOMEM;
	}

	err = idr_get_new_above(p, ptr, 0, &unit);
	if (err == -EAGAIN)
		goto again;

	return unit;
}

/* put unit number back to a pool */
static void unit_put(struct idr *p, int n)
{
	idr_remove(p, n);
}

/* get pointer associated with the number */
static void *unit_find(struct idr *p, int n)
{
	return idr_find(p, n);
}

/* Module/initialization stuff */

module_init(ppp_init);
module_exit(ppp_cleanup);

EXPORT_SYMBOL(ppp_register_net_channel);
EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS("devname:ppp");