- /*
- * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
- #include <linux/dma-mapping.h>
- #include <rdma/ib_cache.h>
- #include "mad_priv.h"
- #include "mad_rmpp.h"
- #include "smi.h"
- #include "agent.h"
- MODULE_LICENSE("Dual BSD/GPL");
- MODULE_DESCRIPTION("kernel IB MAD API");
- MODULE_AUTHOR("Hal Rosenstock");
- MODULE_AUTHOR("Sean Hefty");
- static struct kmem_cache *ib_mad_cache;
- static struct list_head ib_mad_port_list;
- static u32 ib_mad_client_id = 0;
- /* Port list lock */
- static spinlock_t ib_mad_port_list_lock;
- /* Forward declarations */
- static int method_in_use(struct ib_mad_mgmt_method_table **method,
- struct ib_mad_reg_req *mad_reg_req);
- static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
- static struct ib_mad_agent_private *find_mad_agent(
- struct ib_mad_port_private *port_priv,
- struct ib_mad *mad);
- static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
- struct ib_mad_private *mad);
- static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
- static void timeout_sends(struct work_struct *work);
- static void local_completions(struct work_struct *work);
- static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
- struct ib_mad_agent_private *agent_priv,
- u8 mgmt_class);
- static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
- struct ib_mad_agent_private *agent_priv);
- /*
- * Returns an ib_mad_port_private structure or NULL if none is found
- * for the given device/port.
- * Assumes ib_mad_port_list_lock is held.
- */
- static inline struct ib_mad_port_private *
- __ib_get_mad_port(struct ib_device *device, int port_num)
- {
- struct ib_mad_port_private *entry;
- list_for_each_entry(entry, &ib_mad_port_list, port_list) {
- if (entry->device == device && entry->port_num == port_num)
- return entry;
- }
- return NULL;
- }
- /*
- * Wrapper function to return an ib_mad_port_private structure or NULL
- * for a device/port, taking the port list lock.
- */
- static inline struct ib_mad_port_private *
- ib_get_mad_port(struct ib_device *device, int port_num)
- {
- struct ib_mad_port_private *entry;
- unsigned long flags;
- spin_lock_irqsave(&ib_mad_port_list_lock, flags);
- entry = __ib_get_mad_port(device, port_num);
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- return entry;
- }
- static inline u8 convert_mgmt_class(u8 mgmt_class)
- {
- /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
- return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
- 0 : mgmt_class;
- }
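- /*
- * Worked example: convert_mgmt_class(IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- * yields 0, reusing the table slot of reserved class 0; every other
- * class, e.g. IB_MGMT_CLASS_PERF_MGMT (0x04), maps to itself.
- */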
- static int get_spl_qp_index(enum ib_qp_type qp_type)
- {
- switch (qp_type) {
- case IB_QPT_SMI:
- return 0;
- case IB_QPT_GSI:
- return 1;
- default:
- return -1;
- }
- }
- static int vendor_class_index(u8 mgmt_class)
- {
- return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
- }
- static int is_vendor_class(u8 mgmt_class)
- {
- if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
- (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
- return 0;
- return 1;
- }
- static int is_vendor_oui(char *oui)
- {
- if (oui[0] || oui[1] || oui[2])
- return 1;
- return 0;
- }
- static int is_vendor_method_in_use(
- struct ib_mad_mgmt_vendor_class *vendor_class,
- struct ib_mad_reg_req *mad_reg_req)
- {
- struct ib_mad_mgmt_method_table *method;
- int i;
- for (i = 0; i < MAX_MGMT_OUI; i++) {
- if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
- method = vendor_class->method_table[i];
- if (method) {
- if (method_in_use(&method, mad_reg_req))
- return 1;
- else
- break;
- }
- }
- }
- return 0;
- }
- int ib_response_mad(struct ib_mad *mad)
- {
- return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
- (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
- ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
- (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
- }
- EXPORT_SYMBOL(ib_response_mad);
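- /*
- * For example, GetResp (method 0x81, which has IB_MGMT_METHOD_RESP set)
- * and TrapRepress (0x07) count as responses; Get (0x01) and Set (0x02)
- * do not.
- */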
- /*
- * ib_register_mad_agent - Register to send/receive MADs
- */
- struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- struct ib_mad_reg_req *mad_reg_req,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
- {
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_reg_req *reg_req = NULL;
- struct ib_mad_mgmt_class_table *class;
- struct ib_mad_mgmt_vendor_class_table *vendor;
- struct ib_mad_mgmt_vendor_class *vendor_class;
- struct ib_mad_mgmt_method_table *method;
- int ret2, qpn;
- unsigned long flags;
- u8 mgmt_class, vclass;
- /* Validate parameters */
- qpn = get_spl_qp_index(qp_type);
- if (qpn == -1)
- goto error1;
- if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
- goto error1;
- /* Validate MAD registration request if supplied */
- if (mad_reg_req) {
- if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
- goto error1;
- if (!recv_handler)
- goto error1;
- if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
- /*
- * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
- * one in this range currently allowed
- */
- if (mad_reg_req->mgmt_class !=
- IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- goto error1;
- } else if (mad_reg_req->mgmt_class == 0) {
- /*
- * Class 0 is reserved in IBA and is used for
- * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- */
- goto error1;
- } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
- /*
- * If class is in "new" vendor range,
- * ensure supplied OUI is not zero
- */
- if (!is_vendor_oui(mad_reg_req->oui))
- goto error1;
- }
- /* Make sure class supplied is consistent with RMPP */
- if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
- if (rmpp_version)
- goto error1;
- }
- /* Make sure class supplied is consistent with QP type */
- if (qp_type == IB_QPT_SMI) {
- if ((mad_reg_req->mgmt_class !=
- IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
- (mad_reg_req->mgmt_class !=
- IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
- goto error1;
- } else {
- if ((mad_reg_req->mgmt_class ==
- IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
- (mad_reg_req->mgmt_class ==
- IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
- goto error1;
- }
- } else {
- /* No registration request supplied */
- if (!send_handler)
- goto error1;
- }
- /* Validate device and port */
- port_priv = ib_get_mad_port(device, port_num);
- if (!port_priv) {
- ret = ERR_PTR(-ENODEV);
- goto error1;
- }
- /* Allocate structures */
- mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
- if (!mad_agent_priv) {
- ret = ERR_PTR(-ENOMEM);
- goto error1;
- }
- mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(mad_agent_priv->agent.mr)) {
- ret = ERR_PTR(-ENOMEM);
- goto error2;
- }
- if (mad_reg_req) {
- reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
- if (!reg_req) {
- ret = ERR_PTR(-ENOMEM);
- goto error3;
- }
- /* Make a copy of the MAD registration request */
- memcpy(reg_req, mad_reg_req, sizeof *reg_req);
- }
- /* Now, fill in the various structures */
- mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
- mad_agent_priv->reg_req = reg_req;
- mad_agent_priv->agent.rmpp_version = rmpp_version;
- mad_agent_priv->agent.device = device;
- mad_agent_priv->agent.recv_handler = recv_handler;
- mad_agent_priv->agent.send_handler = send_handler;
- mad_agent_priv->agent.context = context;
- mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
- mad_agent_priv->agent.port_num = port_num;
- spin_lock_init(&mad_agent_priv->lock);
- INIT_LIST_HEAD(&mad_agent_priv->send_list);
- INIT_LIST_HEAD(&mad_agent_priv->wait_list);
- INIT_LIST_HEAD(&mad_agent_priv->done_list);
- INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
- INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
- INIT_LIST_HEAD(&mad_agent_priv->local_list);
- INIT_WORK(&mad_agent_priv->local_work, local_completions);
- atomic_set(&mad_agent_priv->refcount, 1);
- init_completion(&mad_agent_priv->comp);
- spin_lock_irqsave(&port_priv->reg_lock, flags);
- mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
- /*
- * Make sure MAD registration (if supplied)
- * is non-overlapping with any existing ones
- */
- if (mad_reg_req) {
- mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
- if (!is_vendor_class(mgmt_class)) {
- class = port_priv->version[mad_reg_req->
- mgmt_class_version].class;
- if (class) {
- method = class->method_table[mgmt_class];
- if (method) {
- if (method_in_use(&method,
- mad_reg_req))
- goto error4;
- }
- }
- ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
- mgmt_class);
- } else {
- /* "New" vendor class range */
- vendor = port_priv->version[mad_reg_req->
- mgmt_class_version].vendor;
- if (vendor) {
- vclass = vendor_class_index(mgmt_class);
- vendor_class = vendor->vendor_class[vclass];
- if (vendor_class) {
- if (is_vendor_method_in_use(
- vendor_class,
- mad_reg_req))
- goto error4;
- }
- }
- ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
- }
- if (ret2) {
- ret = ERR_PTR(ret2);
- goto error4;
- }
- }
- /* Add mad agent into port's agent list */
- list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
- return &mad_agent_priv->agent;
- error4:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
- kfree(reg_req);
- error3:
- ib_dereg_mr(mad_agent_priv->agent.mr);
- error2:
- kfree(mad_agent_priv);
- error1:
- return ret;
- }
- EXPORT_SYMBOL(ib_register_mad_agent);
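- /*
- * A minimal client-side registration sketch (illustrative only; the
- * handler and context names are hypothetical). It registers for
- * PerfMgmt GETs on the GSI QP with no RMPP, and pairs with
- * ib_unregister_mad_agent() on teardown:
- *
- *	struct ib_mad_reg_req reg_req = {
- *		.mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
- *		.mgmt_class_version = 1,
- *	};
- *	struct ib_mad_agent *agent;
- *
- *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
- *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
- *				      &reg_req, 0, my_send_handler,
- *				      my_recv_handler, my_context);
- *	if (IS_ERR(agent))
- *		return PTR_ERR(agent);
- */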
- static inline int is_snooping_sends(int mad_snoop_flags)
- {
- return (mad_snoop_flags &
- (/*IB_MAD_SNOOP_POSTED_SENDS |
- IB_MAD_SNOOP_RMPP_SENDS |*/
- IB_MAD_SNOOP_SEND_COMPLETIONS /*|
- IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
- }
- static inline int is_snooping_recvs(int mad_snoop_flags)
- {
- return (mad_snoop_flags &
- (IB_MAD_SNOOP_RECVS /*|
- IB_MAD_SNOOP_RMPP_RECVS*/));
- }
- static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
- struct ib_mad_snoop_private *mad_snoop_priv)
- {
- struct ib_mad_snoop_private **new_snoop_table;
- unsigned long flags;
- int i;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- /* Check for empty slot in array. */
- for (i = 0; i < qp_info->snoop_table_size; i++)
- if (!qp_info->snoop_table[i])
- break;
- if (i == qp_info->snoop_table_size) {
- /* Grow table. */
- new_snoop_table = krealloc(qp_info->snoop_table,
- sizeof (*new_snoop_table) *
- (qp_info->snoop_table_size + 1),
- GFP_ATOMIC);
- if (!new_snoop_table) {
- i = -ENOMEM;
- goto out;
- }
- qp_info->snoop_table = new_snoop_table;
- qp_info->snoop_table_size++;
- }
- qp_info->snoop_table[i] = mad_snoop_priv;
- atomic_inc(&qp_info->snoop_count);
- out:
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- return i;
- }
- struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
- {
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent *ret;
- struct ib_mad_snoop_private *mad_snoop_priv;
- int qpn;
- /* Validate parameters */
- if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
- (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- qpn = get_spl_qp_index(qp_type);
- if (qpn == -1) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- port_priv = ib_get_mad_port(device, port_num);
- if (!port_priv) {
- ret = ERR_PTR(-ENODEV);
- goto error1;
- }
- /* Allocate structures */
- mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
- if (!mad_snoop_priv) {
- ret = ERR_PTR(-ENOMEM);
- goto error1;
- }
- /* Now, fill in the various structures */
- mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
- mad_snoop_priv->agent.device = device;
- mad_snoop_priv->agent.recv_handler = recv_handler;
- mad_snoop_priv->agent.snoop_handler = snoop_handler;
- mad_snoop_priv->agent.context = context;
- mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
- mad_snoop_priv->agent.port_num = port_num;
- mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
- init_completion(&mad_snoop_priv->comp);
- mad_snoop_priv->snoop_index = register_snoop_agent(
- &port_priv->qp_info[qpn],
- mad_snoop_priv);
- if (mad_snoop_priv->snoop_index < 0) {
- ret = ERR_PTR(mad_snoop_priv->snoop_index);
- goto error2;
- }
- atomic_set(&mad_snoop_priv->refcount, 1);
- return &mad_snoop_priv->agent;
- error2:
- kfree(mad_snoop_priv);
- error1:
- return ret;
- }
- EXPORT_SYMBOL(ib_register_mad_snoop);
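- /*
- * Snoop registration sketch (hypothetical handler name): observe all
- * received GSI MADs without consuming them. Snoop-only agents have
- * hi_tid == 0 and are also released via ib_unregister_mad_agent():
- *
- *	snoop_agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
- *					    IB_MAD_SNOOP_RECVS, NULL,
- *					    my_snoop_recv_handler,
- *					    my_context);
- */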
- static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
- {
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- complete(&mad_agent_priv->comp);
- }
- static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
- {
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- complete(&mad_snoop_priv->comp);
- }
- static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
- {
- struct ib_mad_port_private *port_priv;
- unsigned long flags;
- /* Note that we could still be handling received MADs */
- /*
- * Canceling all sends results in dropping received response
- * MADs, preventing us from queuing additional work
- */
- cancel_mads(mad_agent_priv);
- port_priv = mad_agent_priv->qp_info->port_priv;
- cancel_delayed_work(&mad_agent_priv->timed_work);
- spin_lock_irqsave(&port_priv->reg_lock, flags);
- remove_mad_reg_req(mad_agent_priv);
- list_del(&mad_agent_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
- flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);
- deref_mad_agent(mad_agent_priv);
- wait_for_completion(&mad_agent_priv->comp);
- kfree(mad_agent_priv->reg_req);
- ib_dereg_mr(mad_agent_priv->agent.mr);
- kfree(mad_agent_priv);
- }
- static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
- {
- struct ib_mad_qp_info *qp_info;
- unsigned long flags;
- qp_info = mad_snoop_priv->qp_info;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
- atomic_dec(&qp_info->snoop_count);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- deref_snoop_agent(mad_snoop_priv);
- wait_for_completion(&mad_snoop_priv->comp);
- kfree(mad_snoop_priv);
- }
- /*
- * ib_unregister_mad_agent - Unregisters a client from using MAD services
- */
- int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_snoop_private *mad_snoop_priv;
- /* If the TID is zero, the agent can only snoop. */
- if (mad_agent->hi_tid) {
- mad_agent_priv = container_of(mad_agent,
- struct ib_mad_agent_private,
- agent);
- unregister_mad_agent(mad_agent_priv);
- } else {
- mad_snoop_priv = container_of(mad_agent,
- struct ib_mad_snoop_private,
- agent);
- unregister_mad_snoop(mad_snoop_priv);
- }
- return 0;
- }
- EXPORT_SYMBOL(ib_unregister_mad_agent);
- static void dequeue_mad(struct ib_mad_list_head *mad_list)
- {
- struct ib_mad_queue *mad_queue;
- unsigned long flags;
- BUG_ON(!mad_list->mad_queue);
- mad_queue = mad_list->mad_queue;
- spin_lock_irqsave(&mad_queue->lock, flags);
- list_del(&mad_list->list);
- mad_queue->count--;
- spin_unlock_irqrestore(&mad_queue->lock, flags);
- }
- static void snoop_send(struct ib_mad_qp_info *qp_info,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc,
- int mad_snoop_flags)
- {
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
- send_buf, mad_send_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- }
- static void snoop_recv(struct ib_mad_qp_info *qp_info,
- struct ib_mad_recv_wc *mad_recv_wc,
- int mad_snoop_flags)
- {
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
- mad_recv_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- }
- static void build_smp_wc(struct ib_qp *qp,
- u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
- struct ib_wc *wc)
- {
- memset(wc, 0, sizeof *wc);
- wc->wr_id = wr_id;
- wc->status = IB_WC_SUCCESS;
- wc->opcode = IB_WC_RECV;
- wc->pkey_index = pkey_index;
- wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
- wc->src_qp = IB_QP0;
- wc->qp = qp;
- wc->slid = slid;
- wc->sl = 0;
- wc->dlid_path_bits = 0;
- wc->port_num = port_num;
- }
- /*
- * Return 0 if SMP is to be sent
- * Return 1 if SMP was consumed locally (whether or not solicited)
- * Return < 0 if error
- */
- static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_send_wr_private *mad_send_wr)
- {
- int ret = 0;
- struct ib_smp *smp = mad_send_wr->send_buf.mad;
- unsigned long flags;
- struct ib_mad_local_private *local;
- struct ib_mad_private *mad_priv;
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent_private *recv_mad_agent = NULL;
- struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num;
- struct ib_wc mad_wc;
- struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
- if (device->node_type == RDMA_NODE_IB_SWITCH &&
- smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- port_num = send_wr->wr.ud.port_num;
- else
- port_num = mad_agent_priv->agent.port_num;
- /*
- * Directed route handling starts if the initial LID routed part of
- * a request or the ending LID routed part of a response is empty.
- * If we are at the start of the LID routed part, don't update the
- * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
- */
- if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
- IB_LID_PERMISSIVE &&
- smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
- IB_SMI_DISCARD) {
- ret = -EINVAL;
- printk(KERN_ERR PFX "Invalid directed route\n");
- goto out;
- }
- /* Check to post send on QP or process locally */
- if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
- smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
- goto out;
- local = kmalloc(sizeof *local, GFP_ATOMIC);
- if (!local) {
- ret = -ENOMEM;
- printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
- goto out;
- }
- local->mad_priv = NULL;
- local->recv_mad_agent = NULL;
- mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
- if (!mad_priv) {
- ret = -ENOMEM;
- printk(KERN_ERR PFX "No memory for local response MAD\n");
- kfree(local);
- goto out;
- }
- build_smp_wc(mad_agent_priv->agent.qp,
- send_wr->wr_id, be16_to_cpu(smp->dr_slid),
- send_wr->wr.ud.pkey_index,
- send_wr->wr.ud.port_num, &mad_wc);
- /* No GRH for DR SMP */
- ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
- (struct ib_mad *)smp,
- (struct ib_mad *)&mad_priv->mad);
- switch (ret) {
- case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
- if (ib_response_mad(&mad_priv->mad.mad) &&
- mad_agent_priv->agent.recv_handler) {
- local->mad_priv = mad_priv;
- local->recv_mad_agent = mad_agent_priv;
- /*
- * Reference MAD agent until receive
- * side of local completion handled
- */
- atomic_inc(&mad_agent_priv->refcount);
- } else
- kmem_cache_free(ib_mad_cache, mad_priv);
- break;
- case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
- kmem_cache_free(ib_mad_cache, mad_priv);
- break;
- case IB_MAD_RESULT_SUCCESS:
- /* Treat like an incoming receive MAD */
- port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
- mad_agent_priv->agent.port_num);
- if (port_priv) {
- memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
- recv_mad_agent = find_mad_agent(port_priv,
- &mad_priv->mad.mad);
- }
- if (!port_priv || !recv_mad_agent) {
- /*
- * No receiving agent so drop packet and
- * generate send completion.
- */
- kmem_cache_free(ib_mad_cache, mad_priv);
- break;
- }
- local->mad_priv = mad_priv;
- local->recv_mad_agent = recv_mad_agent;
- break;
- default:
- kmem_cache_free(ib_mad_cache, mad_priv);
- kfree(local);
- ret = -EINVAL;
- goto out;
- }
- local->mad_send_wr = mad_send_wr;
- /* Reference MAD agent until send side of local completion handled */
- atomic_inc(&mad_agent_priv->refcount);
- /* Queue local completion to local list */
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- queue_work(mad_agent_priv->qp_info->port_priv->wq,
- &mad_agent_priv->local_work);
- ret = 1;
- out:
- return ret;
- }
- static int get_pad_size(int hdr_len, int data_len)
- {
- int seg_size, pad;
- seg_size = sizeof(struct ib_mad) - hdr_len;
- if (data_len && seg_size) {
- pad = seg_size - data_len % seg_size;
- return pad == seg_size ? 0 : pad;
- } else
- return seg_size;
- }
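- /*
- * Worked example, using the header sizes from ib_mad.h: for an SA MAD,
- * hdr_len = IB_MGMT_SA_HDR = 56, so seg_size = 256 - 56 = 200 bytes.
- * A data_len of 500 then gets pad = 200 - (500 % 200) = 100, rounding
- * the payload up to three full segments.
- */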
- static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
- {
- struct ib_rmpp_segment *s, *t;
- list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
- list_del(&s->list);
- kfree(s);
- }
- }
- static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
- gfp_t gfp_mask)
- {
- struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
- struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
- struct ib_rmpp_segment *seg = NULL;
- int left, seg_size, pad;
- send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
- seg_size = send_buf->seg_size;
- pad = send_wr->pad;
- /* Allocate data segments. */
- for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
- seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
- if (!seg) {
- printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
- "alloc failed for len %zd, gfp %#x\n",
- sizeof (*seg) + seg_size, gfp_mask);
- free_send_rmpp_list(send_wr);
- return -ENOMEM;
- }
- seg->num = ++send_buf->seg_count;
- list_add_tail(&seg->list, &send_wr->rmpp_list);
- }
- /* Zero any padding */
- if (pad)
- memset(seg->data + seg_size - pad, 0, pad);
- rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
- agent.rmpp_version;
- rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
- ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
- send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
- struct ib_rmpp_segment, list);
- send_wr->last_ack_seg = send_wr->cur_seg;
- return 0;
- }
- struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
- u32 remote_qpn, u16 pkey_index,
- int rmpp_active,
- int hdr_len, int data_len,
- gfp_t gfp_mask)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *mad_send_wr;
- int pad, message_size, ret, size;
- void *buf;
- mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
- agent);
- pad = get_pad_size(hdr_len, data_len);
- message_size = hdr_len + data_len + pad;
- if ((!mad_agent->rmpp_version &&
- (rmpp_active || message_size > sizeof(struct ib_mad))) ||
- (!rmpp_active && message_size > sizeof(struct ib_mad)))
- return ERR_PTR(-EINVAL);
- size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
- buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
- if (!buf)
- return ERR_PTR(-ENOMEM);
- mad_send_wr = buf + size;
- INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
- mad_send_wr->send_buf.mad = buf;
- mad_send_wr->send_buf.hdr_len = hdr_len;
- mad_send_wr->send_buf.data_len = data_len;
- mad_send_wr->pad = pad;
- mad_send_wr->mad_agent_priv = mad_agent_priv;
- mad_send_wr->sg_list[0].length = hdr_len;
- mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
- mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
- mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
- mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
- mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
- mad_send_wr->send_wr.num_sge = 2;
- mad_send_wr->send_wr.opcode = IB_WR_SEND;
- mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
- mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
- mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
- mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
- if (rmpp_active) {
- ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
- if (ret) {
- kfree(buf);
- return ERR_PTR(ret);
- }
- }
- mad_send_wr->send_buf.mad_agent = mad_agent;
- atomic_inc(&mad_agent_priv->refcount);
- return &mad_send_wr->send_buf;
- }
- EXPORT_SYMBOL(ib_create_send_mad);
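- /*
- * Allocation sketch for a single, non-RMPP MAD (ah and agent set up
- * elsewhere; names hypothetical):
- *
- *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
- *				 IB_MGMT_MAD_HDR,
- *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
- *				 GFP_KERNEL);
- *	if (IS_ERR(msg))
- *		return PTR_ERR(msg);
- *	msg->ah = ah;
- *	msg->timeout_ms = 2000;
- *	msg->retries = 3;
- */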
- int ib_get_mad_data_offset(u8 mgmt_class)
- {
- if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
- return IB_MGMT_SA_HDR;
- else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
- (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
- (mgmt_class == IB_MGMT_CLASS_BIS))
- return IB_MGMT_DEVICE_HDR;
- else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
- (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
- return IB_MGMT_VENDOR_HDR;
- else
- return IB_MGMT_MAD_HDR;
- }
- EXPORT_SYMBOL(ib_get_mad_data_offset);
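- /*
- * With the header lengths from ib_mad.h this works out to: SA -> 56,
- * DeviceMgmt/DeviceAdm/BIS -> 64, vendor range 2 -> 40, and the bare
- * 24-byte MAD header for everything else.
- */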
- int ib_is_mad_class_rmpp(u8 mgmt_class)
- {
- if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
- (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
- (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
- (mgmt_class == IB_MGMT_CLASS_BIS) ||
- ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
- (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
- return 1;
- return 0;
- }
- EXPORT_SYMBOL(ib_is_mad_class_rmpp);
- void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
- {
- struct ib_mad_send_wr_private *mad_send_wr;
- struct list_head *list;
- mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
- send_buf);
- list = &mad_send_wr->cur_seg->list;
- if (mad_send_wr->cur_seg->num < seg_num) {
- list_for_each_entry(mad_send_wr->cur_seg, list, list)
- if (mad_send_wr->cur_seg->num == seg_num)
- break;
- } else if (mad_send_wr->cur_seg->num > seg_num) {
- list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
- if (mad_send_wr->cur_seg->num == seg_num)
- break;
- }
- return mad_send_wr->cur_seg->data;
- }
- EXPORT_SYMBOL(ib_get_rmpp_segment);
- static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
- {
- if (mad_send_wr->send_buf.seg_count)
- return ib_get_rmpp_segment(&mad_send_wr->send_buf,
- mad_send_wr->seg_num);
- else
- return mad_send_wr->send_buf.mad +
- mad_send_wr->send_buf.hdr_len;
- }
- void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *mad_send_wr;
- mad_agent_priv = container_of(send_buf->mad_agent,
- struct ib_mad_agent_private, agent);
- mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
- send_buf);
- free_send_rmpp_list(mad_send_wr);
- kfree(send_buf->mad);
- deref_mad_agent(mad_agent_priv);
- }
- EXPORT_SYMBOL(ib_free_send_mad);
- int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
- {
- struct ib_mad_qp_info *qp_info;
- struct list_head *list;
- struct ib_send_wr *bad_send_wr;
- struct ib_mad_agent *mad_agent;
- struct ib_sge *sge;
- unsigned long flags;
- int ret;
- /* Set WR ID to find mad_send_wr upon completion */
- qp_info = mad_send_wr->mad_agent_priv->qp_info;
- mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
- mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
- mad_agent = mad_send_wr->send_buf.mad_agent;
- sge = mad_send_wr->sg_list;
- sge[0].addr = ib_dma_map_single(mad_agent->device,
- mad_send_wr->send_buf.mad,
- sge[0].length,
- DMA_TO_DEVICE);
- mad_send_wr->header_mapping = sge[0].addr;
- sge[1].addr = ib_dma_map_single(mad_agent->device,
- ib_get_payload(mad_send_wr),
- sge[1].length,
- DMA_TO_DEVICE);
- mad_send_wr->payload_mapping = sge[1].addr;
- spin_lock_irqsave(&qp_info->send_queue.lock, flags);
- if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
- ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
- &bad_send_wr);
- list = &qp_info->send_queue.list;
- } else {
- ret = 0;
- list = &qp_info->overflow_list;
- }
- if (!ret) {
- qp_info->send_queue.count++;
- list_add_tail(&mad_send_wr->mad_list.list, list);
- }
- spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
- if (ret) {
- ib_dma_unmap_single(mad_agent->device,
- mad_send_wr->header_mapping,
- sge[0].length, DMA_TO_DEVICE);
- ib_dma_unmap_single(mad_agent->device,
- mad_send_wr->payload_mapping,
- sge[1].length, DMA_TO_DEVICE);
- }
- return ret;
- }
- /*
- * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
- * with the registered client
- */
- int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_buf **bad_send_buf)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_buf *next_send_buf;
- struct ib_mad_send_wr_private *mad_send_wr;
- unsigned long flags;
- int ret = -EINVAL;
- /* Walk list of send WRs and post each on send list */
- for (; send_buf; send_buf = next_send_buf) {
- mad_send_wr = container_of(send_buf,
- struct ib_mad_send_wr_private,
- send_buf);
- mad_agent_priv = mad_send_wr->mad_agent_priv;
- if (!send_buf->mad_agent->send_handler ||
- (send_buf->timeout_ms &&
- !send_buf->mad_agent->recv_handler)) {
- ret = -EINVAL;
- goto error;
- }
- if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
- if (mad_agent_priv->agent.rmpp_version) {
- ret = -EINVAL;
- goto error;
- }
- }
- /*
- * Save pointer to next work request to post in case the
- * current one completes, and the user modifies the work
- * request associated with the completion
- */
- next_send_buf = send_buf->next;
- mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
- if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
- IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- ret = handle_outgoing_dr_smp(mad_agent_priv,
- mad_send_wr);
- if (ret < 0) /* error */
- goto error;
- else if (ret == 1) /* locally consumed */
- continue;
- }
- mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
- /* Timeout will be updated after send completes */
- mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
- mad_send_wr->max_retries = send_buf->retries;
- mad_send_wr->retries_left = send_buf->retries;
- send_buf->retries = 0;
- /* Reference for work request to QP + response */
- mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
- mad_send_wr->status = IB_WC_SUCCESS;
- /* Reference MAD agent until send completes */
- atomic_inc(&mad_agent_priv->refcount);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_add_tail(&mad_send_wr->agent_list,
- &mad_agent_priv->send_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_agent_priv->agent.rmpp_version) {
- ret = ib_send_rmpp_mad(mad_send_wr);
- if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
- ret = ib_send_mad(mad_send_wr);
- } else
- ret = ib_send_mad(mad_send_wr);
- if (ret < 0) {
- /* Fail send request */
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_del(&mad_send_wr->agent_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- atomic_dec(&mad_agent_priv->refcount);
- goto error;
- }
- }
- return 0;
- error:
- if (bad_send_buf)
- *bad_send_buf = send_buf;
- return ret;
- }
- EXPORT_SYMBOL(ib_post_send_mad);
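- /*
- * Posting sketch, continuing the ib_create_send_mad() example above
- * (msg is hypothetical); on a failed post the caller still owns the
- * buffer, so free it:
- *
- *	ret = ib_post_send_mad(msg, NULL);
- *	if (ret)
- *		ib_free_send_mad(msg);
- */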
- /*
- * ib_free_recv_mad - Returns the data buffers used to receive
- * a MAD back to the access layer
- */
- void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
- {
- struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
- struct ib_mad_private_header *mad_priv_hdr;
- struct ib_mad_private *priv;
- struct list_head free_list;
- INIT_LIST_HEAD(&free_list);
- list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
- list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
- &free_list, list) {
- mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
- recv_buf);
- mad_priv_hdr = container_of(mad_recv_wc,
- struct ib_mad_private_header,
- recv_wc);
- priv = container_of(mad_priv_hdr, struct ib_mad_private,
- header);
- kmem_cache_free(ib_mad_cache, priv);
- }
- }
- EXPORT_SYMBOL(ib_free_recv_mad);
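- /*
- * Sketch of a receive handler (names hypothetical); the handler owns
- * the receive work completion and must hand it back here when done:
- *
- *	static void my_recv_handler(struct ib_mad_agent *agent,
- *				    struct ib_mad_recv_wc *mad_recv_wc)
- *	{
- *		process_mad(mad_recv_wc->recv_buf.mad);	<- hypothetical helper
- *		ib_free_recv_mad(mad_recv_wc);
- *	}
- */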
- struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
- {
- return ERR_PTR(-EINVAL); /* XXX: for now */
- }
- EXPORT_SYMBOL(ib_redirect_mad_qp);
- int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc)
- {
- printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
- return 0;
- }
- EXPORT_SYMBOL(ib_process_mad_wc);
- static int method_in_use(struct ib_mad_mgmt_method_table **method,
- struct ib_mad_reg_req *mad_reg_req)
- {
- int i;
- for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
- i < IB_MGMT_MAX_METHODS;
- i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
- 1+i)) {
- if ((*method)->agent[i]) {
- printk(KERN_ERR PFX "Method %d already in use\n", i);
- return -EINVAL;
- }
- }
- return 0;
- }
- static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
- {
- /* Allocate management method table */
- *method = kzalloc(sizeof **method, GFP_ATOMIC);
- if (!*method) {
- printk(KERN_ERR PFX "No memory for "
- "ib_mad_mgmt_method_table\n");
- return -ENOMEM;
- }
- return 0;
- }
- /*
- * Check to see if there are any methods still in use
- */
- static int check_method_table(struct ib_mad_mgmt_method_table *method)
- {
- int i;
- for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
- if (method->agent[i])
- return 1;
- return 0;
- }
- /*
- * Check to see if there are any method tables for this class still in use
- */
- static int check_class_table(struct ib_mad_mgmt_class_table *class)
- {
- int i;
- for (i = 0; i < MAX_MGMT_CLASS; i++)
- if (class->method_table[i])
- return 1;
- return 0;
- }
- static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
- {
- int i;
- for (i = 0; i < MAX_MGMT_OUI; i++)
- if (vendor_class->method_table[i])
- return 1;
- return 0;
- }
- static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
- char *oui)
- {
- int i;
- for (i = 0; i < MAX_MGMT_OUI; i++)
- /* Is there a matching OUI for this vendor class? */
- if (!memcmp(vendor_class->oui[i], oui, 3))
- return i;
- return -1;
- }
- static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
- {
- int i;
- for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
- if (vendor->vendor_class[i])
- return 1;
- return 0;
- }
- static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
- struct ib_mad_agent_private *agent)
- {
- int i;
- /* Remove any methods for this mad agent */
- for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
- if (method->agent[i] == agent) {
- method->agent[i] = NULL;
- }
- }
- }
- static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
- struct ib_mad_agent_private *agent_priv,
- u8 mgmt_class)
- {
- struct ib_mad_port_private *port_priv;
- struct ib_mad_mgmt_class_table **class;
- struct ib_mad_mgmt_method_table **method;
- int i, ret;
- port_priv = agent_priv->qp_info->port_priv;
- class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
- if (!*class) {
- /* Allocate management class table for "new" class version */
- *class = kzalloc(sizeof **class, GFP_ATOMIC);
- if (!*class) {
- printk(KERN_ERR PFX "No memory for "
- "ib_mad_mgmt_class_table\n");
- ret = -ENOMEM;
- goto error1;
- }
- /* Allocate method table for this management class */
- method = &(*class)->method_table[mgmt_class];
- if ((ret = allocate_method_table(method)))
- goto error2;
- } else {
- method = &(*class)->method_table[mgmt_class];
- if (!*method) {
- /* Allocate method table for this management class */
- if ((ret = allocate_method_table(method)))
- goto error1;
- }
- }
- /* Now, make sure methods are not already in use */
- if (method_in_use(method, mad_reg_req))
- goto error3;
- /* Finally, add in methods being registered */
- for (i = find_first_bit(mad_reg_req->method_mask,
- IB_MGMT_MAX_METHODS);
- i < IB_MGMT_MAX_METHODS;
- i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
- 1+i)) {
- (*method)->agent[i] = agent_priv;
- }
- return 0;
- error3:
- /* Remove any methods for this mad agent */
- remove_methods_mad_agent(*method, agent_priv);
- /* Now, check to see if there are any methods in use */
- if (!check_method_table(*method)) {
- /* If not, release management method table */
- kfree(*method);
- *method = NULL;
- }
- ret = -EINVAL;
- goto error1;
- error2:
- kfree(*class);
- *class = NULL;
- error1:
- return ret;
- }
- static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
- struct ib_mad_agent_private *agent_priv)
- {
- struct ib_mad_port_private *port_priv;
- struct ib_mad_mgmt_vendor_class_table **vendor_table;
- struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
- struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
- struct ib_mad_mgmt_method_table **method;
- int i, ret = -ENOMEM;
- u8 vclass;
- /* "New" vendor (with OUI) class */
- vclass = vendor_class_index(mad_reg_req->mgmt_class);
- port_priv = agent_priv->qp_info->port_priv;
- vendor_table = &port_priv->version[
- mad_reg_req->mgmt_class_version].vendor;
- if (!*vendor_table) {
- /* Allocate mgmt vendor class table for "new" class version */
- vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
- if (!vendor) {
- printk(KERN_ERR PFX "No memory for "
- "ib_mad_mgmt_vendor_class_table\n");
- goto error1;
- }
- *vendor_table = vendor;
- }
- if (!(*vendor_table)->vendor_class[vclass]) {
- /* Allocate table for this management vendor class */
- vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
- if (!vendor_class) {
- printk(KERN_ERR PFX "No memory for "
- "ib_mad_mgmt_vendor_class\n");
- goto error2;
- }
- (*vendor_table)->vendor_class[vclass] = vendor_class;
- }
- for (i = 0; i < MAX_MGMT_OUI; i++) {
- /* Is there a matching OUI for this vendor class? */
- if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
- mad_reg_req->oui, 3)) {
- method = &(*vendor_table)->vendor_class[
- vclass]->method_table[i];
- BUG_ON(!*method);
- goto check_in_use;
- }
- }
- for (i = 0; i < MAX_MGMT_OUI; i++) {
- /* OUI slot available? */
- if (!is_vendor_oui((*vendor_table)->vendor_class[
- vclass]->oui[i])) {
- method = &(*vendor_table)->vendor_class[
- vclass]->method_table[i];
- BUG_ON(*method);
- /* Allocate method table for this OUI */
- if ((ret = allocate_method_table(method)))
- goto error3;
- memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
- mad_reg_req->oui, 3);
- goto check_in_use;
- }
- }
- printk(KERN_ERR PFX "All OUI slots in use\n");
- goto error3;
- check_in_use:
- /* Now, make sure methods are not already in use */
- if (method_in_use(method, mad_reg_req))
- goto error4;
- /* Finally, add in methods being registered */
- for (i = find_first_bit(mad_reg_req->method_mask,
- IB_MGMT_MAX_METHODS);
- i < IB_MGMT_MAX_METHODS;
- i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
- 1+i)) {
- (*method)->agent[i] = agent_priv;
- }
- return 0;
- error4:
- /* Remove any methods for this mad agent */
- remove_methods_mad_agent(*method, agent_priv);
- /* Now, check to see if there are any methods in use */
- if (!check_method_table(*method)) {
- /* If not, release management method table */
- kfree(*method);
- *method = NULL;
- }
- ret = -EINVAL;
- error3:
- if (vendor_class) {
- (*vendor_table)->vendor_class[vclass] = NULL;
- kfree(vendor_class);
- }
- error2:
- if (vendor) {
- *vendor_table = NULL;
- kfree(vendor);
- }
- error1:
- return ret;
- }
- static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
- {
- struct ib_mad_port_private *port_priv;
- struct ib_mad_mgmt_class_table *class;
- struct ib_mad_mgmt_method_table *method;
- struct ib_mad_mgmt_vendor_class_table *vendor;
- struct ib_mad_mgmt_vendor_class *vendor_class;
- int index;
- u8 mgmt_class;
- /*
- * Was a MAD registration request supplied
- * with the original registration?
- */
- if (!agent_priv->reg_req) {
- goto out;
- }
- port_priv = agent_priv->qp_info->port_priv;
- mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
- class = port_priv->version[
- agent_priv->reg_req->mgmt_class_version].class;
- if (!class)
- goto vendor_check;
- method = class->method_table[mgmt_class];
- if (method) {
- /* Remove any methods for this mad agent */
- remove_methods_mad_agent(method, agent_priv);
- /* Now, check to see if there are any methods still in use */
- if (!check_method_table(method)) {
- /* If not, release management method table */
- kfree(method);
- class->method_table[mgmt_class] = NULL;
- /* Any management classes left? */
- if (!check_class_table(class)) {
- /* If not, release management class table */
- kfree(class);
- port_priv->version[
- agent_priv->reg_req->
- mgmt_class_version].class = NULL;
- }
- }
- }
- vendor_check:
- if (!is_vendor_class(mgmt_class))
- goto out;
- /* normalize mgmt_class to vendor range 2 */
- mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
- vendor = port_priv->version[
- agent_priv->reg_req->mgmt_class_version].vendor;
- if (!vendor)
- goto out;
- vendor_class = vendor->vendor_class[mgmt_class];
- if (vendor_class) {
- index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
- if (index < 0)
- goto out;
- method = vendor_class->method_table[index];
- if (method) {
- /* Remove any methods for this mad agent */
- remove_methods_mad_agent(method, agent_priv);
- /*
- * Now, check to see if there are
- * any methods still in use
- */
- if (!check_method_table(method)) {
- /* If not, release management method table */
- kfree(method);
- vendor_class->method_table[index] = NULL;
- memset(vendor_class->oui[index], 0, 3);
- /* Any OUIs left? */
- if (!check_vendor_class(vendor_class)) {
- /* If not, release vendor class table */
- kfree(vendor_class);
- vendor->vendor_class[mgmt_class] = NULL;
- /* Any other vendor classes left? */
- if (!check_vendor_table(vendor)) {
- kfree(vendor);
- port_priv->version[
- agent_priv->reg_req->
- mgmt_class_version].
- vendor = NULL;
- }
- }
- }
- }
- }
- out:
- return;
- }
- static struct ib_mad_agent_private *
- find_mad_agent(struct ib_mad_port_private *port_priv,
- struct ib_mad *mad)
- {
- struct ib_mad_agent_private *mad_agent = NULL;
- unsigned long flags;
- spin_lock_irqsave(&port_priv->reg_lock, flags);
- if (ib_response_mad(mad)) {
- u32 hi_tid;
- struct ib_mad_agent_private *entry;
- /*
- * Routing is based on the high 32 bits of the
- * MAD's transaction ID.
- */
- hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
- list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
- if (entry->agent.hi_tid == hi_tid) {
- mad_agent = entry;
- break;
- }
- }
- } else {
- struct ib_mad_mgmt_class_table *class;
- struct ib_mad_mgmt_method_table *method;
- struct ib_mad_mgmt_vendor_class_table *vendor;
- struct ib_mad_mgmt_vendor_class *vendor_class;
- struct ib_vendor_mad *vendor_mad;
- int index;
- /*
- * Routing is based on version, class, and method.
- * For "newer" vendor MADs, it is also based on the OUI.
- */
- if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
- goto out;
- if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
- class = port_priv->version[
- mad->mad_hdr.class_version].class;
- if (!class)
- goto out;
- method = class->method_table[convert_mgmt_class(
- mad->mad_hdr.mgmt_class)];
- if (method)
- mad_agent = method->agent[mad->mad_hdr.method &
- ~IB_MGMT_METHOD_RESP];
- } else {
- vendor = port_priv->version[
- mad->mad_hdr.class_version].vendor;
- if (!vendor)
- goto out;
- vendor_class = vendor->vendor_class[vendor_class_index(
- mad->mad_hdr.mgmt_class)];
- if (!vendor_class)
- goto out;
- /* Find matching OUI */
- vendor_mad = (struct ib_vendor_mad *)mad;
- index = find_vendor_oui(vendor_class, vendor_mad->oui);
- if (index == -1)
- goto out;
- method = vendor_class->method_table[index];
- if (method) {
- mad_agent = method->agent[mad->mad_hdr.method &
- ~IB_MGMT_METHOD_RESP];
- }
- }
- }
- if (mad_agent) {
- if (mad_agent->agent.recv_handler)
- atomic_inc(&mad_agent->refcount);
- else {
- printk(KERN_NOTICE PFX "No receive handler for client "
- "%p on port %d\n",
- &mad_agent->agent, port_priv->port_num);
- mad_agent = NULL;
- }
- }
- out:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
- return mad_agent;
- }
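- /*
- * Sanity check an incoming MAD: the base version must be supported,
- * SMI classes are accepted only on QP0, and all other classes only
- * on QP1. Returns nonzero if the MAD is acceptable.
- */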
- static int validate_mad(struct ib_mad *mad, u32 qp_num)
- {
- int valid = 0;
- /* Make sure MAD base version is understood */
- if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
- printk(KERN_ERR PFX "MAD received with unsupported base "
- "version %d\n", mad->mad_hdr.base_version);
- goto out;
- }
- /* Filter SMI packets sent to other than QP0 */
- if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
- (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- if (qp_num == 0)
- valid = 1;
- } else {
- /* Filter GSI packets sent to QP0 */
- if (qp_num != 0)
- valid = 1;
- }
- out:
- return valid;
- }
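- /*
- * Nonzero if the MAD should be matched against outstanding sends:
- * the agent does not use RMPP, RMPP is inactive on this MAD, or the
- * segment is of RMPP DATA type.
- */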
- static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_hdr *mad_hdr)
- {
- struct ib_rmpp_mad *rmpp_mad;
- rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
- return !mad_agent_priv->agent.rmpp_version ||
- !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
- IB_MGMT_RMPP_FLAG_ACTIVE) ||
- (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
- }
- static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
- struct ib_mad_recv_wc *rwc)
- {
- return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
- rwc->recv_buf.mad->mad_hdr.mgmt_class;
- }
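- /*
- * Check whether a queued send and a received MAD refer to the same
- * remote endpoint: compare GIDs when a GRH is present, otherwise fall
- * back to LID/path-bit comparison. Returns 0 (no match) on any query
- * failure to avoid false positives.
- */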
- static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_send_wr_private *wr,
- struct ib_mad_recv_wc *rwc)
- {
- struct ib_ah_attr attr;
- u8 send_resp, rcv_resp;
- union ib_gid sgid;
- struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num = mad_agent_priv->agent.port_num;
- u8 lmc;
- send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
- rcv_resp = ib_response_mad(rwc->recv_buf.mad);
- if (send_resp == rcv_resp)
- /* both requests or both responses: treat GIDs as different */
- return 0;
- if (ib_query_ah(wr->send_buf.ah, &attr))
- /* Assume not equal, to avoid false positives. */
- return 0;
- if (!!(attr.ah_flags & IB_AH_GRH) !=
- !!(rwc->wc->wc_flags & IB_WC_GRH))
- /* one has GID, other does not. Assume different */
- return 0;
- if (!send_resp && rcv_resp) {
- /* is request/response. */
- if (!(attr.ah_flags & IB_AH_GRH)) {
- if (ib_get_cached_lmc(device, port_num, &lmc))
- return 0;
- return (!lmc || !((attr.src_path_bits ^
- rwc->wc->dlid_path_bits) &
- ((1 << lmc) - 1)));
- } else {
- if (ib_get_cached_gid(device, port_num,
- attr.grh.sgid_index, &sgid))
- return 0;
- return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
- 16);
- }
- }
- if (!(attr.ah_flags & IB_AH_GRH))
- return attr.dlid == rwc->wc->slid;
- else
- return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
- 16);
- }
- static inline int is_direct(u8 class)
- {
- return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
- }
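- /*
- * Match a received response to an outstanding request by TID,
- * management class and, for LID-routed MADs, GID. The wait list is
- * searched first, then the send list, since a response can arrive
- * before the send completion is processed.
- */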
- struct ib_mad_send_wr_private*
- ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_recv_wc *wc)
- {
- struct ib_mad_send_wr_private *wr;
- struct ib_mad *mad;
- mad = (struct ib_mad *)wc->recv_buf.mad;
- list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
- if ((wr->tid == mad->mad_hdr.tid) &&
- rcv_has_same_class(wr, wc) &&
- /*
- * Don't check GID for direct routed MADs.
- * These might have permissive LIDs.
- */
- (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
- rcv_has_same_gid(mad_agent_priv, wr, wc)))
- return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
- }
- /*
- * It's possible to receive the response before we've
- * been notified that the send has completed
- */
- list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
- if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
- wr->tid == mad->mad_hdr.tid &&
- wr->timeout &&
- rcv_has_same_class(wr, wc) &&
- /*
- * Don't check GID for direct routed MADs.
- * These might have permissive LIDs.
- */
- (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
- rcv_has_same_gid(mad_agent_priv, wr, wc)))
- /* Verify request has not been canceled */
- return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
- }
- return NULL;
- }
- void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
- {
- mad_send_wr->timeout = 0;
- if (mad_send_wr->refcount == 1)
- list_move_tail(&mad_send_wr->agent_list,
- &mad_send_wr->mad_agent_priv->done_list);
- }
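- /*
- * Deliver a completed receive to the client. RMPP MADs are first run
- * through ib_process_rmpp_recv_wc(); responses complete the matching
- * request before the corresponding send is reported as done. Consumes
- * the agent reference taken by find_mad_agent().
- */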
- static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_recv_wc *mad_recv_wc)
- {
- struct ib_mad_send_wr_private *mad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
- unsigned long flags;
- INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
- list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
- if (mad_agent_priv->agent.rmpp_version) {
- mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
- mad_recv_wc);
- if (!mad_recv_wc) {
- deref_mad_agent(mad_agent_priv);
- return;
- }
- }
- /* Complete corresponding request */
- if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
- if (!mad_send_wr) {
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- ib_free_recv_mad(mad_recv_wc);
- deref_mad_agent(mad_agent_priv);
- return;
- }
- ib_mark_mad_done(mad_send_wr);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- /* Defined behavior is to complete response before request */
- mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
- mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
- mad_recv_wc);
- atomic_dec(&mad_agent_priv->refcount);
- mad_send_wc.status = IB_WC_SUCCESS;
- mad_send_wc.vendor_err = 0;
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
- } else {
- mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
- mad_recv_wc);
- deref_mad_agent(mad_agent_priv);
- }
- }
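- /*
- * Receive completion handler: unmap the buffer, validate the MAD,
- * perform directed-route SMP processing (including forwarding on
- * switches), give the driver first refusal via process_mad(), and
- * finally dispatch to a matching agent. A receive buffer is always
- * reposted before returning.
- */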
- static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
- struct ib_wc *wc)
- {
- struct ib_mad_qp_info *qp_info;
- struct ib_mad_private_header *mad_priv_hdr;
- struct ib_mad_private *recv, *response = NULL;
- struct ib_mad_list_head *mad_list;
- struct ib_mad_agent_private *mad_agent;
- int port_num;
- mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
- qp_info = mad_list->mad_queue->qp_info;
- dequeue_mad(mad_list);
- mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
- mad_list);
- recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
- ib_dma_unmap_single(port_priv->device,
- recv->header.mapping,
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
- /* Set up the MAD receive work completion from the "normal" work completion */
- recv->header.wc = *wc;
- recv->header.recv_wc.wc = &recv->header.wc;
- recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
- recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
- recv->header.recv_wc.recv_buf.grh = &recv->grh;
- if (atomic_read(&qp_info->snoop_count))
- snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
- /* Validate MAD */
- if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
- goto out;
- response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
- if (!response) {
- printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
- "for response buffer\n");
- goto out;
- }
- if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
- port_num = wc->port_num;
- else
- port_num = port_priv->port_num;
- if (recv->mad.mad.mad_hdr.mgmt_class ==
- IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- enum smi_forward_action retsmi;
- if (smi_handle_dr_smp_recv(&recv->mad.smp,
- port_priv->device->node_type,
- port_num,
- port_priv->device->phys_port_cnt) ==
- IB_SMI_DISCARD)
- goto out;
- retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
- if (retsmi == IB_SMI_LOCAL)
- goto local;
- if (retsmi == IB_SMI_SEND) { /* don't forward */
- if (smi_handle_dr_smp_send(&recv->mad.smp,
- port_priv->device->node_type,
- port_num) == IB_SMI_DISCARD)
- goto out;
- if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
- goto out;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
- /* forward case for switches */
- memcpy(response, recv, sizeof(*response));
- response->header.recv_wc.wc = &response->header.wc;
- response->header.recv_wc.recv_buf.mad = &response->mad.mad;
- response->header.recv_wc.recv_buf.grh = &response->grh;
- agent_send_response(&response->mad.mad,
- &response->grh, wc,
- port_priv->device,
- smi_get_fwd_port(&recv->mad.smp),
- qp_info->qp->qp_num);
- goto out;
- }
- }
- local:
- /* Give driver "right of first refusal" on incoming MAD */
- if (port_priv->device->process_mad) {
- int ret;
- ret = port_priv->device->process_mad(port_priv->device, 0,
- port_priv->port_num,
- wc, &recv->grh,
- &recv->mad.mad,
- &response->mad.mad);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- if (ret & IB_MAD_RESULT_CONSUMED)
- goto out;
- if (ret & IB_MAD_RESULT_REPLY) {
- agent_send_response(&response->mad.mad,
- &recv->grh, wc,
- port_priv->device,
- port_num,
- qp_info->qp->qp_num);
- goto out;
- }
- }
- }
- mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
- if (mad_agent) {
- ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
- /*
- * recv is freed by ib_mad_complete_recv(), either in its
- * error paths or by the client's recv_handler
- */
- recv = NULL;
- }
- out:
- /* Post another receive request for this QP */
- if (response) {
- ib_mad_post_receive_mads(qp_info, response);
- if (recv)
- kmem_cache_free(ib_mad_cache, recv);
- } else
- ib_mad_post_receive_mads(qp_info, recv);
- }
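- /*
- * Re-arm, or cancel, the delayed timeout work based on the earliest
- * entry on the agent's wait list.
- */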
- static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
- {
- struct ib_mad_send_wr_private *mad_send_wr;
- unsigned long delay;
- if (list_empty(&mad_agent_priv->wait_list)) {
- cancel_delayed_work(&mad_agent_priv->timed_work);
- } else {
- mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
- struct ib_mad_send_wr_private,
- agent_list);
- if (time_after(mad_agent_priv->timeout,
- mad_send_wr->timeout)) {
- mad_agent_priv->timeout = mad_send_wr->timeout;
- cancel_delayed_work(&mad_agent_priv->timed_work);
- delay = mad_send_wr->timeout - jiffies;
- if ((long)delay <= 0)
- delay = 1;
- queue_delayed_work(mad_agent_priv->qp_info->
- port_priv->wq,
- &mad_agent_priv->timed_work, delay);
- }
- }
- }
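- /*
- * Insert a send into the wait list, sorted by absolute timeout, and
- * reschedule the timeout work if this send now expires first.
- */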
- static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *temp_mad_send_wr;
- struct list_head *list_item;
- unsigned long delay;
- mad_agent_priv = mad_send_wr->mad_agent_priv;
- list_del(&mad_send_wr->agent_list);
- delay = mad_send_wr->timeout;
- mad_send_wr->timeout += jiffies;
- if (delay) {
- list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
- temp_mad_send_wr = list_entry(list_item,
- struct ib_mad_send_wr_private,
- agent_list);
- if (time_after(mad_send_wr->timeout,
- temp_mad_send_wr->timeout))
- break;
- }
- } else
- list_item = &mad_agent_priv->wait_list;
- list_add(&mad_send_wr->agent_list, list_item);
- /* Reschedule a work item if we have a shorter timeout */
- if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
- cancel_delayed_work(&mad_agent_priv->timed_work);
- queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
- &mad_agent_priv->timed_work, delay);
- }
- }
- void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms)
- {
- mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
- wait_for_response(mad_send_wr);
- }
- /*
- * Process a send work completion
- */
- void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
- struct ib_mad_send_wc *mad_send_wc)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- unsigned long flags;
- int ret;
- mad_agent_priv = mad_send_wr->mad_agent_priv;
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- if (mad_agent_priv->agent.rmpp_version) {
- ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
- if (ret == IB_RMPP_RESULT_CONSUMED)
- goto done;
- } else
- ret = IB_RMPP_RESULT_UNHANDLED;
- if (mad_send_wc->status != IB_WC_SUCCESS &&
- mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = mad_send_wc->status;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
- if (--mad_send_wr->refcount > 0) {
- if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
- mad_send_wr->status == IB_WC_SUCCESS) {
- wait_for_response(mad_send_wr);
- }
- goto done;
- }
- /* Remove send from MAD agent and notify client of completion */
- list_del(&mad_send_wr->agent_list);
- adjust_timeout(mad_agent_priv);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_send_wr->status != IB_WC_SUCCESS)
- mad_send_wc->status = mad_send_wr->status;
- if (ret == IB_RMPP_RESULT_INTERNAL)
- ib_rmpp_send_handler(mad_send_wc);
- else
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- mad_send_wc);
- /* Release reference on agent taken when sending */
- deref_mad_agent(mad_agent_priv);
- return;
- done:
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- }
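- /*
- * Send completion handler: unmap the send buffers, promote a send
- * parked on the overflow list into the hardware send queue, and
- * complete the finished work request. If the repost fails, the queued
- * send is completed with IB_WC_LOC_QP_OP_ERR via the retry label.
- */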
- static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
- struct ib_wc *wc)
- {
- struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
- struct ib_mad_list_head *mad_list;
- struct ib_mad_qp_info *qp_info;
- struct ib_mad_queue *send_queue;
- struct ib_send_wr *bad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
- unsigned long flags;
- int ret;
- mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
- mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
- mad_list);
- send_queue = mad_list->mad_queue;
- qp_info = send_queue->qp_info;
- retry:
- ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
- mad_send_wr->header_mapping,
- mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
- ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
- mad_send_wr->payload_mapping,
- mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
- queued_send_wr = NULL;
- spin_lock_irqsave(&send_queue->lock, flags);
- list_del(&mad_list->list);
- /* Move queued send to the send queue */
- if (send_queue->count-- > send_queue->max_active) {
- mad_list = container_of(qp_info->overflow_list.next,
- struct ib_mad_list_head, list);
- queued_send_wr = container_of(mad_list,
- struct ib_mad_send_wr_private,
- mad_list);
- list_move_tail(&mad_list->list, &send_queue->list);
- }
- spin_unlock_irqrestore(&send_queue->lock, flags);
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- mad_send_wc.status = wc->status;
- mad_send_wc.vendor_err = wc->vendor_err;
- if (atomic_read(&qp_info->snoop_count))
- snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
- IB_MAD_SNOOP_SEND_COMPLETIONS);
- ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
- if (queued_send_wr) {
- ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
- &bad_send_wr);
- if (ret) {
- printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
- mad_send_wr = queued_send_wr;
- wc->status = IB_WC_LOC_QP_OP_ERR;
- goto retry;
- }
- }
- }
- static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
- {
- struct ib_mad_send_wr_private *mad_send_wr;
- struct ib_mad_list_head *mad_list;
- unsigned long flags;
- spin_lock_irqsave(&qp_info->send_queue.lock, flags);
- list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
- mad_send_wr = container_of(mad_list,
- struct ib_mad_send_wr_private,
- mad_list);
- mad_send_wr->retry = 1;
- }
- spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
- }
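- /*
- * Handle a completion with an error status. Receive errors mean the
- * QP has entered the error state and cleanup happens elsewhere.
- * Flushed sends are reposted once; other send errors move the QP from
- * SQE back to RTS and mark the remaining sends for retry.
- */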
- static void mad_error_handler(struct ib_mad_port_private *port_priv,
- struct ib_wc *wc)
- {
- struct ib_mad_list_head *mad_list;
- struct ib_mad_qp_info *qp_info;
- struct ib_mad_send_wr_private *mad_send_wr;
- int ret;
- /* Determine if failure was a send or receive */
- mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
- qp_info = mad_list->mad_queue->qp_info;
- if (mad_list->mad_queue == &qp_info->recv_queue)
- /*
- * Receive errors indicate that the QP has entered the error
- * state - error handling/shutdown code will cleanup
- */
- return;
- /*
- * Send errors will transition the QP to SQE - move
- * QP to RTS and repost flushed work requests
- */
- mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
- mad_list);
- if (wc->status == IB_WC_WR_FLUSH_ERR) {
- if (mad_send_wr->retry) {
- /* Repost send */
- struct ib_send_wr *bad_send_wr;
- mad_send_wr->retry = 0;
- ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
- &bad_send_wr);
- if (ret)
- ib_mad_send_done_handler(port_priv, wc);
- } else
- ib_mad_send_done_handler(port_priv, wc);
- } else {
- struct ib_qp_attr *attr;
- /* Transition QP to RTS and fail offending send */
- attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (attr) {
- attr->qp_state = IB_QPS_RTS;
- attr->cur_qp_state = IB_QPS_SQE;
- ret = ib_modify_qp(qp_info->qp, attr,
- IB_QP_STATE | IB_QP_CUR_STATE);
- kfree(attr);
- if (ret)
- printk(KERN_ERR PFX "mad_error_handler - "
- "ib_modify_qp to RTS : %d\n", ret);
- else
- mark_sends_for_retry(qp_info);
- }
- ib_mad_send_done_handler(port_priv, wc);
- }
- }
- /*
- * IB MAD completion callback
- */
- static void ib_mad_completion_handler(struct work_struct *work)
- {
- struct ib_mad_port_private *port_priv;
- struct ib_wc wc;
- port_priv = container_of(work, struct ib_mad_port_private, work);
- ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
- while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
- if (wc.status == IB_WC_SUCCESS) {
- switch (wc.opcode) {
- case IB_WC_SEND:
- ib_mad_send_done_handler(port_priv, &wc);
- break;
- case IB_WC_RECV:
- ib_mad_recv_done_handler(port_priv, &wc);
- break;
- default:
- BUG_ON(1);
- break;
- }
- } else
- mad_error_handler(port_priv, &wc);
- }
- }
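- /*
- * Flush all outstanding sends for an agent being unregistered: sends
- * still on the send list are marked as flushed and complete through
- * the normal path, while everything on the wait list is reported to
- * the client immediately with IB_WC_WR_FLUSH_ERR.
- */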
- static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
- {
- unsigned long flags;
- struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
- struct list_head cancel_list;
- INIT_LIST_HEAD(&cancel_list);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
- &mad_agent_priv->send_list, agent_list) {
- if (mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
- }
- /* Empty wait list to prevent receives from finding a request */
- list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- /* Report all cancelled requests */
- mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
- mad_send_wc.vendor_err = 0;
- list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
- &cancel_list, agent_list) {
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- list_del(&mad_send_wr->agent_list);
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
- atomic_dec(&mad_agent_priv->refcount);
- }
- }
- static struct ib_mad_send_wr_private*
- find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_send_buf *send_buf)
- {
- struct ib_mad_send_wr_private *mad_send_wr;
- list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
- agent_list) {
- if (&mad_send_wr->send_buf == send_buf)
- return mad_send_wr;
- }
- list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
- agent_list) {
- if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
- &mad_send_wr->send_buf == send_buf)
- return mad_send_wr;
- }
- return NULL;
- }
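- /*
- * Change the timeout of an outstanding request, or cancel it by
- * passing a timeout of zero (which is all ib_cancel_mad() does).
- * A minimal usage sketch, assuming "agent" and "send_buf" were
- * obtained from ib_register_mad_agent() and ib_create_send_mad():
- *
- *	ret = ib_modify_mad(agent, send_buf, 5000);	(wait 5s longer)
- *	ib_cancel_mad(agent, send_buf);			(abort the send)
- */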
- int ib_modify_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf, u32 timeout_ms)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *mad_send_wr;
- unsigned long flags;
- int active;
- mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
- agent);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
- if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- return -EINVAL;
- }
- active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
- if (!timeout_ms) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
- mad_send_wr->send_buf.timeout_ms = timeout_ms;
- if (active)
- mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
- else
- ib_reset_mad_timeout(mad_send_wr, timeout_ms);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- return 0;
- }
- EXPORT_SYMBOL(ib_modify_mad);
- void ib_cancel_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf)
- {
- ib_modify_mad(mad_agent, send_buf, 0);
- }
- EXPORT_SYMBOL(ib_cancel_mad);
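- /*
- * Work handler completing MADs that were answered locally (e.g. SMPs
- * handled by the local SMA): the receive side is delivered to the
- * receiving agent first, then the originating send is completed with
- * IB_WC_SUCCESS.
- */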
- static void local_completions(struct work_struct *work)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_local_private *local;
- struct ib_mad_agent_private *recv_mad_agent;
- unsigned long flags;
- int free_mad;
- struct ib_wc wc;
- struct ib_mad_send_wc mad_send_wc;
- mad_agent_priv =
- container_of(work, struct ib_mad_agent_private, local_work);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- while (!list_empty(&mad_agent_priv->local_list)) {
- local = list_entry(mad_agent_priv->local_list.next,
- struct ib_mad_local_private,
- completion_list);
- list_del(&local->completion_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- free_mad = 0;
- if (local->mad_priv) {
- recv_mad_agent = local->recv_mad_agent;
- if (!recv_mad_agent) {
- printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
- free_mad = 1;
- goto local_send_completion;
- }
- /*
- * Defined behavior is to complete response
- * before request
- */
- build_smp_wc(recv_mad_agent->agent.qp,
- (unsigned long) local->mad_send_wr,
- be16_to_cpu(IB_LID_PERMISSIVE),
- 0, recv_mad_agent->agent.port_num, &wc);
- local->mad_priv->header.recv_wc.wc = &wc;
- local->mad_priv->header.recv_wc.mad_len =
- sizeof(struct ib_mad);
- INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
- list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
- &local->mad_priv->header.recv_wc.rmpp_list);
- local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
- local->mad_priv->header.recv_wc.recv_buf.mad =
- &local->mad_priv->mad.mad;
- if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
- snoop_recv(recv_mad_agent->qp_info,
- &local->mad_priv->header.recv_wc,
- IB_MAD_SNOOP_RECVS);
- recv_mad_agent->agent.recv_handler(
- &recv_mad_agent->agent,
- &local->mad_priv->header.recv_wc);
- spin_lock_irqsave(&recv_mad_agent->lock, flags);
- atomic_dec(&recv_mad_agent->refcount);
- spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
- }
- local_send_completion:
- /* Complete send */
- mad_send_wc.status = IB_WC_SUCCESS;
- mad_send_wc.vendor_err = 0;
- mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_send(mad_agent_priv->qp_info,
- &local->mad_send_wr->send_buf,
- &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- atomic_dec(&mad_agent_priv->refcount);
- if (free_mad)
- kmem_cache_free(ib_mad_cache, local->mad_priv);
- kfree(local);
- }
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- }
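- /*
- * Retransmit a timed-out send if it has retries left. Returns 0 and
- * requeues the send on success, -ETIMEDOUT once retries are
- * exhausted.
- */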
- static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
- {
- int ret;
- if (!mad_send_wr->retries_left)
- return -ETIMEDOUT;
- mad_send_wr->retries_left--;
- mad_send_wr->send_buf.retries++;
- mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
- if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
- ret = ib_retry_rmpp(mad_send_wr);
- switch (ret) {
- case IB_RMPP_RESULT_UNHANDLED:
- ret = ib_send_mad(mad_send_wr);
- break;
- case IB_RMPP_RESULT_CONSUMED:
- ret = 0;
- break;
- default:
- ret = -ECOMM;
- break;
- }
- } else
- ret = ib_send_mad(mad_send_wr);
- if (!ret) {
- mad_send_wr->refcount++;
- list_add_tail(&mad_send_wr->agent_list,
- &mad_send_wr->mad_agent_priv->send_list);
- }
- return ret;
- }
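- /*
- * Delayed work handler for request timeouts: expired sends are
- * retried, and those out of retries are reported to the client with
- * IB_WC_RESP_TIMEOUT_ERR.
- */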
- static void timeout_sends(struct work_struct *work)
- {
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *mad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
- unsigned long flags, delay;
- mad_agent_priv = container_of(work, struct ib_mad_agent_private,
- timed_work.work);
- mad_send_wc.vendor_err = 0;
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- while (!list_empty(&mad_agent_priv->wait_list)) {
- mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
- struct ib_mad_send_wr_private,
- agent_list);
- if (time_after(mad_send_wr->timeout, jiffies)) {
- delay = mad_send_wr->timeout - jiffies;
- if ((long)delay <= 0)
- delay = 1;
- queue_delayed_work(mad_agent_priv->qp_info->
- port_priv->wq,
- &mad_agent_priv->timed_work, delay);
- break;
- }
- list_del(&mad_send_wr->agent_list);
- if (mad_send_wr->status == IB_WC_SUCCESS &&
- !retry_send(mad_send_wr))
- continue;
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_send_wr->status == IB_WC_SUCCESS)
- mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
- else
- mad_send_wc.status = mad_send_wr->status;
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
- atomic_dec(&mad_agent_priv->refcount);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
- }
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- }
- static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
- {
- struct ib_mad_port_private *port_priv = cq->cq_context;
- unsigned long flags;
- spin_lock_irqsave(&ib_mad_port_list_lock, flags);
- if (!list_empty(&port_priv->port_list))
- queue_work(port_priv->wq, &port_priv->work);
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- }
- /*
- * Allocate receive MADs and post receive WRs for them
- */
- static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
- struct ib_mad_private *mad)
- {
- unsigned long flags;
- int post, ret;
- struct ib_mad_private *mad_priv;
- struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
- struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
- /* Initialize common scatter list fields */
- sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
- sg_list.lkey = (*qp_info->port_priv->mr).lkey;
- /* Initialize common receive WR fields */
- recv_wr.next = NULL;
- recv_wr.sg_list = &sg_list;
- recv_wr.num_sge = 1;
- do {
- /* Allocate and map receive buffer */
- if (mad) {
- mad_priv = mad;
- mad = NULL;
- } else {
- mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
- if (!mad_priv) {
- printk(KERN_ERR PFX "No memory for receive buffer\n");
- ret = -ENOMEM;
- break;
- }
- }
- sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
- &mad_priv->grh,
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
- mad_priv->header.mapping = sg_list.addr;
- recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
- mad_priv->header.mad_list.mad_queue = recv_queue;
- /* Post receive WR */
- spin_lock_irqsave(&recv_queue->lock, flags);
- post = (++recv_queue->count < recv_queue->max_active);
- list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
- spin_unlock_irqrestore(&recv_queue->lock, flags);
- ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
- if (ret) {
- spin_lock_irqsave(&recv_queue->lock, flags);
- list_del(&mad_priv->header.mad_list.list);
- recv_queue->count--;
- spin_unlock_irqrestore(&recv_queue->lock, flags);
- ib_dma_unmap_single(qp_info->port_priv->device,
- mad_priv->header.mapping,
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
- kmem_cache_free(ib_mad_cache, mad_priv);
- printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
- break;
- }
- } while (post);
- return ret;
- }
- /*
- * Return all the posted receive MADs
- */
- static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
- {
- struct ib_mad_private_header *mad_priv_hdr;
- struct ib_mad_private *recv;
- struct ib_mad_list_head *mad_list;
- while (!list_empty(&qp_info->recv_queue.list)) {
- mad_list = list_entry(qp_info->recv_queue.list.next,
- struct ib_mad_list_head, list);
- mad_priv_hdr = container_of(mad_list,
- struct ib_mad_private_header,
- mad_list);
- recv = container_of(mad_priv_hdr, struct ib_mad_private,
- header);
- /* Remove from posted receive MAD list */
- list_del(&mad_list->list);
- ib_dma_unmap_single(qp_info->port_priv->device,
- recv->header.mapping,
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
- kmem_cache_free(ib_mad_cache, recv);
- }
- qp_info->recv_queue.count = 0;
- }
- /*
- * Start the port
- */
- static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
- {
- int ret, i;
- struct ib_qp_attr *attr;
- struct ib_qp *qp;
- attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (!attr) {
- printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
- return -ENOMEM;
- }
- for (i = 0; i < IB_MAD_QPS_CORE; i++) {
- qp = port_priv->qp_info[i].qp;
- /*
- * PKey index for QP1 is irrelevant but
- * one is needed for the Reset to Init transition
- */
- attr->qp_state = IB_QPS_INIT;
- attr->pkey_index = 0;
- attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
- ret = ib_modify_qp(qp, attr, IB_QP_STATE |
- IB_QP_PKEY_INDEX | IB_QP_QKEY);
- if (ret) {
- printk(KERN_ERR PFX "Couldn't change QP%d state to "
- "INIT: %d\n", i, ret);
- goto out;
- }
- attr->qp_state = IB_QPS_RTR;
- ret = ib_modify_qp(qp, attr, IB_QP_STATE);
- if (ret) {
- printk(KERN_ERR PFX "Couldn't change QP%d state to "
- "RTR: %d\n", i, ret);
- goto out;
- }
- attr->qp_state = IB_QPS_RTS;
- attr->sq_psn = IB_MAD_SEND_Q_PSN;
- ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
- if (ret) {
- printk(KERN_ERR PFX "Couldn't change QP%d state to "
- "RTS: %d\n", i, ret);
- goto out;
- }
- }
- ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
- if (ret) {
- printk(KERN_ERR PFX "Failed to request completion "
- "notification: %d\n", ret);
- goto out;
- }
- for (i = 0; i < IB_MAD_QPS_CORE; i++) {
- ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
- if (ret) {
- printk(KERN_ERR PFX "Couldn't post receive WRs\n");
- goto out;
- }
- }
- out:
- kfree(attr);
- return ret;
- }
- static void qp_event_handler(struct ib_event *event, void *qp_context)
- {
- struct ib_mad_qp_info *qp_info = qp_context;
- /* It's worse than that! He's dead, Jim! */
- printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
- event->event, qp_info->qp->qp_num);
- }
- static void init_mad_queue(struct ib_mad_qp_info *qp_info,
- struct ib_mad_queue *mad_queue)
- {
- mad_queue->qp_info = qp_info;
- mad_queue->count = 0;
- spin_lock_init(&mad_queue->lock);
- INIT_LIST_HEAD(&mad_queue->list);
- }
- static void init_mad_qp(struct ib_mad_port_private *port_priv,
- struct ib_mad_qp_info *qp_info)
- {
- qp_info->port_priv = port_priv;
- init_mad_queue(qp_info, &qp_info->send_queue);
- init_mad_queue(qp_info, &qp_info->recv_queue);
- INIT_LIST_HEAD(&qp_info->overflow_list);
- spin_lock_init(&qp_info->snoop_lock);
- qp_info->snoop_table = NULL;
- qp_info->snoop_table_size = 0;
- atomic_set(&qp_info->snoop_count, 0);
- }
- static int create_mad_qp(struct ib_mad_qp_info *qp_info,
- enum ib_qp_type qp_type)
- {
- struct ib_qp_init_attr qp_init_attr;
- int ret;
- memset(&qp_init_attr, 0, sizeof qp_init_attr);
- qp_init_attr.send_cq = qp_info->port_priv->cq;
- qp_init_attr.recv_cq = qp_info->port_priv->cq;
- qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
- qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
- qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
- qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
- qp_init_attr.qp_type = qp_type;
- qp_init_attr.port_num = qp_info->port_priv->port_num;
- qp_init_attr.qp_context = qp_info;
- qp_init_attr.event_handler = qp_event_handler;
- qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
- if (IS_ERR(qp_info->qp)) {
- printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
- get_spl_qp_index(qp_type));
- ret = PTR_ERR(qp_info->qp);
- goto error;
- }
- /* Use minimum queue sizes unless the CQ is resized */
- qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
- qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
- return 0;
- error:
- return ret;
- }
- static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
- {
- ib_destroy_qp(qp_info->qp);
- kfree(qp_info->snoop_table);
- }
- /*
- * Open the port
- * Create the QP, PD, MR, and CQ if needed
- */
- static int ib_mad_port_open(struct ib_device *device,
- int port_num)
- {
- int ret, cq_size;
- struct ib_mad_port_private *port_priv;
- unsigned long flags;
- char name[sizeof "ib_mad123"];
- /* Create new device info */
- port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
- if (!port_priv) {
- printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
- return -ENOMEM;
- }
- port_priv->device = device;
- port_priv->port_num = port_num;
- spin_lock_init(&port_priv->reg_lock);
- INIT_LIST_HEAD(&port_priv->agent_list);
- init_mad_qp(port_priv, &port_priv->qp_info[0]);
- init_mad_qp(port_priv, &port_priv->qp_info[1]);
- cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
- port_priv->cq = ib_create_cq(port_priv->device,
- ib_mad_thread_completion_handler,
- NULL, port_priv, cq_size, 0);
- if (IS_ERR(port_priv->cq)) {
- printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
- ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
- port_priv->pd = ib_alloc_pd(device);
- if (IS_ERR(port_priv->pd)) {
- printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
- ret = PTR_ERR(port_priv->pd);
- goto error4;
- }
- port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(port_priv->mr)) {
- printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
- ret = PTR_ERR(port_priv->mr);
- goto error5;
- }
- ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
- if (ret)
- goto error6;
- ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
- if (ret)
- goto error7;
- snprintf(name, sizeof name, "ib_mad%d", port_num);
- port_priv->wq = create_singlethread_workqueue(name);
- if (!port_priv->wq) {
- ret = -ENOMEM;
- goto error8;
- }
- INIT_WORK(&port_priv->work, ib_mad_completion_handler);
- spin_lock_irqsave(&ib_mad_port_list_lock, flags);
- list_add_tail(&port_priv->port_list, &ib_mad_port_list);
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- ret = ib_mad_port_start(port_priv);
- if (ret) {
- printk(KERN_ERR PFX "Couldn't start port\n");
- goto error9;
- }
- return 0;
- error9:
- spin_lock_irqsave(&ib_mad_port_list_lock, flags);
- list_del_init(&port_priv->port_list);
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- destroy_workqueue(port_priv->wq);
- error8:
- destroy_mad_qp(&port_priv->qp_info[1]);
- error7:
- destroy_mad_qp(&port_priv->qp_info[0]);
- error6:
- ib_dereg_mr(port_priv->mr);
- error5:
- ib_dealloc_pd(port_priv->pd);
- error4:
- ib_destroy_cq(port_priv->cq);
- cleanup_recv_queue(&port_priv->qp_info[1]);
- cleanup_recv_queue(&port_priv->qp_info[0]);
- error3:
- kfree(port_priv);
- return ret;
- }
- /*
- * Close the port
- * If there are no classes using the port, free the port
- * resources (CQ, MR, PD, QP) and remove the port's info structure
- */
- static int ib_mad_port_close(struct ib_device *device, int port_num)
- {
- struct ib_mad_port_private *port_priv;
- unsigned long flags;
- spin_lock_irqsave(&ib_mad_port_list_lock, flags);
- port_priv = __ib_get_mad_port(device, port_num);
- if (!port_priv) {
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- printk(KERN_ERR PFX "Port %d not found\n", port_num);
- return -ENODEV;
- }
- list_del_init(&port_priv->port_list);
- spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- destroy_workqueue(port_priv->wq);
- destroy_mad_qp(&port_priv->qp_info[1]);
- destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dereg_mr(port_priv->mr);
- ib_dealloc_pd(port_priv->pd);
- ib_destroy_cq(port_priv->cq);
- cleanup_recv_queue(&port_priv->qp_info[1]);
- cleanup_recv_queue(&port_priv->qp_info[0]);
- /* XXX: Handle deallocation of MAD registration tables */
- kfree(port_priv);
- return 0;
- }
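- /*
- * Per-device initialization: open the MAD port and the agent layer on
- * every physical port (switches use only port 0), unwinding any ports
- * already opened on failure.
- */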
- static void ib_mad_init_device(struct ib_device *device)
- {
- int start, end, i;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- return;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
- for (i = start; i <= end; i++) {
- if (ib_mad_port_open(device, i)) {
- printk(KERN_ERR PFX "Couldn't open %s port %d\n",
- device->name, i);
- goto error;
- }
- if (ib_agent_port_open(device, i)) {
- printk(KERN_ERR PFX "Couldn't open %s port %d "
- "for agents\n",
- device->name, i);
- goto error_agent;
- }
- }
- return;
- error_agent:
- if (ib_mad_port_close(device, i))
- printk(KERN_ERR PFX "Couldn't close %s port %d\n",
- device->name, i);
- error:
- i--;
- while (i >= start) {
- if (ib_agent_port_close(device, i))
- printk(KERN_ERR PFX "Couldn't close %s port %d "
- "for agents\n",
- device->name, i);
- if (ib_mad_port_close(device, i))
- printk(KERN_ERR PFX "Couldn't close %s port %d\n",
- device->name, i);
- i--;
- }
- }
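- /*
- * Per-device teardown: close the agent layer and MAD port for every
- * port opened by ib_mad_init_device().
- */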
- static void ib_mad_remove_device(struct ib_device *device)
- {
- int i, num_ports, cur_port;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- num_ports = 1;
- cur_port = 0;
- } else {
- num_ports = device->phys_port_cnt;
- cur_port = 1;
- }
- for (i = 0; i < num_ports; i++, cur_port++) {
- if (ib_agent_port_close(device, cur_port))
- printk(KERN_ERR PFX "Couldn't close %s port %d "
- "for agents\n",
- device->name, cur_port);
- if (ib_mad_port_close(device, cur_port))
- printk(KERN_ERR PFX "Couldn't close %s port %d\n",
- device->name, cur_port);
- }
- }
- static struct ib_client mad_client = {
- .name = "mad",
- .add = ib_mad_init_device,
- .remove = ib_mad_remove_device
- };
- static int __init ib_mad_init_module(void)
- {
- int ret;
- spin_lock_init(&ib_mad_port_list_lock);
- ib_mad_cache = kmem_cache_create("ib_mad",
- sizeof(struct ib_mad_private),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!ib_mad_cache) {
- printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
- ret = -ENOMEM;
- goto error1;
- }
- INIT_LIST_HEAD(&ib_mad_port_list);
- if (ib_register_client(&mad_client)) {
- printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
- ret = -EINVAL;
- goto error2;
- }
- return 0;
- error2:
- kmem_cache_destroy(ib_mad_cache);
- error1:
- return ret;
- }
- static void __exit ib_mad_cleanup_module(void)
- {
- ib_unregister_client(&mad_client);
- kmem_cache_destroy(ib_mad_cache);
- }
- module_init(ib_mad_init_module);
- module_exit(ib_mad_cleanup_module);