/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/scatterlist.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>

#include <trace/events/rdma.h>
#include <trace/events/sunrpc_base.h>

/**
 ** Event classes
 **/
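
/*
 * Generic work completion event: records the completion queue ID and
 * completion ID from the rpc_rdma_cid along with the WC status.  The
 * vendor error is captured only when the completion failed; otherwise
 * it is recorded as zero.
 */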

DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
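
/*
 * Each DEFINE_*_EVENT() wrapper instantiates a tracepoint that takes
 * the same arguments as its event class.  For example,
 * DEFINE_COMPLETION_EVENT(xprtrdma_wc_send) further below generates a
 * trace_xprtrdma_wc_send(wc, cid) call for the client's Send
 * completion handler.
 */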

DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
	),

	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

#define DEFINE_SEND_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_send_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SEND_FLUSH_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_send_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
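
/*
 * MR completion events share the layout of rpcrdma_completion_class,
 * but the completion ID identifies a memory region, so it is shown as
 * "mr.id=" in the trace output.
 */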

DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
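
/*
 * Receive completion events additionally record the number of bytes
 * received (wc->byte_len) on success; on failure the byte count is
 * reported as zero and the vendor error is captured instead.
 */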

DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->received = wc->byte_len;
	),

	TP_printk("cq.id=%u cid=%d received=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->received
	)
);

#define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
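
/*
 * Reply events capture the transport header fields (XID, version,
 * procedure) of a received rpcrdma_rep together with the peer's
 * address and port strings.
 */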

DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))

DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);

#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))

DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
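
/*
 * TRACE_DEFINE_ENUM() exports these enum values to the tracepoint
 * format files so that the __print_symbolic() translation in
 * xprtrdma_show_direction() can be decoded by user space trace parsers.
 */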

TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
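
/*
 * MR events record the MR's resource ID, mapping, and DMA direction.
 * When the MR is associated with an rpcrdma_req, the owning RPC task
 * and client IDs are captured as well; otherwise they are recorded as
 * 0 and -1.
 */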

DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;

		if (req) {
			const struct rpc_task *task = req->rl_slot.rq_task;

			__entry->task_id = task->tk_pid;
			__entry->client_id = task->tk_client->cl_clid;
		} else {
			__entry->task_id = 0;
			__entry->client_id = -1;
		}
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))

/**
 ** Connection events
 **/
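
/*
 * Records the connection's negotiated and calculated inline send and
 * receive thresholds, plus the source and destination addresses taken
 * from the connection's rdma_cm_id.
 */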

TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);

TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);


TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);

TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
		__entry->task_id, __entry->client_id,
		__get_str(addr), __get_str(port)
	)
);

DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
DEFINE_WRCH_EVENT(wp);
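
/*
 * Export the chunk type enum values so that __print_symbolic() in
 * xprtrdma_show_chunktype() can be decoded by user space trace parsers.
 */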

TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
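
/*
 * Records the Send WR's SGE count and whether the WR was posted with
 * IB_SEND_SIGNALED.  The client ID is recorded as -1 when the request's
 * task has no rpc_clnt attached (likely the case for backchannel
 * requests).
 */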

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);

TRACE_EVENT(xprtrdma_post_send_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req,
		int rc
	),

	TP_ARGS(r_xprt, req, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, rc)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->rc = rc;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = rep->rr_cid.ci_queue_id;
		__entry->completion_id = rep->rr_cid.ci_completion_id;
	),

	TP_printk("cq.id=%d cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, count)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->count = count;
		__entry->posted = ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->count, __entry->posted
	)
);

TRACE_EVENT(xprtrdma_post_recvs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->status = status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->status
	)
);

TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/

DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);

TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);

TRACE_EVENT(xprtrdma_dma_maperr,
	TP_PROTO(
		u64 addr
	),

	TP_ARGS(addr),

	TP_STRUCT__entry(
		__field(u64, addr)
	),

	TP_fast_assign(
		__entry->addr = addr;
	),

	TP_printk("dma addr=0x%llx\n", __entry->addr)
);

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);

DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);

TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);

TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);

TRACE_EVENT(xprtrdma_err_unrecognized,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *procedure
	),

	TP_ARGS(rqst, procedure),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, procedure)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->procedure = be32_to_cpup(procedure);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->procedure
	)
);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
		__entry->task_id, __entry->client_id
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);

DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);

/**
 ** Server-side RPC/RDMA events
 **/
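
/*
 * Accept errors: each DEFINE_ACCEPT_EVENT() below generates an
 * svcrdma_<name>_err tracepoint that records the failing status code
 * and the peer address at the point transport accept failed.
 */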

DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);

TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
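
/*
 * The received transport header is parsed with be32_to_cpup(p++), so
 * the assignment order (XID, version, credits, procedure) mirrors the
 * order of the fields at the start of the RPC-over-RDMA header.
 */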

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);

TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);

TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);

TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);

TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		unsigned int msglen
	),

	TP_ARGS(ctxt, msglen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
		__field(unsigned int, msglen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->hdrlen = ctxt->sc_hdrbuf.len;
		__entry->msglen = msglen;
	),

	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen, __entry->msglen,
		__entry->hdrlen + __entry->msglen)
);

TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);

TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
	),

	TP_printk("cq.id=%d cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);

TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
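
/*
 * Records how many Send Queue entries were consumed when a Read,
 * Write, or Reply chunk was posted (see the DEFINE_POST_CHUNK_EVENT()
 * instances below).
 */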

DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);

#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
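
/*
 * Read completion: reports the total number of payload bytes pulled
 * and the latency in microseconds between posting the Read chunk and
 * its completion, computed with ktime_us_delta().
 */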

TRACE_EVENT(svcrdma_wc_read,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid,
		unsigned int totalbytes,
		const ktime_t posttime
	),

	TP_ARGS(wc, cid, totalbytes, posttime),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(s64, read_latency)
		__field(unsigned int, totalbytes)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->totalbytes = totalbytes;
		__entry->read_latency = ktime_us_delta(ktime_get(), posttime);
	),

	TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
		__entry->cq_id, __entry->completion_id,
		__entry->totalbytes, __entry->read_latency
	)
);

DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);

DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>