From 703bdf9459f1a3de7f0957903fb7f3618cb33aec Mon Sep 17 00:00:00 2001 From: Shuaiqiang Chang Date: Tue, 21 Mar 2023 10:07:15 +0800 Subject: [PATCH 01/71] Add files via upload --- docs/zh/05-get-started/xiaot-03.webp | Bin 0 -> 55176 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/zh/05-get-started/xiaot-03.webp diff --git a/docs/zh/05-get-started/xiaot-03.webp b/docs/zh/05-get-started/xiaot-03.webp new file mode 100644 index 0000000000000000000000000000000000000000..c115346e9e84ffb990aec5947b2107b5daf6e19b GIT binary patch literal 55176 zcmagE2Rs~K^anayti@vW&gvz~>LSWoq9me3qJ%^*N%UT0v4mK%i0EC26wxAxPK4+Y zEqW(v^b$P3|NFep|Gm%Wz0W%{=gjw>d*j7HaUWw)JxVzwrOd5d;8- zBfymefFZH}m-_!d6`7sAm+h6&&J}xmUUd!tAiFC}@A!XW|NpSf|A`6zVb6Pa)vs)F zudtBg{|(#x->{#zyZ4n%=YRIL?wV&T5gdrOMYDC1SHKn!2T)hu{O7IQ z)g7aY9R>i2mo2uy9c==ijzPhD74CMEg{GFuuREL?`=Q{{*d=rA`4F#bxY{JrZb#bD z(czm(sTKl#eABEPy*j0g=!J#Fx_e!_eB9mrPrf!wlxgQTaduY6{hn~0 z7wc{{WgolVfbZ;bQafI5dKyI4nl{O=Dd;8+C;5zJ@I8}_5m}*WzD&S9m`Y0{B&m-k28D=6DIB_K}4}C5UEGu2r+a=pfg$|)r=EklAyQdoFEF_FN5^-YoeTN%f8V*f#XoE5>GB*~ z%Zybo;~8p#vrA&SsdtV~f}g!w?1c6KdihhSBm!p8^Wq1Ov>p!+R+P5T{6pSTu$rrJ z;nlIZFw{d;I*yL~sw@9=-kiYK_4%hIz`>@^$Lh5eTIXoPsy!jl)6}Vxit{>E1PelEMtafXK;(88IM(3rww*GWa-N=oOg0F4WUY6H-qP~Z&; zArwdX#5kD_jBDj+L&{~lt3?Y7HSYxbx8@>k$g^{E@ep^|J@Q~fT->Y$U|$8OBg{k@ zts!q_xlX|c2+_u7E}`bK#~-l>wVJUp02k+JKw18Q4vdY?X9Vc$c!<$O_~)EIcw9%ArlJb~8oKFs&qfJEXN^L>9+S?>||N4i$W5PF1tsli11zKDb zji)O_H=R=TBJ3Ta?jGj!oJELVP(HHSzto%F+#n_Ic7H#=ZV(?T?v6j!ccEQ%axb^SY!jZ?Gp+C9?_dY3E&g z&;E8*{@z?6yT&uDPmPF6viRIw`cp=RCx!|>Z|^_RQEbwcmQL~Nd-Ha2T6YMMcUwa% z|L(`y{P1cN)yo&%J-=^hQ1BEw4NOT6m$b>+y;NKEHbE-healj@9$Ct##~{6sc@uk& z_SGA^D7OzT{nZn9ANU2u8Pg!aYuYAh4`P`+0dtBRK*sdJ*@M`XXIm@JW}@=b2efHz&2shNdTCw z4Cxk%$)5{98byA1tjEB5$LGY-*@XKk)3$l!hZ5|_`<*KxelhpMh~&KoLxpd7c&Wp? 
zfs$ZxV@HM?tABK_`nhWSgu&c;`lm^8olLrl40+a%-#>qrU<=6Z*Z906evVxdk@?+Q zf_?pdU{IuEnE&a!+gEMud`kVVGBto<6aNL#1zA1*uc@JneJW#{$J zmRz>zJ%Madd5jtkZ(YpOJ&kh@a(}=mHDDx&EvWSRqy04ccA?=YR-i&ok@#9a;jc|- zvxL(&Jfr4D!h}$MgWoU~CSKxYyioVf-7lJ*=iMfy@q$F3L;>gIW<;g*{>O*I;xw$a z>fKm}vAtO;uiQbi7T1@3CrpZMGge7@!M|es!QNSKxh8!~dY$HoUvE^gw^={u*gB^u z=Plr^SN@(wGdnP?{4?z9XFq)l$8HTzKS9MU!_hvEm75M_0(uiv%+)SzQHO_@-!j_D zTqe*}UoTa&>F9C;PMelI_m`tquCI8%C;22p?@;vSGW{Q2{hOw^fNwo4KM8No_5-rd z8kwS+<{YbAB`5Tg(@vm8x8~ZNuVqsCVPe-Jg)^yEhFg2crPBiHmBJpSZWjbgaFKh@2lqfP#yJi08IbyOUX#cz zclr)0r#!T@mbuvS$G{uxy?5u<+!5qLJys6S-qS0@N=}fg&Skpiix@rU6~h|6F))d1 z>p|a7dQ*>yWD&?*LqEy;IBu-q&{2t=AYZ``(Fr8AK8~dH9M*l;VJ+jOASqqRq1U65 z@rW=!IAX)A6V7*i%dqm7<--df&Ewa6U5R{M)zLaV6>id(N_*qdYaQ%#J4+|Ev%Qx; z{{=JeKm$_zy=oD~1rii(i#DwRLNAKsv=x_i?A7p zD$um=M_t*q!!|eCmI7>N&XB5#&dBqeP=}@LB-_?YiwKFl`_f;`^8{@w-!=cpZOcUC zmH&gYfEGb@o9}i~R9un`cUWH3x5;ZU(5p?mFT8~tRy>k(x$FA(v*UP|w@MiPOBR_- z1)2YHhOs9r3EaS~&ophHGQsPnWQ2w74$lkc!}#9Hz=$B5hA>JnxF2Bgsg+mBPgrF+ zERwvpGn+BH9@CrsTo3~W(T3x_#s0`fz)%^n*ik)7$sLD!c_{Mrey)N$(Y7-IS1{d` zA(9JFA=Q?)*5K^>ELPYlS?kQ1C?01<7+8(6Rb0sc8`XdzRfB=F5HyQ%`*v?U1@~dl z`UBR^y>1tiTzV@n=^zNDuML^}0KX1uNv3pywXjgVT>&?6R~ZcDX{5>W5gy0Z4+{EI zPj{MRNMhl9DUc2KQ2s{t=H41{BwXs*xmm!RS!6Tt%=Z};G&~C+cON2;@K194+fK%S z`t-NshcfS?(Sr2yhQ)Q5v+Hzl|4!>iEVDp@TZ&uy5d}|Lqeq{p8^f@d!q(i|_wI&z6a4mXO7nMbp$2?~_oR+MDudFIhUY{7J zRbzgqOkoMC3@Yj>VsgJD1Rf;6#w@SF4trY(roX!0t56o{c!}QZ`qKJEW2?<{sdAR7 z-;r!d678Z;vv@Y|;wgDdoRMl0k%H(QOf0^Z)QaEfv_wzbu{l^KUHHD_2+E4^?_%nm za8t}%?{Vzw&zYh3ipTw4c)xTv{+|6d#mZ>6Ti9ogac8$w478E5QTHN9P~|>sKc*O< zQZc%?sj6CZzR9i!v1z!udUE*bm5DD!hx)${$H}`Vo;7*ht7fS8ZgCPZ5|H^npL3KQ zv#wXa7jBeyT?M=UdAcMk0BRlW6^0KqcPPCDBvmo6{Vh$p(|c+fOI`2ZMUq z%Y(Bs+4h`Q-&itR3aR1pTaWkS{=RKtY%{Xgk`VvNPoAAXVjCJTz}vc7_`BlEI(TOfM!pDf>f=9Qho@u%7S3Nn@ezY z*Np(L8K+U-4e!9HzuV!95QtyWh2Mulq^ueF2CR~B2&2>zQqBhs+D<4jTzCFGH6LR3 zHzA!)77)TyH#?4XBljlO^f#%6YH1`g>1zEwt#U2OG2of+i#a+^bc- z&kt*Ooa-K9;{88AieTB;l8vP_A(7Hl>d6*G=v3G?AF^wSH)~qZLJu_lu711A6GWIk z?R9^05EzX^DJ3|cCb$iKHD;+1o_3!$p!aO5UtC`_qsxk~Jq?i<_q9sa$Q9S!_VAqo zlgAleNw(xI;{PC_vg`Q?+OXx)r%5XxL~6-)q&wB@5M=N3jf2kh^1-oU zr`>DyKIPpeyU&?Nh_Kp&jHLb=t4C*eoZp=3Il~&HX<9}IpmMg=mEG54D)1^Tv&AG;`OPu}- zAS>W}_D5+i148EhH(oY`eqgU3WRpog)5}5_R({!;HM!%Z)SAV*T(rJ41{BMHwq%90 z?jK^L;Yr?PBsYBoKRp+~ztaQV1WVoFspQ6RZjyY?nswQQ7q7zsM108 z;>V9IU=BvuF!b<-Ee7yvtaIM!ih#nlwTslMOg1)GE$cuje_DzlbPdv;K(VJ{mD(TJQ2Jz8ZI&L`lX=V&+UTVff)c1XXgNJryYt3>=CgJY=M-pq0SBLR(0suuLjv&%amWy)=0 z*f+Kf0?%S6sIkK>`Mo1ZZsl3|zjOr^*@f^_UD8n(>E~}CP8$C*kCQ9i5NONbN2|Jo zoPb|X+2z0o_4=n=imB<3!(MwUpMU9fI|9wEuC^eoik?1l<(wqpT<6Zg0xj|mb%1L} z*5qk38?)-FUmY{y`FLCz%@_v;%mXDb-@@6Y)yo$zXE*exiD7ICOtYAjFS(MEk6^1* zMCBpyN-(|<=DAVSKu5F^Y$FO(LW{`g6`LnH^E1i-7RJh$Oc=`A;_iGs&NLCvMi@@} z%IbtA;{_$Xn$m72sSH;^bzb|~@i+|BMXv}-3roDUIt>-V>cYC%IoffXc?o5n4q&Jp zp752YN`qil4kREp4(#b)zI+brEU!Mg(Pj_vZiAvFi=99sRuq1;#oQPLx-Q zqnYHO!XrKpfUKaa>bWB#ZL%USB<8@iW9)}rXFVLmT9ei=)lnYPe{pR1ceZ}sN0jba z$v**qG75-15gdi{5PqV_h`oxpy-t1);f7Z@-{EWc^_F1&SUnmk89P0l@N&nnF@IPs z&p*lJ*`Oxs{o2!cVnO@H)@Yl2`-2yK7e3oJdP77JdlgL2*7qlw<-c6yD+GU}7;oBQ zk+xoyl?bMqfIqaP(s#LbJ>P!iwkntS2x_8NnoCvo=7)waLBHSe{`%f@Ihqjr=mb9u z!C!yY?<|-rzcoE&J=Q*Vxqf@OqJhW#;$vfRT%ahlxcKt5f8P_=pX-&De3uyBFMsJP$irGB_a6`M6dvGCeldN28A#sje#q1*kwZZVpDbcUN+=2!ckPW2W>(}7(?k=CWh6Yaok9ZN00o01fv59a`nfGvw z4||b{ofli4!kD!*aA>PlNq`xlJvKhuX~05>e2ZiX1L0-(s;J-!o;|gt^#uZ2H?cqv z1_9QQ;2j(wu~P6sLZJkCgTAMc+zP&hY8v2<1i--{3>qzjq=akarn}SAfzu0+xi48x z-vZ?A5H@LM%}#CTU4W()cwxwGt<$oSHq3x*>kGW55if(mAZ;*kE$1C4ZwS7CJ$_u# zNQ>9KhO5;_({9d8&E|)JD8dxE*TTp$HDL}LSTQnYjB9WC%aWcerP-`DX%1l?%S;mKo{iS~Fc7-#4fDogmo13@U5%M-oX#Ua@8<-$y_WIa^S=Y_H}{djhfU 
zDwRzdY;TLzO`07bFsT`LxD{#G?^mDG6p=#>?zVvHIXDuBr$ll!; z_xMgbp6%POl&hC>kQ%Tkk+g2cr5w7|6*Q@wYdqT2pD+I!OadTU%^}h z7MGGa{kry1Yp!tS-I}8dXO=GC^FOL0-0s_#^6`RV35rccdam#p7KOi;fkS(w+<&}Z zuqjS@UW`dZeNlxsFRowGZ#U}ac1I))GCw`TS@Ls4z3-l0mG2G|&y$llk=^UY{mGI( z&wVM&GQy_k+N}if*-?rQyi1MijRT_cM3v6+V=+*1k%defGc!^BZAekKI9Zfon;Jmk zag;a#f@Iem@+1tt4V%?5^EYxOTky*!Yq!T!906velAZ+EJS3d`fWZi)DaNS(^cG6* zrhjJ?E%F0@V53%F&|U{{M~9#c+;U4mb=7kN!jVwL$G6DmEkfLdST=gq0-SC=OflFu z{9zhiF{!W^E`AR_8HIb*#gkW0;irw#<=c zOeZPh{DGeu!Y$x!d$+kKOE?fifnQApsB73Igj~il^6bJ`JisJED3mrby6`m_3Z_NC zT+^B%K>IOL!0lyQsWD9ItXc;I?pK%A;PxZNiAsGw%v0Q(3@v&<@IZUNSf7KdM`JB0 zQ)wXKfRwl=A<8g}{Vg*}h{IXKPL!Pii9}FD!f#ln$-VOWrp}(E)mb57P{K2Vi4`7yF&jnY|rUs{y}Yp=DxI}Ae;K2{2oI5VacWRlfK1r4wdwf33> zF=b*+XWVUz_wLOGpB29-zg{+jd{7HK4CzQH>)3REO zq%T{cCio99t{QYkrj1Kyut>swIGQm5AG&6`{eH+mJ2msoUh=zx-z^j~Ro~rd^U{^i zA!jYDFH7dQ&Lt)@Dg4`#o0PNf@%KOMVY+z72niq3TWhs>ip8TR@?g~mrCM*}K&T{#Gnq#I`IpE;a z(t62NALpoVIsMf#!tNyE;wCl8t;OyoF5frIi>enB(`|1<2PIM|coaMiBeJW%0U_VF zkETX6P%qCu@4}=(JDZ^qN{`U5nhY*HqiUh!p*wD|U(>BikFzfj6Yl$8FSLQ5=L&Q7 zmr3g$1}hhz*mkzur^qBveg%C|`U0v@_4or{7^g%dUmd*nDmJT8_6Uy*>3Xc?xk=J` z9PHcO8hbWMzaU5@?l{*nN5(jPkqGuw1~2}zFGd4`%dx%3f-{jB8y>_x`AqTxKdxJD z0cEID@S#O)$V4vWckf6x8R5pF9%s$?OZ(2ADlIzVQ8l^s^d|wx&~qPOY`ahOnRJ*< z_B(mzY}EWXUXGNo=|`J5?AzvU2h=T!-{gHR4=JIhMn-bH$lqVh%5 zk`aw(gzm^^RE!MjaRL)A6&NZ)Vc{_oXjs#xLdeZA#2pbFc=cd=14UB)(-tyVGxbfo>kX&65<9J zTPUM&5H1vG15EpNM-98HsX1SyX0rR(d4C`F!x?5RWX&7~gj;){*t=^ zlNHaJi%>QN^)-(}n0i*+@4!hiHkDvZ4(KQ`WHo8$>WdI~+ruw#a1cpYivuhyYBQa% zG?2Ec+XBjXDCO($aC6MtKZoZIj>Y0Zmlpezz0QTLd+fqdYTJ#AyRzgH6B5_zUL(mi zrv7yvSYZ1s>i_=ORB83~Iad*B|4HxDYveZZ5f|k5_rude;OXV^R1kHav(7qy@u&6S zUpI&EueiO9neF*#qU5%OQdt-E=Fz~AgyBzK2y*!1HdNNI1m2M0^W9TTBn|`1yMUn+ z6BvEImM4)q)NOg?J$>Gc&N+Q|a^MlHI@RTS`Stpcs2e_G`m$Ws30+%B~vz)6SKBv}~)we)>f}H=JHaqF4ZZq{Ru?g!m zEC$>wD0p7pS#^q=nJOyMloOmOrNiyyt6q@qMOo0dT9U8r&~#;&jNdHKBMM#+VcZqs z6&7%x-H0N1uKvHQ;B9Lqwp}2P^H|UYj(>VtsX7yh-XEC}`h-V#O;}#^f{`Kf`hSyA z3vRn?S8;ErK#Aogn~$8veBnzY3hrJfL3e6NeY>)qf?prg#9~?tM>}BdigsPK3atal zA4MlK`fN3cHM&H~Sih%1&BtNuU^E_rf;*%{8gZ3X^)0rjzukJ>*;>Mqth@!G+eKot z@u;zw6s#PWR)vRO{Ik?sz|NO8;@+*Ed<;aBmQ`-;x3O*&PdDVvV;kLNaXE6u17FJ+ zH0e9?FCz#qR9ABlVAU{^}cn*NYD;!&M^V}od&7qJeajHgzkjlXY% zL8-C)o@I98v*K%p&N08nyg-XHlhCG;8%guf#6?QU%yt=g(fE!l#@A#nt40}}j^ za_Svq^Hn_VT|G3QmvjIXPa42T&0omcYQMnJou(xd|6Ym{vzm&|##MN)^DLoItF#L` zI&|qCI#@fi>y&(V80}FS2_Bp_W1X$&@K`XCCCZTrobWw^M!D;*nOy*A6^uY?eub5* zY1+bG5(+^OMKm-iGUBPEN}NF61yu8&;;i7{@El7pef8C>U$qKjP%#J-hNf+x2^bwfARQ0zG0|Jb~gA=)LyS^0y4*MsD3c@VDEI0 z=_pxY#(igCFUbDLp>F>d$9RwZ@0FQ7z7?yv35$^JIz4i3wUBQs(dmme^ymxa&iBY(pb-+?-}>D zzbEhOz5S_wbtM5GznxBMh19ym#HIuja!RJR5{;~PyP7P-vz$^|Hn$IB^f-Q@j)F6( z?Y<`|hLb)cndJZOuuJ%IisAtrwfXidr+5bVd(Dh^UIpvSts@tOWJKOBWC42 z%Zur5tInnVU^RDv*0`6ap*O{km|CWyMPK?|OkB(kS-|R#Kb1>k*jx2*rYd(wOwlvdpm-mnT$-`?_w5pw!=ORn3`k zTetTZe=6<1&RLx?Ca7)~9}mwv_&nN0^_b)9iDNAUjlg{QgSuMogaYU|1hHeK9{dgA zu4OQ0q=u=mz(`_Ug!mNS7?P)^r8*I1Q3Va$M(Pf7;aWC!Hne2PVnU`3$(d|fa~*T0 zx^6A$VG1KC@zvpa*F?SQ)&@jpZ2@)w|NJ?Qdoh1<7J{6w7XfeeKGgp88v4S}5f6nT zuYn6uqOMRI5M0f$tfeeTN1c@f%&&o~aiwckV0xQNV|TCgccM1fX!TL@2kM9WLQwcE zOg3LXJ0on$lT{P4C@Sibn~p$1_9`bTajP7=IGIugef?3%a#h9vR@S2IUcw5;23SsmUOcOck|>S@i7yo<}mb zsu{HR!%2y?Z6AhuOp#(g+6=%DWDDZWd>APnIQJVz%+yjHA1MtO!~w`*zz(8NMFk`V z^O^x1Ce%huR3M^#m>o1)fGmu*A`MwM4ejkV0~#WVXf==We;tkCv%v$BPjv(a_&t6j z8E*k~wN5C^wb|Z!dA|?xJrM&PC7fHV^iYIG6xQV}GS{iqA@@oS~bs_`hos z7v~d+`zOyQO=)p#T$eGHM~u|8P9YbgA@U;>T$c<@$Bm$=puQ|-Bqw0r*YKN^fmQCH z%uN}npCMJYt0o>qQs_6iK4d=JI^1+sS`By3{&F@O;e*~h7;X5q+2Q+JY3+5fo7~Ok z&*oS<$M&ZG*ttk0{kirmKFIegh5Cr+FQ>L) 
zHSX(zC)&>mmxDRmkfRZBgWsh|DEc7~lB7CE;acsDqy6c2c5UvR9;u0j-oM_N%`Guv zK;wCYBuEO0tv@drRe)y%UAWV>4tq7_>x}({Mg^nhI%k`K4vSP0dE(yO+Myv$l(g=YOedS3nw3`ZarwU~?4UoA3@!=^U1kB%Nf~P9dfJY{H9_ zQG1MPb$q&a6NOQ~IFNf`;j{eT^HJUu`MDr%6hP*k$^h4xi|We+(GDdeqq`H9x+5S& z^aH{>J{OHda=yYYKF|ZJ(y}|zh9pFptHdBwIi*nh5-=wOjf;kLz#|HT)d0|ETy|Wp z*i-(BTPPC7<|(3r0b`wqCZrYZiD+)wCBwjxX3Tt$Fya~<)t&&PGxN6t@Nhs_8;rt5 z=wiQv?Y)Us8W=Pp17N4%*WiAU2*N_p#(Y*-0rhK6qSOXBH9;H(gQ4f|YP%^1k)-s(=e2BI&;KS9wN|-1yj~t3}g7M*-k#OiLz)iS;Bt}|$ z@Q}^EYO(rBuEEz)Vc-OzYfHC}kGPJ3edE(=A(D_$3@30?;kI}m2x*C9?e4vx*EOJF z*`x=78MK{RP}T_%V{yF$zI+(20aP0gjvEQbTce0iQu7Fm-<)1p>r(g|Z&(RJ!PXG~ z*c9oS`W<)Q4Ip#22t$1N?BIk$43sU(2o=TJQ&-zu%MaV|hUcaKYHzJD8yhb}lMKY& zgHzb&iFw#f%V`6fovZI+tpPs3_eYqdthDo(RJl73ec!hLZ2@#D3HD)d%Njf^@TA`( znT|v1;lX<~!wIFu_^BeuUfPKZwGVpAyZ8P6Gx@TH3mvx`k0U~Q>9$)T7Js={5&_w! zIkOL`2q}r6jbF^3|4ON+9km#mJ0DWlzqh1GPphBl);D5J<=Pj2IW=Ky6;k?acHs=Z zm8E!6opB$u6tTrLgvt(DGk0A%@7RwaeUwGxwXS+1`PEf@Ec9O~h%)1b%IR8M;qqq5 z3FL5vEdQ&1?K3Hi=ZVV^@{YdJIt`Z>M_g&n>10rGZN$*{^UXX9eGGAPp;4&l9Thpa z)pq)fe<1?to{lN|NUJ0QJLjk7d=arUS~KseE*k8XG*IqrNG46sjQh~{7$C)smIi65_u^AZe%OcXv-1xmT`eC&i}n43q%%v;BKJDJALY>=B)PJHu-)>HfI#BTSuuop!^kJhY3-SZSLML8;S;5~gJi81q#=~1E&6mm0((Cs% zFdfN{@7Tk@baJQ#nXyt+DeLmEvZ-*HH=pll+sV*k1WdeHtR{w(Xa``DO9-+7U9(>U zzoIMcWei&gJ$iPiA$k<;6CSlPIm+-~Ba~1r@W&x%+6ZJ6=yPci`iZ=ZHx>nd45yXu zzsa3&cSAdsuBa$;Tw(_xMte1K#2X`7k~M(KZW= z-WdQGnkSNGGoWjL6uvcxq@{s-L#$JAP;ep&i-Q|toFbome)1Sd!H7oV+TnZ^228kc zytU~x;AAR{$`Yd*;HRLIK_wLE%0ZB|@6>MPk|RJiQ$4KLAlw4HSD96dLi8&jIJ3}e z1O+wjT2y|!{}UN}o;I{I8h}N!7lebd;;vRnFbkmFbmND;nGmWCsmZ(RWNQxvwHd|% zpr0)4U%xH35K3s^NVTYVJ7n|=TNtt;Gd3Kk_oR=$4f^PTL@7o4Qz8nUXyTPxL2~eP zfDlz6iUZ@%EYtz@-T~e|0%_(QX=gAuiBXIUrLvkFcFc~bme}_94FfcsvPbwX{j~{a z?edxufxvjVm9;Qo0Zm?Q=|8+35tv(G1T+_r%32H`d>o#S(>>yT8WrYD!cr|`?Zms_ zYFe|-nK1XR9|96W4ZiMm_4u7=Rk#(fv-B#ORJGMUuXi?He?PHT;I-$Uh0NxB z=}ni$nftOadvm>)HIEr`izYb7L44EyO8e`#t2ZthB_42x{tGSpDzCG8aF|Rj%eR(G zm(-`Utiz+_wHJNap7i7gl(J`K??=XUP;q7P#g?*cqQym-pbknL*;YAyK*2YAA>K6c zg$ijJ(%EMGly7f&-h2x6UVWv`FJ!J-Qh4S_CSm0=3!3a48ZRHHN)a>O5L5~#e!qNn zwEV7cbixwMwY}W+^Fv=SLEqoOUcW48@B1nKeF-!$Xp@~JQR~8z`i}MXr^B0KR#%_R z2Y-}vCo|7FF9{ik(xlP1%`?Q6B);`gDE*^u)Dj~p>^mo&KA9JgY-l*8-oLbTBQxea zr3<}lV0KB+KhD@d%{)tjbuw zpuRlkm399)5ifo9N47<@enAuIo+jbHDWMnNbk6n6)^K^A`iMHiEY&6P8aU6`tu67s zCs&P_+S4tJlJgkGd`ojM=AGRBvricC^x?Rr7_?#w=qdV~67giTYN@D9jE-At8HJC7 zVJWb;6lsy1#EpX}N{)??!^0p0x0zPgAqJ3iE&v`~$41Y3cNT@ue`d2gp+v&ZiZ;P5 zygTGHeB=+M#K;nHm8m>F&9E)Gs2_(Uvxg3V06*%*LmpY)X@CJkou$#tgOcQZQ&u}j zwSbk%&MzOT0ulTfh^h2W5O_>;cP|mf)e^PQMD@EqzlBcU?6_iAnc{xYE0ATCa(KY#9lwCaxji^A+lC9DkOxh92Qvs|E_#Tc5sb11IFEylT!I*28 zYS7Z=9Ib)35>I{ulDel2in5mch_DxWbsI<+ra@afa|iJ9-gsA*d}2V0WZ3H6&*ksw zkaT$31G`SnL@1`sm9%IyLV-K`U|j~qPA>=e2I}#oD{tLsWgJ%*55J|u~$j3U27Cp zn3neWnxjS%*9TY!M%OIj`sEVMdq36vUyBL;Jly?f`X7Wu#WMpNWO2z~SoKM`52vB#CSMe!m%h1#U!qx+ue;!X%qdw_XN6b5;C2W!_ZB;VqAh zm|;HK|7Ewgak(KKUF$%ax~}?HqTa3)nEJB4{?^OO4;pR#VGX4EEh%6}c4a~PQ2s=s z`#P^*!-(q6J@Cku^o6Y3*Ip~p760bYg)h_;8k5<_+?W0_(6PpC{on}!aGIVaPT}r* zC}QX$PSynH5Y(?0xc2<~rlUxBh_wW`<(%s{uFp*nsFfrf{nkz5 zJ@5P!tzBp>a}C*tHM@6}rt-h|+BNL<(N*{{2YK;2REM;$VNsFMWRS3Ve@^_)t4K*? 
zb2m#|GmH}B8mwLJuNSTc#$(E(WHb(QLS>ngVDX=F4wO117EaiF-KXVwvr1YRio1F& z5E&Zr@`HGOF@!O5cWbH@R2oxTDxHNK!YFvUK}^d^7@aTz;k?{e>x(XN3=ay4P`8BW zKtNEAfuQGBx$3k43 zyl$R$9w-J{M#dY=jIcJR*U~71yA^F+iD5N#HXf{eq7ss#G8m!~lqYtn0udPxLblQT z*sn6c!=b2fYvzRKf8Mc^i6fET?e5-mk${5;h8_xumwApQT8*s%d>}*up!ndf4fE?B z3y214nP^l1;%O6LFBg^>WQ-Cy-olqeUvn;D{QgF<4+ZnH+Q z%EW0y$D>My5otfD~!QJwO`KS1GMeq~3zr2g}Qa8BuWlXaMY?@iLb# zCW4d#lL0e#dXjoG6FyKyiHwu~?G&;a|SI81e90(lt8}Mrn#t}A`Ma`?M?|f202ty2h&M13%KkpsoFH@LX$i1LmXCe@_hO3td?|ItA5s#g=MB)!IH!UvvJ|Dc!{dHqe217bZv|2)n|JzdVRQfNUuzx-1DUJGZbBh#H+cj{=fT1;XPcFyBfd2UHqzx6w91TQnb6Pnu!I^dq4Ya-ndH7QFi+2 zP73zPMRqhKY>X}|$25^@Z@u9!#cf}u{hj)6oqyC-f7H4F4q@*dFDPfcMsqQAe*eO< z%a)?$&za60wK+z%|L*UJX{A4Qp(s2ny&P6!_8k!vh?~8gl_O!(K!@whwG+~8ZrYhO zX$q9ZW`T&g3QdcSfiIA@zVji^)N@lf-NY{oKc(@6?|l(R_whSly<(@=i16%V{eN$^ z9^BM*JzZ!z>Js)G3}WuDo%pou#EJpqH$O`#b{|-HAbmaSHdvc`TBafm4Zh^~wb8Jq zuG~|^*JauF!U($C;JZ8|!0R%NAaFb|>$7z7l z9`VkGpp-ftHH+*pYRd)FTJ`~drw*ycefc8#>~ir z0f)gg%K5BX6SSN%3&Lzc;9*)d=f10@R{32kz!uSrW0*bO==q3zz+leCT(PT&hL6OU zS22BEbKs!1SKm)JQ z0K!>~7EiK!Nje5T>yoAnXBB+^`tXM|>eG8yid;g1z@rNhiQ)*SpWPfpu_&r z(2-1!0t5gD;aU1?#+<51vbI<+SaZ-NIE8Y^`hM)Y-UraPXLK*E9!rqJr0o0u!hiy+ zs;3VG*gFBuG@9H+z2~n>eH9w?KW5Q~`--_kzAiHPP~OU;88B|9t*UmrF|R@Ybz(HW z;_!2UxX{4L)=kX?a`Quh+6FvWs8B7}+DDCXJFA_ANAL)Y#4u*%gk<#NC^gomq)EVY z#~x%YF=f|PraWDTBC|g!F=&9-*Co=(te$^eGtfUs zrFu8_^P~WvHDbu&c~&mR<7iMPGqNA->#~8I2mL;e3Hu?l1qN9a3y;u8ATZ8TzUDtx zk<890s1#|R8I@@Hi{hHcJhdniYZWC5pG_xu(=*2rk>=ArOo!g&*@pw&=;Bgg#DX#p z*U9wJA>Qth^1shpafHdE=b(t72-@3Qw zFP_b1wk+>r|CKGxbf*jd_0#F zh*)_3=O`>{Tl*Byi<1!5`EF~BGx#ov7U zLS$xByjAg6d;^4#5imwh*FHh7%|w?54)zi`5;G-8kP-}&(H$m{u&I# zj6kW4G|IZ1&x&xr|8jrR2NA;7{uo_`l3U;pdGqE1+W>v~I13L>{GY5h47jVx!@f(~Ul&^vgn^UGFQ>=@B380DOTnfA$uVFlQt5N=}SU>Gx)k=L>Tw?=@FJfl!T5J*_u zOE<)9Llr_EI+JvJK)|W3(8W}DrDENhBg4 z1_YM@@yL>&mjHsWB@YNPfXF8h2o4G@Ig_hF2r(|2jE!-$2tcWu$r1|WCO}4n3;=%k z#l1Y>6ewV@(8??~uX%!Do`DHtL4#2U{Y-_R&A5_uaMUMIrAZ}4q!I_Y7KcV4ml}@Y zkVOfWlo-^4q(wGCjo(NDpxG>@O!Mrd1TX+7fH4S54e;DK^5Sbix5ArU4}$s53EkL} z1ds>XoIod800RR7vOqvigL?z{pkcTzfMLmNq*_4|>}+vBP@rx)K`xzl2eQ~rqe#oM=rgS!!aoF;FNu#SbZyG6PLoNoa;yjGA>36i@)UQI*+$^bipMz<3H_?H1|wA@HJ6`|@nQ!*3h| z2$=z5NLz#~mZ9)vxZ@VSa}W~Z{08FL3ke`V4qN~P;3lryf`oAcawI?iVZyS#de9lR z{|qx?ANG#tp|y3=~p5 zA`oMPaIQ5#06_;C05^?prf=e6c@0P)8J?YK8UZ4+rP<%*c@NLh9}b51TXyt<^Mh5R zAOr+ZcRmO+y`A+Q_35~Gr!xRBy5Q~rI6xBsk=P&sqW}Pu@)2@sU>dpL4g&y6(8F+0 z09tDNq9sTpmD0vYsDOBug_)qvEZKR`Vw%S+7Ie(kyv#Dc%7YbQ=Z<@}O?b2|Q*gfE z!@w}RXPE2`Xl`2Q?92vY`0*K*+_!Oq_r9=k4#F#fn;Z58NUyHnr(PM}To0c5E%N)g z`;`D6wz=t+E31^q-nDy>LGC^=f#7`mWkrCH?YZMKDt=c>Rsn>W{G`n;Stfa%!_#Dp z2c>yqz@RKcFa~}qwM=VYf>0zrj{AJ{ zt7MT(N=^ZzfF+d!RU?KnLoeHmUAQKF(DdLk0K_fl?h+k8BmP)N={a~}pU_iZ7KHe) z*K`aZjJ!99H#6sC1q9Zg1t(d2_CNF^oV6t8@b>+NlH}yI+f5p9y2%2voNT@=&Pa@z z2FH6p##81r;K5Tau^G1jSU>&=`$q8ksJKu4Nw4Wfn0V>Ox!=st`f>kPm+du>?fwh* zB@BRzBAS5vXfFM?w$mOCR;r zy#{EW{#ws3M(1a|&Pz^Z2Idp*avWMSMehE}dGWda{NyA?f zEKO=4+i<>#Mq(lf)5TB;TN)Ig3V5U37Y!id#wr1+(<=SGl7ASOJdq7eRar@fG6&SXXY|P$1 z{zlf9y$oRNY$p?xyaJnwBhx?dBGJV3V}7g$tr6(k-{dR}cxD2-Kga)N5{wQ6*5}W& z*zOrGjYf{SE)TFevu5GpoQ!#Re$BYd(O}^0xdQ;^7-R6FTt(YBy4?YQCI|vkkshNB zV3aX2U#^{0Renq55(G(QHo_S!QMGEe90M511-%LYP=ImxS$LJ$ZCw;)R~a*{QIOjK z2yI3Pz-f`0TPzm9AUU8q9{`Lw`W*D5g$JBBLu<(76eEr(7x0Lz<^}C#p9)1 z(%tJ$@(vxf8O0iX^nS)(wStV)1f>K3S&CM8oKQ;F-r!1%?2OUIW1>Kcu33ZtYH6G9 zJpf?^JX`>25H5oZ08|Og;F>Y(=l4WySqCI&1CGXuUXiFtt_4P|ONS6ahNvv5nNu^T zu$)tau??6&Wa*4RO@NF9N`S13UNkP+6k)hbQXvGwv70A(4Mr9T!_u%YEFVV20CKbe z0~1iIZes}ZtucG$^&&ky)BfuXPEs;zkx3c91z9}?H0ezk2+}m6D}a&Glo?SnjRq*d z2`dAOclhGD76e14v{W4f0npa~23>}-<_2g1Gj1@>p;q$}?nO67oY-e12MJj(LIPw_ 
zXhe-tf(gQc1SI;b6O`G-5{1&qQo{fUVc0;30CL0zAdF{_AmfH((1bex8`vU*fB_+x zII?79XuuEz13O;>sODK(r|Rax`E-PX^>n$PNGz=NX_x@wxsjY;3_uwSo*{BcWMQvl zs{skRd}t2sXQAROtV|s&cNa53)QQrdH$g=NW86PE03;2}PQ_c%QJd3H0ArMaU83lj zqiQ3OH)p67i7|qJN3yJ-j5g#5!cNMvu`-lc=@3gaioFhKfYKCiRmg*-5PrIHwLoB2Kqphs(00joiaIuxmr5!_{sijnC5= zq{OXIH7I~$)Pf3SQ3L`Y!R?F!8DtC?aq(}1lY9@-S{ZE@GrxPQI$xH9tcNy&a`YHA z2)A~|MarT>Fqp?_+NOHSgpTE3Ry>M(d7d<2k^sdp4hR@9*g|E}B)2k+Mm(j0jT9(( zHZ1Yt4}N)&mmryCjaUh6O9;cJ9c>%1#QS)*IlY$VR1_y!+_fHr`JwGKN&2)I46d0h10qDKs5fCBMyU&Vd zXvRjK`3yEVP{f#8ucbT-J<&M59p4l0?!i*BYPnYe`j|2=RM*0TsWZx}=m8h@V4D$m z{KT9PSVnZ`n?^lp23OVs((z58Ga!kDLQl$#XCnlWND0td$-y*N(}%Tnt@pp|`=6d^ zTU)C7SjT46vvpQLO_vrmRSl5lmkeHA-C;Z@3j@OSkrJt@paM{NvzK?Za%W&ZJRGuc zw~hl5IC*plSfJU~5`n;{fh4YR{gS6YG6~3&mV`R_nQ(zn;HJ0whFLY$6+UZLk_PZ4 z@B6JYHzhq?ec46`u=&RMaW9s`Lp+hAkurJaphgp+2 zLL@8LTMVN)FjA;^neASpIi-u&_Pm?9TB{Bd3RzYXWsJ@u6m#s{Fi^`0!Wj#_y|Q$q6aM{H+VkS_Sd22d{%@m z@L4*QlmBlX%D_NUrr}KAgn$fsG1>xl+^u?7pZEKCtnzuP6k*bEib0i0C_)jfW}uW3 zycj+}6eCei>I;&6T;TygFautzOcOL&q)E5^(=$cWJE(8_&p8?^P5AZW>rwxdWMOq; z^tZlQoa2KN+8@P}A7K4DSN<4_4!&%?>GiEB#M+b7hPS1@&(GB}xNGVL+62?;b^eA= zKI}oSpYoTwrq!gUmwCSkjJV&cJ(<}y2KT4Fqj&L)-uq$=$NI#ZToOFgr_WnrNetOe zZItr1*Ng#}uX)@G4Z-y@O$Q$FnuegsrCy6h(55Y?OMGO+0?T9$bb$@ZeU$%%@CuyFA0^Pvy~NE~>Hj~^Tj zY1e#ff5T&hnVC*J009qu|EsK#^RM*cE4N`h2m#E(Q~$~dz5@&588s94k?VE@wrp7_ zfk2wY2Q!4xOhD_7H$PdEfRsU43pYh7J$&FRX*XChMZwS=2g@9#lm??fmNr%z8$c;? ztjic!NEhSroYvKaM*3uBu}oK;_`&kE=exLWA*W9JP%6%uI48x^?7y6007zrpsD=vx zgb7v@cR0y0%h`c&YzwlfHU$$s3@|bf7Xu77LbcAe7@4iKJ4TC!&ziY9d7PQim1{PP zXSoK1K?)`CSq$%O&yaLtf_WyxJX-=%vM{FU2nLA6m=wf7>;SvY))I(?)y6jX!U6RX>Fnzr9&x;MpiINw%$nuQe|q&_Ubd{wC`SJ=jvu|dPZD1#Rw!IQ3)VH zDWl0crl&k-N;&VtH3_J(%(-e08XSa86tOzJ9LVf>1)$*^s(yAhAlBO&gaqtBFR@Z$ z(!V2K5D)fY%cO8IuqOe{$MaD9C zmCz>xAG^BwC@H_ALil50?+7ECu0Zss*umIO{Y*?>$<>H$_Z$jDd%Cgf8RFTDX7 zkm>zVm;h?(L}5LlfVnUk3g|M15FkydK|tswff%_M31XoH8-Np}6RgVq5I66e2AGx- zBruB54x8sf51RW@^)(`kPG_wF&#(&zgMdpq?MqHQj-msC{u6Bv)w zMjH*w=nP;^MczWPZ7`yQK?KvM$p=6fKnCP;9a$iRum%w$07Dis2DvZpNR-TLYVtMx zW&_2MMk63JW*N{1S2MvT97PaB0Kt=m9h?IrAXCSV#hUrIdb&-QTFkm=sHx%5VDsVk zo^a32-N#LucVI3(aHkSVN%2=;|HFu81{qogv7N=fI)L@_yArZ?9`<&|oMNGF{hmK6 zLehS*u`o8nlwoFu6;6NSz1?7_>|I@jrUhL~#+D07JN{7Xn3AD&l!V_yE6-|q+ z`ekh{-jvnZI4`pamqJ3!sq#%@gUMFhcx=%@+wm;%f=l2qvxw(usldRTD$odJdTBO* zmnT`L_^R*0(7SaJc}Ff+Y%hpLpxGA40v%tA1xRMEE@oSxg5Tum5@A5ylU+E2eUhIh zFMT>&=o*3bfG6L!$4-9mz3@GA%g@W4n|waJ0-S$4-1}`JXa{hRzUn%ll*@YOEmR|B zKP0h+ZELsg$da77@jL#o@=8ONcr6)98#>lsJVJLs#%rYg{wcVna_^}o4y;urV`s$QTVc=BZAtQp#N%N6Ii(_jhK zG$8c!gvA0jBrMcyItXOyb>C;XPZG6tGv`fiE?5&__PyE2X`*z~}t0u3T+v zLa;<&y6|SNY?v%ScG+EPi=c--#w-?7Kdq`6GKI%u?22>K{ z&WuAVf-?s6LJtlcwI+7GaV$5Iv|58bN(UVpefLNmZoeZ2M;mByU&UR0Ycx%e(Sf)b zKM0KiCZmG(E|z4tXBFxN4)o5cN}z3<_?vw^(fSx(5x~^H_G;eTa(nwJ{sK>Ndmu#g zmFDGtkj$hQWQ?#-;OKw7k^=dYSNj`qB*d`xUAz!OnI+(G%e?jVttdz#N+?7yUJkzU z-|FYS0yap4XYoSmo-E2&d8jFmKE;>xS1xSwX^3Kjg?j)d4}RQ93!8~>;q&4R4k9}k zr+>g7@sojZb^8so#+?9NfQxr_c6RJM?1|$7z5zV1p(@jvuCxx7dQLRI!^x%dW-?)D z1>t^T<9eD~*=IAwPzhT)>H=rID(*9_zhDUG)tW-2QPLmtDYW~zX?*tX2QEw_h{Y;_ zf~WWs?W-1)fSH0FdR$aqmcN~=ZRU;4cc;RBEHH(xSHti1mrjT2`o@)bzwf=D_g-%B zZ6t5m+&YN7J~XcVJJMV98--v}4(_|6&(o$`oO0I77JT{o?ccOZ>tI;`37I;)Z}s7R zNQ2DqTWau}(Y^=#r*3Y*B1|S7K_E@=xGo@EbqlhCu^pMkRX3ABtQJ=23J==*eKyI) z8_Xu%U{%SM=hEeMs05P;wENCqTk(pcp!>gUJ0h}L0;{u|7~PN_de&GLsF6looS2$R zU>mY;3fdjvG{%(k<5k&Jgcp{;*x_OYzPKxjQk1f3(*zheq%ckYyhV{;&_~`cEg|$% z2r3w@I4Y~QGy}SXZJsxTB|VVYyRwY%+$H@^=xN3vL|#C|GML#pfP(;mAhTkxMqV%o z44P>Z!+fAN#r{^0NIZG_UKpMQh=jn_WE#T7bZEQYR5~lZ{R>(-KDg-o(czEfBukdL zF0u`yVcW4&w!!>GL>WU9dKB&zB+YE-OItOzdTsoraYFvj1gZ@>;3wJ2g$$M*r4Vu_ 
zD=yh1XY|I%uCVdq#=0WNIIorb`aEqwr%h#ZLVI7v(P2qk{K6H}rCx|dAbGmf*! zL1(kvKkJl52ki^f10WzFO(xPrI3~-0sY&y3kA!N=WDBs^u&ydp0T%f4|C9qXs~^E*Kj~Z_p9iVc_Au ztPWR|_i*HFtOt3Wr%I}})vZ!i0XaCG=iPx4D?+m9#tcf5fC>hElvgRy(mY6I3SN}p zNOn2m=j3WS-B}W^nG;iC4ZOX%KT* zWa-H+hOY7a+mKfm0ntbUdgh=34q`wSP|~lb*%+B(354Jde_j-1T=K8PQ3G=zMhpnt zV&ruZFakz7AqdI2giPA6L4YupAb|!HfeaIpVaqVptf^2gC-`PWo?>8{Y zHa8oxBYPzguWnLI4DXzs%LvP^)|wX>(|E5*4OW^iw3!i=ZLRIO@VmiHs2A3W4OPO$Bu9`UnVYqDzl7P~&aq zvwS+CN0=Xweiu(4shM^$C2oA4Ck~_jEXx}GZO8i{1!V~;vM=t~+c@mUbDN|86%AAH z+3x`Fhsa_lhwBu2AoYv~KtTI`+yOAKt;Ql{c!zS?*NwXM^(dM0aPNSwGsNC*$(k#4 zq5(4Xa{^=m7#u+Wv4bU0lmrRNxw>a=_E0jtZOMX!0NP*-s27~#ly!)_F_E!+ibf4+*vitVm{9N8Lxb!N*83F8I>LvenL&<8LWpOtpAB)jrXXy(GTg_GArAd4!NTPQESi_i}~va<}pu$PPYI4>f zVy|m}zuy4?7F!&FQ3K32fYOve(?HXVG-SktP0G0`bWS#K2m|{E0D^g000LnHO3nj^ zaA3o0w4rAkE%s`6+^Nch0@X81iiLr4X4yyqk!6EBz*Zox3*{I)YdEXCUuav{9eRSY z%V;JgEZ}F%`Y$yMB^H{Yu84T1?6vnHB&L+QfV3}@-jS5cT3KA}Pq}LKvPKHmL zksdI^($XyzQPLDxmwr^c;p*7SH|?{xD@4O!V*SH1?CG~L09|(Nu@EJb37I^#D&$kXUP4^2}29uvDXB4R5ZEt(awwmN%|7G748)dlvb@3M4 zbtFs|KSyqs5IdJW@s?fZGlA*gbL9YJai)YAG1T}FKO_zSGU25$#snBz z8Um#9E&FJfpG|vO&z|%qiFf3RR;#5k?Hg}lMZWcFp9@;jdIrfa2);I(-k~JitnRIW2sKYAi-zDlo*_>;u7dU-45vC;H3mSp;(SA>LokIc7Wkoxb-MKQJu@ zUPHL!ul;ormD~UkSo=EP8nhtrCcUi|DcV{k{t-wo!y(I3J5T64cObod0S^N%9M+wOQVc2Qh1WiDiPlv z%$fGflO3fl`}N8R7KT%%zU_HJ=Mz1eJ*s}Q-8pK$SXe;(pQyr#JfQc zwb$jB!-lS2SA1!9JRe8NjdGPsfJ#RW@msA%-WytvU;z!MdERTTc^6D-1V{+5bJ=>3 zKjj};9EdOa9h#qTYZicQeWM@I*SJ3$@)lU2aH%=({pX48KJy`aeA7ISN4)GTfI6}D zLnb3JtowlYnLpo`yo|B4(LVJ7@@~&REd%fTQ~XBxf%hr4AF(pH{cGjBVX*-i_(oYH zfcP2mhAf=krU^*ce+Y)r2VE8;M819A@9TqwZ~3^V5tmsQ?lq@I(qy!+Gb-(Wm4)C( zXAQ5-L5s_0tG{$>(pjE8M*2TSc*qLm`|(aq8wb?FzqNIW%Ze92-BBfNi8v?tjh<<& zzZV-W2Nho1!e#}$9GVMvy}Xe&QcyNQN;YuazQ1Gs%>Ng$YO^VBBB96n@&qmPBCL(A zErl*?ul5bOyfGKpyxVv``JAs3!DI)x8NQ(93_)vv5wO_}#Ma%+7j90Oau3!%^Ed3$ zI>6`+N?tHv3#jSW`r}{k3wv{a?E~R{$$uR@{lWJZ1X~gEVc42Q0T^Hw5{g6#5_7l# z!?24mJ_8~(v;7EknUMp)EEPe#U6)tjj3bHkybe*d_ z!l$MCUWk%1az8vNMb}~@D#9^1O+OSIg-GJQNvAbSo&CB$a?w>)8P((vk!`(L{`e)I zg-tDGd}DhCxEt9Ee2&BsMVPyqQxl7i2j?8r$o7a+LbuH9^ypy1OIPh$yNP=a?)@wiWfKW*+-Ig*Jc!8W=sk^R^` zvuV5S=y#TvJXxynZ&58?m~Xm|lWzBTggn0{9Z1^Gp>G^{X=uXvYCR4&)ICkD00WS% zc9U;c5@r-)fnr3*rKBl2=6b%xr#=&THS7g{iWLT5Q3|&+Ju?f*r2Ip)n67l^B4q^ z9XX6w88vS3YIhp#7r+RZiz;LS$Ydu=0AT^CMg}XI-bQi{kP$}UO0ofj`w$Teq`(uf zupmIcM@TFH7X+ZgY=B}|7DovIAsGvDOoYf#2mvsJBh;Ymz*xD{HE3MMJhXBu>D44e ztnpLg0_#=G$FO3#EAR;8-5|HbJ>QrCsm&q{yGQI=)xy`#j~fd(w*Tvv z?U?(yI_d7STh@N#9P7$~X8qa`vV`Ey`Wz=x_Q8Gld#?dqI59Q$Gl(0tou*UV`(so2 zES(UL51eiV>P=igEoTynMvs#aGnvwKXpg%co2?U&0a5~-sHX~TDlnt5VN@Q|O;~5# zzRV%oXeH{z*pbAreiAcnCkP5e|L{MY)x?GXL0KfC@rop{tXdNmSWFveK!y?l5fGq* zkO(M5h6)A)2?jHg5Dnw8Rv4+2ci8bhYO2u?09cDrnefUxly=(glBQ)Q0*rzhkK<;7 ztEPJ|Y9bau8J^s7r`@m;^aJ-_WF;3CNQ++Z zj`#bg-cRH!4s{pCqcV5Kcv~DPdly3=HyaWIsM8ow2wD?&Wj9tz#dDByy zUL|UicO(PT)3N|8RiM#n0AU!va~w&Ia#dw_fH8mpfi{PL=)3ld4K_2Z504)P7T%n0 zvw`I5{uE?%+MOsy>L3b^K)|L_mPZVzedB6=&vkzO(zC>DyfMGYrOD~zBS8>NylpRF zG;K6WGN@-x4OeU}a|YCoEByK^G^#TD>pyce?g2omDCtZ^InFBhe{6S;diBJzML*yj&G6FIi*B zT_Rf#NDVz7lTf~_h$POV3?sP{%hrODMuKeOT8tH20eIIJq1}E&3oI{?L;T#%A#R=Q z><{7}r#Zr-JosNIznswipCs$;yg$2zweEYlm#2gZx(ENaZ%I7z-#J+X5*dttqFxlw zv95^6*GUWD;3Lmn^Kl`}Q2?foibvJvRgTsMGxFAK(8~))1%TNi9^H;|q=U6>5Jp&N zPm*LZQbL%eI4z^PgF$Q@7+3`RkS_Y+yfoFc5Axh z=UHB4UXUajM%;%R%Mf0bAs4iwPU4EOiG$jv9>lL`FDpk+$=)@Uiaz3+uaP|&>%adv zHX-+pheN_#4qJ+R#rL0yjHcv=u>SvWaHxRLV#|SIQ?%LR&0DUtBdc@MHV52=gBGa~ zI?;P!&MJ>SVD6nofKfnxc!2Oh5}(AyEjj#F*W+bJHza}YZf=aB8$h^YKDgR18(`}x z{_jh=S_rk00dK?)D!)mA%j3_00H}-`g>hR_sc~4 z_~HK=w@0ZD;F15=^IZsV7;V35zG&_RB>;gY)A1kcPkr*^Fu)V)_-8~<4KVdpe}gk^ 
z5Vl`CpX3{-yAV2wwff`#xzU3w=ni6svJd;<;VnnX;N+T3k1XhQ8q$l_z)0sOj~p?U zR&$0w#3BXYdq4pw@+!P_F!q^d6ZAcKd)JEiPq&Ew{`*=kJwJh*xAMqvYbEz3Nbf(k z!#i%ov6-J0l3@<7XRc$5YbZ1} zSWkPCFM2Te?!Nw4yuDZ&_%xo``=K92a+aMilGJyGD?5qceg)CIU2@y^du-AVBh| z!0yRlJF-z}jYXmich9C1VU@yac41N-sd%qbY|z^0-JJ%UnG;b|Fc!+~PbwHveri=L)_hsq_)J`4kO@*Pad; zUR=u;S9aq1iS=t(*WeGUIdOivDr^=8){?GVcs=@12(&2rYd7xp=zC@X{PLv`yLj!w zwN6zcFC!2}EQ!t%nmQcce{yR;&0sK%Ml_p25JL1+Qm zl$SfC$C%S$pTv$jOV#u1mPXM(Dx@|>HyODNDlG5Vjr$n6rD`cekp*#g$&Dk$C%X}` zkwy}^aV+9*c0X*K5fNtG+~;W5mn?;)h1Mj@L{Q$r*P!i1VZj@dysm`t;g>57SQpW7 z&7!&x3CTrq;BVJ*C+Re!KGuv;L#9thM4tC!SCH64nOVDjhfPFm)vq6yr`P&biseA+ zAV31xRHyVa$#Kdt32>^5EUXv=vu08$2&*vcP*r4ia6`%}I;lcBc&U=d$M6c80f8Y2 zPyn3>Qe^~)AwURss05IJ-CziTBm|O4As|jmd7z=CnO-@sWN$8I1XFL zL6%O$5yT<|L5RFsbQK^{t2uz^Xfx9tkr%Egl7n{yY2%+cMrJo-8%nSAKzEi)1Z3%_ z>3gb?;cq_8FinvG{0G^zW|>n?m0Q#bNpUj;nkZ(GtdcEu48Ixt01e24!Va*A1je0G zfrVx3$k2}1$lYFd!Bc@ykdQ^8Y)1tn*@9^{!ji$r!NP$jowP;>Q9H0<1m^BNcE1GE(A+S7U0lnG@QQFlsM z6bcVtYa0U-x)Za-U=Y}j7=#hsVx=iC&MsnVvC_GcEwHdJGry#Kj^PM0iWF^5BU8QyN~V-V1;CJwC4jR~ zv%SZnUbI(fVo`j>xl)uEzHerq#g^w6F!?9K2pcHmg?tlQIJy~-9m{C`vo)%v`MBpO z)76r7UtzfSk&yC1X4z7c>*#8c-!Yx%=GacLzuGiOzw?Nx?cG)OyF6 zlF8T3Jb}QFwLU_EGR47%xtHB(ao^V9@`BoF6D=0vgS)2~P6W&uI+2b4y%V(BRs6w@ zZ30*8*n9%2@Ofoa3KLW7h>P~yjTFE0=6wY?s_!7J~f7XlO)YT3#? zMAO>d1uq{g93O7rpvv+l$1I7l+vn>?a>~C?3Yae>osh-I?olFLhrRO?RN-q&Y9TUF953* z_5|*q_S@m)zJpGZxL@Di+lkXYc`&D!0&4rIB3rNpqZ%PHZH~zy>s97wjzm8V^2H1L zY}S1^Fid%Sm98Zf%ytHF(6Co#g0?PAd28gEL(X-W`?6cT<)X3dWqoz&}hnotvSRJ@5vxnOG>wwb63{8YPShMueq~8z!JnRw5r|ZE&8R6YO2a;p}=1 znG>y?tHpUIPcKy@Y7%upii-m`&tlhjDhM~l>>fDhnANZuvYM?4S#XRi*WjXfu4(>pNOCI@S-LZ;YQ%(mj-NPUk^fuC3T|Y#M)hMAEGK85ie3RL} zvsQ450ii&U8j>VPfQF=;?sZ>Qms7aX_V50--6~EAS{O7Zw$=a}%?a!Btd5yA2$z2Q zUc_6&UR&xJ6(@RIY-p(;&k1MM7lktP;AhSrhS>mw*1YCXb%Ubk#8D}MH!HWTUNp9To9W(QsruBj4k2Cs(R_=6ilE~-VUZDyJoR4_J+ zPLoPkX91sh)Dzwj2mk}ia>Myr|GoX|I%o$%JNI-YT}KT<AJmHLd1Y<6h6$y_3;b`Q(x&CH_`Z0M$41@HW@uyztL;WeunIh_)FYL?=xI8 zQz)nbIeG0(oA2coSx8KL+MM^E)h~E2Gn<9n&GPMhl>hwyG7Mhwuk*oRYOsK8l+)n! z`jX$dxv%x%gviIv=g7xfw*Wy$9|BQm3bVtXx;af0e{K0_4cz_cZw8JAJr@G_>GIid zEfFBOW5dFqoKF=iO;%PLp(S*UZ-Q<&1$PywOXE7a+rsSB-~AGH6XHl03B- zKmocYF>oKj$Ak}*?9|i)DmqjUj&xC2asjd@2oU#vz6T%+U_`@+G3zvp?)SF24!}C& z{qc%afDFg)7HCEnKf+U|gE85`^lt8${a}tN`<6Q;;rU2%t zHs^r|oZ;ZqiVtpCgv~gT;us6ZHKJ?l${>r7t6;``(t`S-SuIhROZ%Ow{NFQ70U{Er z1+wQ{f&`8Lz|5M(VVL2C{kVe}gghBa7K>X%9V2yl&IQ(dykSST8UI0c2j}p+Qqg)! 
zu)=bZ&|@NQJtHVc&l`g;h5!W(fl5g0pK`Y?@mu#^7XJ+b+Kb#k*;F*3kxcP2J}f~A ziz~D5@^hiUjMtA}xzGwMotN6%I zzgtBm-|#}m0O4g2j$)77C~H6ov680-SsX|o^XgMuQ=k|#aR_3|W(h~cw)k8z&i9bM zapyfdVMuk01soa}-vAOry#=HpEjn(Rw~*FqOereN?j4PTMQyN}iS7s!_9B=iUARUT zk|7|}3PhIB8bnVk24u?y&5(uWB*{Wcjs&6+>(nGFP-IB7ls08qk^s}U=sP+H0~?Cg zP`C%j1PBOdKm%Yg9SCwcyig9rn_}g9$~5de*T0fVShhu0e<3d$Y@jtA3u~|C5#^dV z8#weQe#Ma4B%FeOrCBR}Klj(C$niL7Uo=HMp|2tOS}jF)8F9(~4J8)F#9-k%#kq`J z`ccDTUj4k~wkc-Ld9WdRa5)dfB8k^-O!4Brz(L01<3Go7B4iz5aH?;nV9UKff)23L zZQ4x)m#wfN0g2iIvT|)`*ZR;n8o~fOG#ERJn5yg~lvzSHOsGjfD(kv#o%K#w7~sRi zVzF5kh@PGps8@*KGoN9x1$LrdbOjKAt|6Rm4OkEjjB@mHpf<5( zb2rT|dG6EZ?)a*{hUt%2RhLccs{GpPBwo&){osAbN7gfbP|q{A>tQz1ubWOWc4X@_ zhiAXO#k(!^TkhJw#oLsHEGfqC0u4&!3mVx(kVH7ml-PF%V&UwKFSbi+JZ;F!4tG{l zPT+zBU>FEvinC+#Sug?v>VN@Jn6iO>Vvr$AN`nDp02?qUiJApa0tojyw*(Rh=%O?w zfIt|A#St2$OGkOixTqDPI9+#CjN+Yl#-6>-B_^;h7zjB|YCrE@#`oL3oFiLA^aDWs zkRMrP*#H%OSbfCqeYC94`Yc-HF$s`YWv%RC#6$d}%;2rR$9;p`_O3a{tLk$Ng0H>_ z`7Y&NV%d%HJY@m>R#)W*;aA|N7|u)rk104h)?5rb?B`@+wxG0-6iqe3?zL;;d)DxP|v zth6meoDd4pI-k*#XuBZ7Mv+EZoAooN49&`-+jk_tjXcI9V2!+~p^@F1*n_xU&mQ4s z9@JH}itlFA5^!_MwJRkNm&8fZwZe-C>yg`b%h#-q5f3x!#=F&eInd?`cA-Dmy9O2; z#5-;m=DT4Q3t^yq-uJ)c-(rlo5-SAT3qnm%rgV=4&;_7ua(l6TIZ@EB)e!$efC0)} zmXw?_dmJGU5RM|RTnAjzn6VaM2?K!!;S$-di}3)F6wn_Y5QJ3&0vMRF!zqIpT&k^j zrCIBSJ&2PzevpB zw6GzPEFK-&%@G*+!jd;mId%)X`b!@uTENiI2nbE84NNwm#7g1afq{>J$Y3h|*>8}fDAjX>(A8Da?3?&Mfr-@RG6zX_v#`|cRfd)pY6kG90fc)Q-p zU&v<9ACjI_t@&}BQY7j5#9r2^2=TRR38U>OUfyXC^ARdoK%@lXDi2?%ihEjMX_&cjbfz>;VFf$qts zJ9Xjquayme)K`1*=Z0*7T7Twzc5ES0xcX7wI8C-iP5;dQ|NmZzz{7vsQ+tVIb?R$p z4Vb`%Cx0#+Fqr*{-#D?GclZ8Ac3fS4kXRO-`KDTG^f+_=9+UKe_Y72_2Kw4Oub%ra zatZR;=6P}(?s6X>VGk8g_g40CPZ$zaLT44dCa?HK2=bk{9{-8A#yC>~8pgxq}H4YJdnNjZe5e;STH9x$`*_ zzAU~hLd<%QE$1Irs0Q~|KgeP&2&@|Gm?uEIYu=Ok8Dv}39wglRrVpP`sP$XE)_QP7|8kk-VDFmJG}q4S|Ibuu8u^m{9^ehLmDc7AGTX^#lwZuA7K!xGRCRfE!+z4 z=e%<*4WTV7>IlXiLexG28*{EE$BUlGr*MXu0B9*X*VlP3;Lc{VR{h-X^n4(cd!vOI zaz=)4{6W6=wE@D?_1;fDREWWnF;H_t5U&5DeDJdc|43qrvYwF-i_RQbk7&L&O2 zpKncamjp)X{~W*Q+QT7vE2F2Hla zS#pcam}`MkH-S<2vMRZbmru5SAJ!Siip$pZHn6f%1sE7Vwh`w&i~9xcs>o+S;fT0# zD_r5in8Hy52w?W0_jE=ye7`c2eY`UOR{NER)$DH|2-t*-2qh5g!~F|X4=g()=0ew; z!tkyjmwPYi8bkg)h9@j95g(EZTG-Hrp65uyJbsh&X#@=PktE z*D6F9>LN9aK<6C?j17n}N04B4oM;ORvV@w6YkYj`VL9Of6#(Ysne#)jlQ1@#h-hNK zEwt1OlT=X@O=NKh1QbF>8q5GxK#U&~UWk)|f(b-M=Kwggw^l2Q8sMv09HR`yc zb$u`LI|Wzd zrc!otu9hI@X08(yJ$B!{wbAd&$z^IsdKW@RHnm2?_$&MV7+X;YVbDTKz)UETLdn9x zlmbQqK-!Fepv_>zM;}coDdo(1fn|`PRv?CB%9)`>emNr?bj^f(0#eBz76rRYpc-KW zjt1G~9GY$dG@u|Pfr2z$zW*rF< zj5u!teOE8`G;oUU+-$SVo(%4Lx_(TlYQA^mvSdKg1)g^}-@p7@PJgmN!@8b+iL?y} z-Z{N*J1A8i)cwZv?p)zHx?y(A&WCS@#J&q&+JAH^osZCK|A<@gj)(X3dNF4a`r5J{ z^iELr@DOGw-oX$rhCu=dPXY&ML*XG73R_;67b6^vFi7MDqR;|L1C(>}<-8;>K-8kx z3JA|j`o9IY814=Pw_q5RahwJW1mal$+7JXJE*lkyqYxJetklx1U2Tq6f>Wg$uOsmDYn%O}tU6)?r#~?3nb<-zuIoPVw zAxQB=!iA}QqYrLa6HbQg-SN#p<8Z|I6YcHxW)|7+m31}t{ZCxTQ_DdsurquqIpHn* z(Squ&A@B|MB*+yqwWtZqZZLy!gz#zmGtqb#EZL*rUoQpYv7{>UjOW8W5u;Uq3CP9r0Xuov$(U%%(?L>8=B4CD}+a zk{tMJDi=$HDGwdu9qY&U>{^#L`*}j$L+;Tlp?pEFde>dDemDe1_EPTkPVZ?EG*3%M z1x<{)bepEZ-P@`zT2Rt|(m+c@c?2E>^{rZq_?+Uyob4f+I;>8poP=^w*%6^cK1ZU z!|qRm3lyYl{TXqxQ&3AousnouAO}VPa2rrB$ts%-7QELR?eY$D_m<)ZC1}4W?N{XZ z6wtu8@Rt4zKcuZ^!r}j~5IVx{xAlB5YBy3MXh`I;5#J6SY4oygSys~-ZjhDkFS*rq zlLn?(ui7=vGJe?Cd5n0Z1R@+{CUelFC`PAmJDA$%I{Mh#Vu#?Ym$7wAJi|Y1)*X^? 
zU4VV~IX}{sIC?EG{$}@I_H%hoC}jc0FZoV+h;8Mn9$$+O&Ytqt&6anKusywnS^}$; zEp0uKXSil3p?c$y3<+qmBJ>-12C}Zo&CR8gfYco&reiiF;1~IGf2%kTwxa~{5F(fS zyL(Ex1Q=HzSFPx@`QN`Mhz$I0z8r42fF<+(?mm;M-;d*DxOLe(8iEx{I*#@^jh3#I@bQo30A`NFz4Pjy z`I!sn)B_(3;lutTT$8AL?rn4H#n2cWA3tB)oy;1830gkK51gyr6QWq4|BhJ;&b+Vt z8aPsW;+eQC@qO~z;Bu1&Ywwc3z!%G8g`ru{)AR-L$v(7#Lfvi!gpc%&AACTFP^CKu z?|swM0YNE*?e(?&-lhk--sYr%jI1sMw170|FeLd;lZuZtTYqEU0KeHT%S&ILyDaU( z<}`%MQIE_f+skTJ4J(K9W~(nYu?MB}GLL@vKP2R33o^Y%50hWs*7NJ%{rN#GZ&52R zY{5JCQTr2Zqs4`Vx2GvVc!j&;Ca${CMF;VE0AwxLHaq=Dv&eeg*6vn@I*XpoTfz3y z24Ro{?kmr0{)C{5EkFzC-i9&$j5Z+OszCk z`!dxx_|}dk8 z_-WtqN*0^m5S#6Vd5?ujk@xV$tx!g+4koS5sR3STQ-~2?1eBr&K18yPoy&1q31`j< z!AmF~gwsSwHo$2~lY4+a(7|2e*NEA;78}#WS*jO&m zgN^Ka5Hn)zoarrdLL@K^VYR<``*IT!B*Q+Sfh14>>BC)WV9I9hZ%6^D$JKR#)M~)X zNc^B!s8E#|7&{ALrUmDDjECK^ZTJ2ppTl<(9=TgDA31E$h0bx-)Oso+N45&zn&upS zVo-y_Hs2CEKpPn25H}0#svYknR=;rXE_|$1Ws5{8k(O>q{!vK>R};cub2)6rnX3Z4 zN-$)dXpOu_ge~`v4qomM9ey3mIe&d(MOx;$l2 z_rM512439niGKq_gbwfuhMS$Jl{mc&Q8j3>VQNjY1k9>NwGw?h0THOt-pEo`76l+f zA%mi-vkBTnUy@tqOt2Laf;-Lw1VIdw7963m&>rl3*$I}1+kJ50=>}9ZMk)FMue2vX zFKzGJX$}pb3Kyp+iyY*Gr|6#?31QC2&+S3{O$KZYzN$^U1;~ zyVz0R0opNQ8ZlrY4isUQN1G~w5w8b42fHx=ODm&Htzr?pE0kjqSUz@laS`+`4n0Mc z=tZoY5kZn!Yezr49KpdunJ(a68$yve^L={7HBGrAkD1Zxu9D*QVNGAeB0uTX-$NA*4!qXk-x95HJ-{?8*PuoL8@Rw|kN8<8-E2n&&=G zo5!>ukZGR-Uy;t`S;e|%bNHB?`3{@zGQI1P*Bi;$nfDe1UaEpKdK|;VG z1P#N+5GKUH+z^l%nFuWnDMuNa0AZM*XnmGT09dWhQGb&kKIK8NXP5#({o@<_*{`LO zyXhcuA0?j)iBhFbA%4QHw4Jb0C(J?>Uco!QAdrN>=Fl4tk9(O~poMsXQW-Si#GB*c zP%^b!%8K1ukC`Gzz$&$+N$h8_YUBiA>x?55l)rEsvejmQUZ33_=`Q1|mYP=%o^M*rN4} zH?fyBN!$Cs>>}5kH7DffI z=EH&@+z20^$&rQ?#bQN%eOS3w zIZgCu$Y%!W0ydupw?5xd-{ke}=0>LNZ;KOCTgd9xPlZc&n}vzN+I;$4_WV0A`rs?` zE(wMUp9SA2ThEN+`we`@KYj;PegiHFM(@>0k)7_g?L681?*HeD$2!MwrfgCcNf73`?a1 zv$!2C$%5jNewz|#0CyAGKTrj;Nak_F2w3d-=tW(F!=nZ$Z7g6Jr`R^f`Geq6>mrx} z9h@oo7Pu%cJb8WUi@o1nxC1Duc2doh)$E??KiDqB8HRwQ*M{;d!EY|xP*Y~J@3=L4yydznJio-Cg3gYQ3W#FSbVOsPFl z{f)m#^-Cl28iYX>zs3Lf;VgXXKQAPvW%Dz=`kONT>G2!j&)w|c*UCTRtA*1)>8<#k zL(-JUj!xAh?0yaWK5l3HNxv4}bu{h!0P4r@u|rdk-N;gQ+){SP6hI}gL!kdIzz!|# z3)b0(2nY%SS`DMUo+aryX`h2pN*SmKa87`OUJpJEiq~l!)`3l7e^9ic1YGhEuk8lJ zO~$}FLkldXcV4gg13fUJVZ@tNUq7M~0xF?(V9ffx$GgjW#66rWsc0CHpOhmSMt|iy z_?}Od1wCaL(k)5qHfRSm#;yIs=s7RNWgAc-j zi-kpT*980cFfK+0M5g3Nz&f9Vcu^fEjGik2gS(}w4H(#=(~^{4JW_sIub*O_Q@-{Y zYso>iRH9Giprq+i$92X9`QavcS!PtjVHg~^h{AQo#%RYfON|<2=Cu|8B1EiB#$Xs9 z3vpDz2xvx7Qn=VKu9Y@>K_bfNwAa;%PF#>Ar=V!Wn5_G*R$F~k`&qAgN|Cps3BtE0OXj|~i=4iW<+LSZRD zLN?7b5J*EpKq$jxpnxy{fiN0@Ltz6HA_7>E14P&!B72 z#-Xh@$K$ncd~m0hsnJ1x*>CN_866@i#3yn)dVCR{Ym6~20?cenoE5s7R!rF!5-qI+ zO|_MbubZ>`E*an8^A!Xj1S>ZJvgncTAV@huvOHmBK;j?%hhYLiqD>$n9h4<9rEAi$ z4iK&i4gjtM32fRp0sBN~QXLX2R|b26aEO2?76ORrFf@z?3gf;Rb_gIC0uV?z@(fA@ zBpKOJi3kX`5G2qlt&N{LxT%A%2pZUUw)F)**oTDAGSQ2c-q#1K5BPEyH=>IUY+s3A z`Gtl^kbU$9ANE6RZ2!Vilud{LqP?X|)5_BoKu|30b0D~MX*Ai{oG zy}CdU7%)eMIx-9~pn{%8$XG;-EG$(N$`N5CIR^-|IV2p5ctBtQGTpA6npj{GWtDICJkg4h9JRy7<38Ne$~f@lS=5@eG%2#f#>o?x46u)8iWgc z(`=oyMI>a(4lVt>&U`cm9l9Vw*xsHI9376X&DaQ;vICsO^EOy3CZ@2KVq9u1byd{& z4Hym}L0Sf^0*ij@Bo}}-hc<}30OlnK3kx}Jx*36#vupv>U;`vz2(Y411~IP<(TJX# z!%YC$!O)Ei0uqANE>QwhK+!>#2xi0#Jc0%S9_D2jV4hniW~Zu}r1B~HT@Y(OF<|kQ zzvs_~^+tfb2cLjHB$lC%&!^T3@+lZeI)HTp?-4QKJBMvD%`Kmz`d#oT7$W9R0m!lm z)o98p!DdMfA6rS_+#Dc~Vnpl%5<3D3+@WG(REyCGK&}uyr33~|CX%$;ml))9=`mO! 
z#HAwHHdr2J#-%GVYSx4dCRa2NB#a8wB%`SrvRq`x8;#D***HT@V3De>4f|z~;$42N zJbQndEfO}nVTOe5UJe6=R!Oy#$S|eZMk+KLS|OwbSy+tXKH_=j*oipNVg6)AvB^I_Ntb%0(WZX;u#w$S9Y;#QUH$tX90_N2>#UT+9 zzVY>|0?EX+*X{urwr}&$6RWv2`&XQQ;)RJ}R#zPF5Cdml{_f0^X37<}JY|0_?FFZ4&)SObgYJop%kOWEu)XZz|FHBtLG~Z?%is;I zHpZ8{_AT2^7v{=Wy0vDOBh~NlANZ0G!kP2LTfZc}e$LNx*`0;}P4e>V+Q8s`551?S z4c2D$RwqqD!}P*OT~aM%|9Q`S?#XE~Y~6Us%=NGFwX?rsl4d zZ}-{~c`ZJ8-dBEvrGn@g!0q7^w_^~LSyI2`YyViXibx14Sf9fW_d^=MwSTSi2`UKR z@yq`n&H>}u_8rcJfawEEexz6b@h%M(;iqNPPR8eOy!$=g?`|*vA8*})bo!wmhlQ7Y zKVKRwe9Th|L6TSC^;!H6e2s4qj@2Pq7rrl9AVTFB;( zJ9ZgAec#7fEf9^+Z=J{A%K2jW&?^B1x#gq39){uTUqdo1xd@NRzT$mdy2s z-E(|U!=5&b;01sGFOuFN2r3eTulc>5>j5%=&JG|{Afe#Zc`xKf^xSSnj<|Bi(Ewq% z4R#QSi}pQ!X2D$05g;y6GeesS2hH+C69X8ISc-w28Ue$ol`fDBd<8PG%7_p|VYMmD z$k{DO1m-P-(YqKMF^H1z|v%qJrN^BEJOlU z(o_Vr(gAYC#9O#~s59KkR=G%*DP5|&I6z_5IP1PFn!@FrR?03L=;b1)c~3c3h^f)s!O z1PD74A_%}hFew}9*3FyH{|C)A2NmVtv?sEg+4wz%aVsYdnjz0-d zF`+n>qrMOc7!MgUq=wq`6jVT}NnlOQ2qp4|50|~_X^#&*CV)F45CV_@4u(BOK#7Ck zf&>D!14p})v9JscI1)!90+o>jN^BGoz(o?00EoRR12l{dl^mi3lXu7!YjZv(A7zT0 zd8s*2LM{5W9-*}Z1m+ocU^VZvuxbxk7|vT@W($K1tw@YDm0{c=VvY$YgkTnxTaZzM zfKUZtbSe!I0XE1@%G36bm;a8JZEJ zO62Rvv_K~R(MRj>;%C5VCo$16*`Lq8-#sxMJeYX)&f^cO)dtA=(ixf`_r z)<4sWx5aS(+{gc<7nKO7f06tUNk9+2aeg0Nwk^E;kN<$PF@S6K_W$sI_bV|0R&V%^ zx!>W28TZb7t-SI1)y9zNe!mpnbwwO}hP?H?Zi7W#e0Qke1z+|PovRS=;GzH7lgoz!xcGdDJYy)fA-ro-}nZ? z%wF=NVsQL7|E$j$1b{#6nh*9~oF4L;Kkq|ju?&U3)T6HEh|Nv^@-GsGkq18Z z|HZh^xXfNo7T@t`zyR9^P6$E{?hBFO^PjW_A;4SxgOYlA0~^NQsS z;%&}fD8)F?k{mZTGJ3c{Ai}2C$E?>7B%#)80-^VNm!t6(2nm_r z76EvIOs+VjG6Ub}XL}!R{8n7{wMxPmMh%P_@@77)ZXvU)E?4yY$M&6MeF#STrxP&c zJY;_Z7`c3s+hAnynkmf-W1*yGA0z`IB(NX}m<^zzD_!wbI?xISDv6Bc7(k|}(p_K} z>qDp%BmkL;x(V|}NxN?_kYi^_GVBZiRbZCNWVh_dYdFkgY7(1e0t(F%F2KMfq|L?BqNeAkN_s-CPd*ceoCwl zs;;1%Lm&Wh?Z=Mw2)W*CG7&_P1Ta8$REP)?F;F0K+9N;^$ZIHx02+j=0wF9^0E4({ zc;!xFp(heZ!oFdR00CI0IFZk9^xtt5(PAD42&yC`EMQJy1Qmf`U`5(n0Xhv-&K1lR z_r&4=B?(~8e9Wm_OiVx-*~sJLNM#UfyEM*A7sUWtW3X}**Q5j_kl@gqm?Sv21egdk zjY<>p1_{PW6escl%?U`56i8sdh7_`NSPBFa6OiRSggz|if*?Z)fvpQ*gK;2$hXPVM zWJq4{K>MaM0X-26gl=142?4ev^I=Us0Z9g?SaZo3u&`nQP=+YSAOUj0!a%e=0tg~9 zH`G$Y*R2Q$TS9<@nni{RJ?yH2n~eov`(3!t!V7>|LkLi5yb*cp3lPBG2q_D_0p8vd za$d|d;Ei{=Kr=Z9uG17j4d4JI4MwA5l??Wb$O_Di!RVNQU>ZVXww?@u!ma^RqbL{W z85}f{0Gl8{S1}CCEdjxO(|(9T7l;(b0D-LxV_~2YKmsHH0=FL{VL%|jMx08HuQzz` z)$Iw9QSpAuN`WX2I1%v*`!(bkvbQt5u-2~3X4=5m13ulA2Q>_lq+ z(x<+~S&3wG?aMFsn!9iD(>{8;m1J}L-0NMpiNN-5UiWY>g|&OXC(Vq86VJb;*ITdy z2>STTa3s^UkGW}GKv-SzwekwArw*>pJY^#0mHU12cipZ#(5=Nek8jSeQut^6*q*vl zT`xvinsFz+@BL*S2P}W_&+r4U>s&YkJlo#?$3D{9K2H^{b;;wMx45uy^y*)_!7u&U zuN3yDXEtMc`-G2lM{g!m_MBTTYb#6vXWr&IMl(I~bzZHfAoKjP*Lq?pq`C24r0Zzd zC4uYS|FVh(EpqHnV`)>(To$?`I=KS$dMs<(a?I1Ayh@pVl}3Xxm>rQ&cTq@d1Mr z6fSt+-+yaF|2->T<%ut3?aU5Nj2j+$)w3l6JpTDVE=wAUqbUoMy$7-QIXAo>3txD5 zb1@*z;`PCIa|`1q^J>RsVFDoRKJkSB5*IsT-t+9oFdBZGr(e%vJ6tv_tQh=QmoTLW zx$%RUzn{F!n_hqRf4!n``X0{*H&2;-!>ikV!8bj?I%Bw%3PB2zYyL2XNnVMtp3OC* zi3m+efQ~Rv>pf(++0N4vM?1P?fJhu!)8!!SU<7C!+X!Z`Vl4}ckzs=oM>`YEfLb%| zW+P@%q$`~)fVBZ85aqF?(K8BR*g$gx36f}S**4mcG@-B%rQ?Xg>lUCu z8Z29Z0O_JYj6o%?Y9U0V^6YStx@XCI{H;VY0XT*sBouU{!axG5U*F61697|c<|Vbcm|PxFhC24k-Q^{LVy4!&4`jL0m1+o3j+!f0coY% zZGae;TgGEDgkcK=5n8|)z}{$L2Ii<>2*^+wKn&^tIuSrTcgtdnsE zT}dk`MuiTgln0Rh1nWuXKih6F*9$*vG&Xh@&| z0uUmjIhi&cFvW@k14sx2!V(B%3`hnGjEziQVBBI0WS9*^j&d4k!jgiBp$!C(Z32Rn z2}c?PM-_om!nh10gl-H31ek~f6J^+7VUQ(KZ6b!KAua~ekVAl+CX5Eed9)S*0nz&c z1R^U>D6|YRxEXmRyc%^6fUHczxV3hSNsGEl$q3jSqV8y7VJt6Q48TKY$afAqye?6a zahVp89ZU-#xNW2X2Z@HL(Ayy0$Pot+1e9bvH$)(mTLftqE7Jr!*fT>Z*2lR-Q;Ztt z*2WerZAWjO7C9T6yvAT)n?keD8rPT+(d-q-2L*>rf` 
zmXYN%;M}edYx}s{_JR&~fl%wmyu@@mvxxnoXI#69%S33O^9V?QJmGskxe=-9^t(Uu ztPu6g6KnuEu@{9v+NO8Du%{4a12B6z8z|1@(90#w8>eg&WS%iPjHpm4#9 zzF@G=iJ$V?-{gG<T#*JvA~NywmsezUyi{ z#CWy-B!9fUDRTcU{sPp%GtYYO|A3U1-0&sy6>$oU^!kQs3W7Tt<2ir4uL;Pk7z*#UtwyPN$0&QW*Q9vMsQ8Qsc(iG!{0CYfG{He&Hkq_DMw7`l4l4V zI!s1mCqM{EgQi8KQI^pG66Tvg0BK6V5F;I^Hb^%>Cxs0$4nc;7m^26wDFn38Ftmb< zlpymFGZSYPIzR#nSs)!8F{7r{>nw-?3SnAF2ImAIj3fE5?NJm`z!J8M6bGOv?uZd20?thU1Y#)8jTO9hG$mhw0F>K=R4WOEDo1paHO3MKgk`iKo`5+z zCbT&OFbpLi&c!fBBM}IYu%r$&44Z&~2mu1J5dsKELjuqN2a=|W0A!E|APZw*84MbZ z%?N~H0dWNoV+TkW59dZPfKiaa^tLcVAk_jQU>qPwLZ|?cBZG!H<*8#5X>K%TKv*k) zP{`=uhyz4W2-Ok7xFi@CLy$li4@E)%K>%eK7c3Ma0g^Vy0+E0L45MBW31QT+rPCMUfW-m0#Nkhob30_mCHNN>+Z3_&P`P}xL= zW;%H9wGbg2T#;M-wCPu6Cl-Ij^E(NDkuk+z6 z*N<)V#?!5O?-p_?0MC>pA zzy6ZXNik6a5>6Z=@z?#O?@@c^ljJN?td4$WZ^7V+z}3I{p>#7{`>+42;~Djx(ZaUB z-%keU%wzI5l_ze#hhcsXyjxHU)Bvly zZrw0`%Wgm@UYU-gaCrTVES&E70v6s=*Y7fZ)-kuP|Fs1aa_%TsjEhRE@Wr6{Eb$-ExEi}gVezDs_ZUBj?6_UX=>s+vLkD*~Zz5m|6CjtVs z1v|+LLfC95da%2J#PIlvrsI@M2Xc7+WKqU>?b%ZpmotnpJ637(f z8BjD0$axn6R0R@1<(Ms+u*A{;L=>42G2{t?pPGE4-`d!o~qI9ME~<_w+#?q$H`JZMf~IWOuNV;3wN0kPb%(SSQ85GgPt z3AiFGw&yb=Km;lvVJH*4iAIK54jb)Z-Q#Zp8b;&~qh3r?U5{!=)cnH~%U~UjAF^^C zl7pAej?1!M2P6S&qDEZTqMWN6KsIYAhE-d5_$%xjl(@>*I5;^tc1z)-TukW*kV z#R>7r7Sy>J8Q-417)`X3kb?suDD>%z`;6wIfq>O@xCV`3dp3y}*}L|f4CD6pyThof z?nJKM^xW53airqyYs?^Q54Pk8AQz4g@^JPQxKW7Y;^wj$Gw6dATQ|1ZU%Oy?&}5zA z^q>2Ge}Mpa*+1ac4IPj^yC)fNF@fLElF#JICn!q}l?`Zd&GQ0~;KFlnrk<{9rmC4P z{yHa$1p&x^ikE8ttzIM^pQ*npuq%L zpM1b+6p#;|``)K!i{ah>ZhEem-27fIfD-Q;e&G9W=OC3wzUoVk25|PdZ+T%;gVmWI zcn^(~XdHyT?z%@sh>RmqBTS)}!ald^?fO+~C zezY6*C}6ty&-il_mTe-v`{>og665r=D}@0*SO<+YGpjV-aT8r#!9gHt2+S+# z1jANTkPb$I)Mmzv?9EL$;xy?H#vKF*II@2pf)GdI7=<7)jIUX(jR6j%B%lwVlfm)% z=zzopX_nkMz3J3zkK$w-G zEg)gB&;m76fgplJL2wcYR7*gClF&y6gi+&ijDdgvGAiTEski_F1|WeTK_FqcnaI%r zvj8w-Mj$K=5HiIwAPS9;6$HXiA^-_&!Cxre0@8ILSkK+Iq$OC|!cIE>0O1~43(vRJ}mWl0$|K{=KMGaz6bba$2qz@o6B zZ-X&n05xK-=A}1RmQ}!PG%puzuvEG-ZU)I9?$}O8GeQu{$D?L~xEL@jmSG6+87@lz z9>pMP7gVS}dFPD5K@5#&6uhPgNj4#Y7={WY>}*Ed2@nW3Lji&k9E|1?A(Du4)kH!E zNOKffE1L;Y08kJmbHETFI=qt_5-AzS`erUWXjvJAgwfR=VDFG43B3d393cdHg<)Y_ z77GFdi0G9NX%G+#V!i&b0~{Fx_?dCC98rOJ*a4UVza?Pr1Wwxv7lO=g-pHV2d-;3& zzBX@8ty5#?ufHanq<01^&z|k($OB|H5O#04W?c=)_T;NqEIfX^2O#HxE?Ai^TVnxT ze0C+v&GaRI57*tO0nN5QhiCJ1%uoe>Hs=H&fIq$ag?GfPpY?$+*iM;xs1JZZUH(b` zwO4MWCg%A$f2T*sjJ)js@{(x^qpsLJ_m&@eAv*vRxN`smuKWXCdZ<8r+UxvcbOG3V z>6`CudeCQnrq?!C_3HoE!($m)Kjsg(Z?-cZ@tGN#?GK#)=^37^)GaFimEX?WJ7=C= zNG3kS8#J`#_#^Xz`HryvjNe@l@F^ZEaQUnL4j*&}&h++GekX+h=CL2CDRwHbzV0X9 zhj>Ja2+E|n@~>(c2e10kxE!rcgn+c2!wB1fIj`t$--IP)QR1qEXmFBdLCR@JX!V8aItV&Y_h$;{u&uw#N5mb@Jy5ORvT_zX^PzdZBn*ajF<*o(TO=PD- zvseIQBrpXCXMkiTrinzn0wf?pDU*Vb_5i0?2f;N#Gw=kIuxe@&Y)T*ykC8_TV)5)* zM#6X#a6?R2kjs)?xFJZ8W41#CgItdpAcasbz0uTt}vNkmjA_VAKi)twXR)WCaf{}$4 z0iuZpBa9>@N8=haq-S6mz&i6G!+?ZtWOOSEwFrpmKtvXXEr~>u5Fud8-36foMvx8w z-QI13(Qt190%X)?G?2z73JN#^rDGGA7Yr?Egf!r_K#CBk4WIxrG$5M}kV*++QjKH; ztN@vMDqNsQPXxkK8<5!r0SvgvL_}zC1_Pw`It>Whl?V~szJeEk-oR!U3W&FD2trDc zqjLihm}>(BVBOF_!`>DRL;_)n>%lEF5YjC-3)e6)x<1(6&4!|prYwp_H)pVWR90$TH+h^To6I|!EuLhgxie?@zxH*>VZGb-o zk95t2Do=pH7ktG!upa?6Q-M{J1X@;)e1}I)Q}Z;(=)u)5s0soi5Mpuchkt^5woN`GMbf3r1GE4?l!3yyl1e;zA9ep zvJC>CdhuU=uVx^1@W}uFUIoK^>JjhQGnK$*Wd7US@^ww z5do2Y_n+@J#&`cVCjkN6oWBA;#lL7~{3$=h_lsd%I>5~z@LU!~c0sr|#89C)S|GT& zIWI`rU%Tlk;LPv%9WO{!*egKxsvNhgc&q`vZgBAEBo%---kYPgnN$jp zSZlMm0b@9`Py!%W6GRp&ghiHtZsP(Yrp5i*W>TRJ#Oc00gG^!=3M2r{Km;&=F!X#N zJ1M9VvR~G`M1n1AAo|%sJfqQrJ#2>2fP@ah))GPz1k6qfcFAVe$ohrTY<5Y_iB1zJ znGgHsM!_Qrw&jQf)V<$48b z=9%sprbu>C#blxqz?e0#OUFPEAn;+wxlB1=I}l_mgfTY524v|0N+PMu480>{m}J`G 
zAdUtCAqrGaKtTmC(WaYuN`xeNlO8$&Bva`1#fZt9(*z6%3FA@FJV(i8B+f9CxM~eZI1#!Hf`+CM#27c?W;o~|DZ@aB3)OLhz;BeTaM zNI1FyfxUqgdIllu07w8|_jYg^O$lCVVt3+TFD!iV3Tnh&Q3^>L9OO(k5CsV+IS9!4=H|i`r_&^mORsdTNtr! zy}2Fy9slXy|G|eXkaH8)gZqA1QZyy-Pyfod%SkNI{XPqGN?-sP8LK*Y%XM>9vL*L} zU_f}v-{yh!klmYJ`5lhKtH?7p65sI8cDDk|)0e?V`X659qIL2Oo_)ZnK!QK-@Aw0E^;u(K#aJ10W>O(*3@lAKZ>c&s>c02lNU+0V8e#}}MDd?{1ZmNks3}v%;E??>6 zZWthL-*I!VO_q?C$s5E~pW#pTz;Q5%6?o>nx&A%x&ttb=KZWt!VV?CqEPmfT;%af> zX%7M%OuoV}%2ba(&yv(b8Q~ z{x=;)r)aRF$QyIY$eJU18}TH^FlO)1d)P89Rt@O$2w$P;6*&k0cV;BG&>kDEGYygwk#|u00TIX2mv816r2b~5E>LB z1Rw%*NTtKhH)sTsjUB*TgDeu2cORISa}EQx4NRa6fWzc46Usz!%?zS3XiZ31AF%;@ zk_O0P0|e*-AOT0i4hszsmdzj#Mx$A9QIJswKx|M*V;~F*5Wy}46Tnyu<7hlEGDU__ zIj51yTjb{yAAId(z3g$gN1?@Djz`O>%N%5+3o^2#0g&qk$OFSL%UqVlW-~`oMg>R4 z!!UwCSUxNrj!wXc1|U_yq-=>36}=C8Ld7Ik&Pj>;)N3F}0(c~0rvvB_5Wp-jBUlUe z9eF7(Cy+59izEOd5Ws-Jv0=^tGiHv=4In@Ugy$t^q`1#rU@8a%s*l7$n`^+1$Es5R zn==eUL2U3wZsvnBS`eTh9p|w*5Cegkh24w?=9va*x;qiVc#vFx0FS81n+j9ywF<5k z&IsQ)fiQ=~6IlwPVSY=O@+Eo*nc2}mEF|+>6PJS^<2vI^7my+bBt093Zmn1#8<2nj zY61ei>HExV2@;@iMnVu#;M^H*1$+hoJ*I~toPOz>bO+hwNe_QX6st zUi+g$5V+zaU*c>*=ds`W>uoKx>M?(q$E_pK_1*k!p1PtW+q&UyM#JvM{D*ZLV7qzh zhv4AiM}61r1km$y{!;%9t~>tOzrvsWu!u8Wrds}$GB)>Fhm#L|G7Dehncpbv8Bqcy`*;8Y&;F2CW8sUR@D!GG1nXXg z5};ScCm;4)7G8Nj@Pb%R_A~n3$=5&)jDayw12s?sHN6dp)GRaiK}i+kk=+aI?1qIA za(_;}3ax~s40|X35Y5{n2v6`LL zMKUfIg>bzSXh)6!G8VYq5YV1p4@lH#a)Lk{BaBZ7AYj*qCNIcZn>iRjpz#$0K!_|G zkSKDRCp#@mCS$`K1vGeRx|lcQG=@!Nyjd7_STi`Tf-ylnG!P(00{Du;js}p$aX|*@ z62j6l77~sQ;1m*YA7*pl3bD+BrMp-4%ta7aVEG0|N+3kL z7BKASW({$~4z6&)^6EvA3}11Wx3lJm=xN8;xD`l}BN)1IJ2Aee39xN}A?ppplV{$< zMS)moS2vIsVnG<=O$^CqL3K;y6~~+2{1JO|$yNjcZnnpNgs=Ag!Ts*n{kk8R5_j*o zcb#Se_rdk|dmlo>rXFy47<&KWzv`by+;MQ`;fLU)4`|WlWU;C3OFm?;K4ws7B>T_0 zG&t59A=5*ivYtCqb^NxM+xUuraN_#Ij*tKeAm95ZyL(2G%}t;Dm2WBmIrUoqy%W6y zx;rGTU;frF-4YC(yTU+%-}T@9gn-6eb{C}MfiCij994B4~^ z!}<7yYs^7NLY%GYLLy}X2)uU=3}O@#dZ!AoK69MB5_vN}c+!TvAAIoH-;#1(GJ1%E z|33Q*zwixt{G6gp3)bY%x01HUJ7~Ai+SIK)QoStd_?*@DYMyl;ptxfgCxK2tI*K z{CNM$5P#=+Mz!4A_5SU~qUNg{p2fA6mK6xPZJTQQ4riuXb z{$_8xVz}O$XFvq^jvvf1noOxO8LSUi6GLf$vcFo*ohE~09`aK5kFOwJkgecnrwmNcz@);Nzxrz+C;hm(3l7=^;PI;hY4sdD#znu0jkK z?t9mR3(CqH&sRs%T7{FFQ{OtJQrWA`M_J!>+2WEc4 z&;FdV9boE@{LVM+GAJ%TnIOQq*Etgs%G3A%iN5z;u_D+1fsQkcgp8-@^?#uIZXy{R z1*)6tsnxkva1oH%JO0aOQ^wT%Awaxg5Cp zlb;vM$OnHXx_O|xpZMp!WDkir_rh&q$}B-bK(SI%XK&`HCp^F0{lRy!aC6;FEDi!8 zUM>q?avBK2oEuoui~+d)<`;3yMj$Q;Fgd)M#dqEBMU3aFnM4A0;z|}?ylX?F@L;O1 zU{tv9t!Y!s@Wf+Jg@ti>dcmB$8M=Uks?O~L3c1f*9WYZ6s10lcv@!#jlx)hTdfwd) zfGJTk5{O7(wM$CGLJPQ0Iyh5?X0ZTs5(Ls`_t^}xnkFF#GAE=Vndd+{oW@R=2&n59 z5E;9G`D~hiun6=oOM}qOeuUgdRzs*px@Mt>O4apkV5pR{P*1TQX`P z50;)KV-b|l0ZIu30$~_IXfe_@!3v}cda%z7&d@1|qXHyBATr;CbCW=U1PbhoMbsvY z2g0xcgsH>T#MX*If>0KR*r*1;lykr``JfAN0rJinGgzE-02!Jfi-Q#wZ)8%42sl{W z7ACULAV3)Cq9BuDLJE;#ZV8Cy5Qbm|Af^n3Gk8r3q`IdL<2iyz2ccjW#pS~Qf|P0! 
zHFZBp$f9QzsH2C21PEvV$3TG$B8ek5aAQ-76lPI?QbH3PSw5+ZiC{PaVwgw(5oDnX z$S?r08V!K3uar$lcTL+8&;(*rI0nTr00kEm8yt--`c*yVs=*O=s)$C6AQ*;;q#6^z z5P$%Y5JH4!2n3M;VKfbeM$K3{MJHfOM#4~<07A4l(o~3MPSaLNNJaORPBavBT7iI= zi4H*0;g$do7u7v;60aag$di703m$6?J(eAl2oQ-j8bCX2lLmx{0gzz>4dk8xk)l{Q z2{SN5k%VEGBjZK^WMYnh6F1nBB5Kw|)zb}zH~ zyJyScE8rs0r?7kCRL49|1hRJ`>JB9k(|Tgm`4n_-0?F=~-L(X`dp9OXsPzVraQ6J9 z0(v&=v)dY}Y47l`3LvbuVIFa_ivf)5l&ym6O)$ z7J&kqIKB64;(CXf`4LW#DFFSPPjq@Cr0WyB-`X&G{^|EWq#HQ-*gKMAP83(Y-KA6y zy!OEtSx2+kd+7UKTTyWHz-OMC3rWs?H&cfI9KP`3o0&|ogW=(0UR#;~9)^_{Tx4PP z9{SR=Q#x@v_pqC;zrV@&Sdwg4oZV0eAm=%Dwu7=}nhc2WbihJBN;--3bZy*67yLvNvp z$^4dcSKEzTl)M|gyqzNg(_}J+_4#upY26!^yETDn_^rR3F zMk53kn}}%>j#Oev8BIi5AOwPR#dO!;+_U)v0tiu%DO+yoOKz>L`tJ)$%a0%1IwK#<8$%%>P_wA!E$NWcb5BLsR9 zO9kaT3>9Q(1!HV3FM0;V8}DePB4IQxMuwf0vN(X47ASD!U@R(227~RH9S(d*BOw3^ z$z_6sGKTS!fP|r(3lLc;ATe)mP$n)(i0o{Kp$-HHaS-cs(5MkGS4ITn2osDnV?gL3 zr5Hvd$|DCrgltX|fG|vGCJ@*4&Je)Mt`)V0?1(9#ASNqd2k24(Q$vh4#BsAJNFtCT z-4#gpD0Yev(=i1;ATt#c<1=87m}kn%uH5QZt1Oa|o2cf>k{`qp>*>DA-goy8k8bHgWdc@pCZ@1G5vz?#X7D@b;U0`o06E3+Do8Nml3PlT?$#(_amC zuOM7FeYisCEN1#gKOH>)m`@)+n4;M~eH^CSbz#1IU2}{{L=0hd`D)Kda{MIJU~f-f N*qiW(ZsL%z008V|KCJ)% literal 0 HcmV?d00001 From c9e3d68a6eae527a5413c6b91dfd08645007b591 Mon Sep 17 00:00:00 2001 From: Shuaiqiang Chang Date: Tue, 21 Mar 2023 10:08:13 +0800 Subject: [PATCH 02/71] Update index.md --- docs/zh/05-get-started/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index 62eeb2f1c2..78c801a99b 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -25,7 +25,7 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - + From 61882c9226deba5127cf017750e54209dbbeb163 Mon Sep 17 00:00:00 2001 From: Shuaiqiang Chang Date: Tue, 21 Mar 2023 10:09:11 +0800 Subject: [PATCH 03/71] Update index.md --- docs/zh/05-get-started/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index 78c801a99b..16172277b5 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -4,7 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询' --- import xiaot from './xiaot.webp' -import xiaot_new from './xiaot-new.webp' +import xiaot_new from './xiaot-03.webp' import channel from './channel.webp' import official_account from './official-account.webp' @@ -25,7 +25,7 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
小 T 的二维码小 T 的二维码 TDengine 微信视频号 TDengine 微信公众号
-
+

From 70297a4648a6321ed2991cac27f6c41831b31a3 Mon Sep 17 00:00:00 2001
From: Alex Duan <417921451@qq.com>
Date: Wed, 22 Mar 2023 17:17:53 +0800
Subject: [PATCH 04/71] test: first submit udf python case

---
 tests/system-test/0-others/udfpy/af_min.py    |  30 ++
 tests/system-test/0-others/udfpy/af_null.py   |  19 ++
 tests/system-test/0-others/udfpy/af_sum.py    |  28 ++
 .../0-others/udfpy/sf_multi_args.py           |  20 ++
 tests/system-test/0-others/udfpy/sf_null.py   |  16 +
 tests/system-test/0-others/udfpy/sf_origin.py |  15 +
 tests/system-test/0-others/udfpy_main.py      | 298 ++++++++++++++++++
 7 files changed, 426 insertions(+)
 create mode 100644 tests/system-test/0-others/udfpy/af_min.py
 create mode 100644 tests/system-test/0-others/udfpy/af_null.py
 create mode 100644 tests/system-test/0-others/udfpy/af_sum.py
 create mode 100644 tests/system-test/0-others/udfpy/sf_multi_args.py
 create mode 100644 tests/system-test/0-others/udfpy/sf_null.py
 create mode 100644 tests/system-test/0-others/udfpy/sf_origin.py
 create mode 100644 tests/system-test/0-others/udfpy_main.py

diff --git a/tests/system-test/0-others/udfpy/af_min.py b/tests/system-test/0-others/udfpy/af_min.py
new file mode 100644
index 0000000000..0f4e579761
--- /dev/null
+++ b/tests/system-test/0-others/udfpy/af_min.py
@@ -0,0 +1,30 @@
+import pickle
+
+def init():
+    pass
+
+def destroy():
+    pass
+
+def start():
+    return pickle.dumps([])
+
+def finish(buf):
+    mins = pickle.loads(buf)
+    min_val = None
+    for min in mins:
+        if min_val is None or min < min_val:
+            min_val = min
+    return min_val
+
+def reduce(datablock, buf):
+    (rows, cols) = datablock.shape()
+    mins = pickle.loads(buf)
+    min = None
+    for i in range(rows):
+        val = datablock.data(i, 0)
+        if min is None or (val is not None and val < min) :
+            min = val
+    if min is not None:
+        mins.append(min)
+    return pickle.dumps(mins)
diff --git a/tests/system-test/0-others/udfpy/af_null.py b/tests/system-test/0-others/udfpy/af_null.py
new file mode 100644
index 0000000000..230eac6888
--- /dev/null
+++ b/tests/system-test/0-others/udfpy/af_null.py
@@ -0,0 +1,19 @@
+import pickle
+
+def init():
+    pass
+
+def destroy():
+    pass
+
+def start():
+    return pickle.dumps([])
+
+def finish(buf):
+    return None
+
+def reduce(datablock, buf):
+    (rows, cols) = datablock.shape()
+    mins = pickle.loads(buf)
+    mins.append(None)
+    return pickle.dumps(mins)
diff --git a/tests/system-test/0-others/udfpy/af_sum.py b/tests/system-test/0-others/udfpy/af_sum.py
new file mode 100644
index 0000000000..e32cf3fa31
--- /dev/null
+++ b/tests/system-test/0-others/udfpy/af_sum.py
@@ -0,0 +1,28 @@
+import pickle
+
+def init():
+    pass
+
+def destroy():
+    pass
+
+def start():
+    return pickle.dumps([])
+
+def finish(buf):
+    sums = pickle.loads(buf)
+    all = 0
+    for sum in sums:
+        all += sum
+    return all
+
+def reduce(datablock, buf):
+    (rows, cols) = datablock.shape()
+    sums = pickle.loads(buf)
+    sum = 0
+    for i in range(rows):
+        val = datablock.data(i, 0)
+        if val is not None:
+            sum += val
+    sums.append(sum)
+    return pickle.dumps(sums)
diff --git a/tests/system-test/0-others/udfpy/sf_multi_args.py b/tests/system-test/0-others/udfpy/sf_multi_args.py
new file mode 100644
index 0000000000..5d8194b3ad
--- /dev/null
+++ b/tests/system-test/0-others/udfpy/sf_multi_args.py
@@ -0,0 +1,20 @@
+# init
+def init():
+    pass
+
+# destory
+def destory():
+    pass
+
+# return origin column one value
+def process(block):
+    (nrows, ncols) = block.shape()
+    results = []
+    for i in range(nrows):
+        rows = []
+        for j in range(ncols):
+            val = block.data(i, j)
+            rows.append(val)
+
results.append(','.join(rows)) + return results + diff --git a/tests/system-test/0-others/udfpy/sf_null.py b/tests/system-test/0-others/udfpy/sf_null.py new file mode 100644 index 0000000000..a82b147d01 --- /dev/null +++ b/tests/system-test/0-others/udfpy/sf_null.py @@ -0,0 +1,16 @@ + +# init +def init(): + pass + +# destory +def destory(): + pass + +# return origin column one value +def process(block): + (rows, cols) = block.shape() + results = [] + for i in range(rows): + results.append(None) + return results \ No newline at end of file diff --git a/tests/system-test/0-others/udfpy/sf_origin.py b/tests/system-test/0-others/udfpy/sf_origin.py new file mode 100644 index 0000000000..7588d0402d --- /dev/null +++ b/tests/system-test/0-others/udfpy/sf_origin.py @@ -0,0 +1,15 @@ +# init +def init(): + pass + +# destory +def destory(): + pass + +# return origin column one value +def process(block): + (rows, cols) = block.shape() + results = [] + for i in range(rows): + results.append(block.data(i,0)) + return results diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py new file mode 100644 index 0000000000..9cc329ca19 --- /dev/null +++ b/tests/system-test/0-others/udfpy_main.py @@ -0,0 +1,298 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * + +import random +import os + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + + # udf path + self.udf_path = os.path.dirname(os.path.realpath(__file__)) + "/udfpy" + + + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'varchar(20)', + 'col13': 'nchar(20)', + 'col14': 'timestamp' + } + self.tag_dict = { + 't1': 'tinyint', + 't2': 'smallint', + 't3': 'int', + 't4': 'bigint', + 't5': 'tinyint unsigned', + 't6': 'smallint unsigned', + 't7': 'int unsigned', + 't8': 'bigint unsigned', + 't9': 'float', + 't10': 'double', + 't11': 'bool', + 't12': 'varchar(20)', + 't13': 'nchar(20)', + 't14': 'timestamp' + } + + def set_stb_sql(self,stbname,column_dict,tag_dict): + column_sql = '' + tag_sql = '' + for k,v in column_dict.items(): + column_sql += f"{k} {v}, " + for k,v in tag_dict.items(): + tag_sql += f"{k} {v}, " + create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})' + return create_stb_sql + + # create stable and child tables + def create_table(self, stbname, tbname, count): + tdSql.prepare() + tdSql.execute('use db') + self.child_count = count + self.stbname = stbname + self.tbname = tbname + + # create stable + create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict) + 
tdSql.execute(create_table_sql) + + # create child table + for i in range(count): + ti = i % 128 + tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}",now' + sql = f'create table {tbname}{i} using {stbname} tags({tags})' + tdSql.execute(sql) + + tdLog.info(f" create {count} child tables ok.") + + def create_udfpy_impl(self, funs, filename): + for name, outtype in funs.items(): + sql = f' create function {name} as "{self.udf_path}/{filename} {outtype} " language "Python" ' + tdSql.execute(sql) + + + def create_udfpy_dicts(self, dicts, filename): + for k,v in dicts: + self.create_udfpy_impl(k, v, filename) + + # create_udfpy_function + def create_udfpy_function(self): + # function + + + # scalar funciton + self.scalar_funs = { + 'sf1': 'tinyint', + 'sf2': 'smallint', + 'sf3': 'int', + 'sf4': 'bigint', + 'sf5': 'tinyint unsigned', + 'sf6': 'smallint unsigned', + 'sf7': 'int unsigned', + 'sf8': 'bigint unsigned', + 'sf9': 'float', + 'sf10': 'double', + 'sf11': 'bool', + 'sf12': 'varchar(20)', + 'sf13': 'nchar(20)', + 'sf14': 'timestamp' + } + # agg function + self.agg_funs = { + 'af1': 'tinyint', + 'af2': 'smallint', + 'af3': 'int', + 'af4': 'bigint', + 'af5': 'tinyint unsigned', + 'af6': 'smallint unsigned', + 'af7': 'int unsigned', + 'af8': 'bigint unsigned', + 'af9': 'float', + 'af10': 'double', + 'af11': 'bool', + 'af12': 'varchar(20)', + 'af13': 'nchar(20)', + 'af14': 'timestamp' + } + + # files + self.create_udfpy_function(self.scalar_funs, "fun_origin") + self.create_udf_sf("sf_multi_args", "binary(1024)") + + #self.create_udfpy_function(self.agg_funs, None) + + def create_udf_sf(self, fun_name, out_type): + sql = f'create function {fun_name} as {self.udf_path}{fun_name}.py {out_type} language "Python"' + tdSql.execute(sql) + + def create_udf_af(self, fun_name, out_type, bufsize): + sql = f'create aggregate function {fun_name} as {self.udf_path}{fun_name}.py {out_type} bufsize {bufsize} language "Python"' + tdSql.execute(sql) + + + # sql1 query result eual with sql2 + def verify_same_result(self, sql1, sql2): + # query + result1 = tdSql.getResult(sql1) + tdSql.query(sql2) + + for i, row in enumerate(result1): + for j , val in enumerate(row): + tdSql.checkData(i, j, result1[i][j]) + + # same value like select col1, udf_fun1(col1) from st + def verfiy_same_value(sql): + tdSql.query(sql) + nrows = tdSql.getRows() + for i in range(nrows): + val = tdSql.getData(i, 0) + tdSql.checkData(i, 1, val) + + # verify multi values + def verify_same_multi_values(self, sql): + tdSql.query(sql) + nrows = tdSql.getRows() + for i in range(nrows): + udf_val = tdSql.getData(i, 0) + vals = udf_val.split(',') + for j,val in enumerate(vals, 1): + tdSql.checkData(i, j, val) + + # query multi-args + def query_multi_args(self): + cols = self.column_dict.keys() + self.tag_dict.keys() + ncols = len(cols) + for i in range(2, ncols): + sample = random.sample(i) + cols_name = ','.join(sample) + sql = f'select sf_multi_args({cols_name}),{cols_name} from {self.stbname}' + self.verify_same_multi_values(sql) + + + # query_udfpy + def query_scalar_udfpy(self): + # col + for col_name, col_type in self.column_dict: + for fun_name, out_type in self.scalar_funs: + sql = f'select {col_name} {fun_name}({col_name}) from {self.stbname}' + self.verify_same_value(sql) + + # multi-args + self.query_multi_args() + + # create aggregate + def create_aggr_udfpy(self): + # all type check null + for col_name, col_type in self.column_dict: + self.create_udf_af(f"af_null_{col_name}", f"{col_type}", 
10*1024*1024) + + # min + self.create_udf_af(f"af_min_float", f"float", 10*1024*1024) + self.create_udf_af(f"af_min_int", f"int", 10*1024*1024) + + # sum + self.create_udf_af(f"af_sum_float", f"float", 100*1024*1024) + self.create_udf_af(f"af_sum_int", f"sum", 100*1024*1024) + + + # query aggregate + def query_aggr_udfpy(self) : + # all type check null + for col_name, col_type in self.column_dict: + fun_name = f"af_null_{col_name}" + sql = f'select {fun_name}(col_name) from {self.stbname}' + tdSql.query(sql) + tdSql.checkData(0, 0, "NULL") + + # min + sql = f'select min(col3), af_min_int(col3) from {self.stbname}' + self.verfiy_same_value(sql) + sql = f'select min(col7), af_min_int(col7) from {self.stbname}' + self.verfiy_same_value(sql) + sql = f'select min(col9), af_min_float(col9) from {self.stbname}' + self.verfiy_same_value(sql) + + # sum + sql = f'select sum(col3), af_sum_int(col3) from {self.stbname}' + self.verfiy_same_value(sql) + sql = f'select sum(col7), af_sum_int(col7) from {self.stbname}' + self.verfiy_same_value(sql) + sql = f'select sum(col9), af_sum_float(col9) from {self.stbname}' + self.verfiy_same_value(sql) + + + + + # insert to child table d1 data + def insert_data(self, tbname, rows): + ts = 1670000000000 + for i in range(self.child_count): + for j in range(rows): + ti = j % 128 + cols = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}",now' + sql = f'insert into {tbname}{i} values({ts+j},{cols});' + tdSql.execute(sql) + + tdLog.info(f" insert {rows} for each child table.") + + + # run + def run(self): + # var + stable = "meters" + tbname = "d" + count = 100 + # do + self.create_table(stable, tbname, count) + self.insert_data(tbname, 1000) + + # scalar + self.create_scalar_udfpy() + self.query_scalar_udfpy() + + # aggregate + self.create_aggr_udfpy() + self.query_aggr_udfpy() + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8ac1e7f02c62496db1b63a44bb9366e646344a87 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 22 Mar 2023 17:18:24 +0800 Subject: [PATCH 05/71] test: first submit udf python case --- tests/system-test/0-others/udfpy_main.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 9cc329ca19..1b5a1bab38 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -252,8 +252,6 @@ class TDTestCase: self.verfiy_same_value(sql) sql = f'select sum(col9), af_sum_float(col9) from {self.stbname}' self.verfiy_same_value(sql) - - # insert to child table d1 data @@ -266,6 +264,15 @@ class TDTestCase: sql = f'insert into {tbname}{i} values({ts+j},{cols});' tdSql.execute(sql) + # partial columns upate + sql = f'insert into {tbname}0(ts, col1, col9, col11) values(now, 100, 200, 0)' + tdSql.execute(sql) + sql = f'insert into {tbname}0(ts, col2, col5, col8) values(now, 100, 200, 300)' + tdSql.execute(sql) + sql = f'insert into {tbname}0(ts, col3, col7, col13) values(now, null, null, null)' + tdSql.execute(sql) + sql = f'insert into {tbname}0(ts) values(now)' + tdSql.execute(sql) tdLog.info(f" insert {rows} for each child table.") @@ -288,8 +295,6 @@ class TDTestCase: self.query_aggr_udfpy() - - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 
cf28b637a79124582e2e727b09f2480db2626237 Mon Sep 17 00:00:00 2001 From: cadem Date: Wed, 22 Mar 2023 18:09:59 +0800 Subject: [PATCH 06/71] change msg order --- include/common/tmsgdef.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 67fd832f13..8d02c6947c 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -166,7 +166,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "auth", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP, "balance-vgroup", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MERGE_VGROUP, "merge-vgroup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_REDISTRIBUTE_VGROUP, "redistribute-vgroup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL) @@ -177,6 +176,7 @@ enum { // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL) // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL) TD_NEW_MSG_SEG(TDMT_VND_MSG) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp) From cb59cc830846f040fc064312cabd78a7226342b1 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 22 Mar 2023 18:28:50 +0800 Subject: [PATCH 07/71] test: select nested query of udf --- tests/system-test/0-others/udfpy_main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 1b5a1bab38..0de48269e9 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -207,8 +207,11 @@ class TDTestCase: # col for col_name, col_type in self.column_dict: for fun_name, out_type in self.scalar_funs: - sql = f'select {col_name} {fun_name}({col_name}) from {self.stbname}' + sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname}' self.verify_same_value(sql) + sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc' + self.verify_same_value(sql) + # multi-args self.query_multi_args() From 6345952f8829b9219f785d95458c987c9d0661c2 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 22 Mar 2023 20:30:25 +0800 Subject: [PATCH 08/71] test: debug python file --- tests/pytest/util/cases.py | 2 + .../0-others/udfpy/sf_multi_args.py | 4 +- tests/system-test/0-others/udfpy/sf_null.py | 4 +- tests/system-test/0-others/udfpy/sf_origin.py | 4 +- tests/system-test/0-others/udfpy_main.py | 45 +++++++++---------- 5 files changed, 29 insertions(+), 30 deletions(-) diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py index 4830d2f8b0..536b8f30d3 100644 --- a/tests/pytest/util/cases.py +++ b/tests/pytest/util/cases.py @@ -17,6 +17,7 @@ import time import datetime import inspect import importlib +import traceback from util.log import * @@ -75,6 +76,7 @@ class TDCases: case.run() except Exception as e: tdLog.notice(repr(e)) + traceback.print_exc() tdLog.exit("%s failed" % (fileName)) case.stop() runNum += 1 diff --git a/tests/system-test/0-others/udfpy/sf_multi_args.py b/tests/system-test/0-others/udfpy/sf_multi_args.py index 5d8194b3ad..f5585b4e87 100644 --- a/tests/system-test/0-others/udfpy/sf_multi_args.py +++ 
b/tests/system-test/0-others/udfpy/sf_multi_args.py @@ -2,8 +2,8 @@ def init(): pass -# destory -def destory(): +# destroy +def destroy(): pass # return origin column one value diff --git a/tests/system-test/0-others/udfpy/sf_null.py b/tests/system-test/0-others/udfpy/sf_null.py index a82b147d01..c22ca95b19 100644 --- a/tests/system-test/0-others/udfpy/sf_null.py +++ b/tests/system-test/0-others/udfpy/sf_null.py @@ -3,8 +3,8 @@ def init(): pass -# destory -def destory(): +# destroy +def destroy(): pass # return origin column one value diff --git a/tests/system-test/0-others/udfpy/sf_origin.py b/tests/system-test/0-others/udfpy/sf_origin.py index 7588d0402d..9158e044d2 100644 --- a/tests/system-test/0-others/udfpy/sf_origin.py +++ b/tests/system-test/0-others/udfpy/sf_origin.py @@ -2,8 +2,8 @@ def init(): pass -# destory -def destory(): +# destroy +def destroy(): pass # return origin column one value diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 0de48269e9..07fd64d983 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -98,21 +98,15 @@ class TDTestCase: tdLog.info(f" create {count} child tables ok.") - def create_udfpy_impl(self, funs, filename): - for name, outtype in funs.items(): - sql = f' create function {name} as "{self.udf_path}/{filename} {outtype} " language "Python" ' + # create with dicts + def create_sf_dicts(self, dicts, filename): + for fun_name, out_type in dicts.items(): + sql = f' create function {fun_name} as "{self.udf_path}/{filename}" outputtype {out_type} language "Python" ' tdSql.execute(sql) - - - def create_udfpy_dicts(self, dicts, filename): - for k,v in dicts: - self.create_udfpy_impl(k, v, filename) + tdLog.info(sql) # create_udfpy_function - def create_udfpy_function(self): - # function - - + def create_scalar_udfpy(self): # scalar funciton self.scalar_funs = { 'sf1': 'tinyint', @@ -149,18 +143,19 @@ class TDTestCase: } # files - self.create_udfpy_function(self.scalar_funs, "fun_origin") + self.create_sf_dicts(self.scalar_funs, "sf_origin.py") self.create_udf_sf("sf_multi_args", "binary(1024)") - #self.create_udfpy_function(self.agg_funs, None) - + # fun_name == fun_name.py def create_udf_sf(self, fun_name, out_type): - sql = f'create function {fun_name} as {self.udf_path}{fun_name}.py {out_type} language "Python"' + sql = f'create function {fun_name} as "{self.udf_path}/{fun_name}.py" outputtype {out_type} language "Python" ' tdSql.execute(sql) + tdLog.info(sql) def create_udf_af(self, fun_name, out_type, bufsize): - sql = f'create aggregate function {fun_name} as {self.udf_path}{fun_name}.py {out_type} bufsize {bufsize} language "Python"' + sql = f'create aggregate function {fun_name} as "{self.udf_path}/{fun_name}.py" outputtype {out_type} bufsize {bufsize} language "Python" ' tdSql.execute(sql) + tdLog.info(sql) # sql1 query result eual with sql2 @@ -174,7 +169,7 @@ class TDTestCase: tdSql.checkData(i, j, result1[i][j]) # same value like select col1, udf_fun1(col1) from st - def verfiy_same_value(sql): + def verify_same_value(sql): tdSql.query(sql) nrows = tdSql.getRows() for i in range(nrows): @@ -205,11 +200,13 @@ class TDTestCase: # query_udfpy def query_scalar_udfpy(self): # col - for col_name, col_type in self.column_dict: - for fun_name, out_type in self.scalar_funs: + for col_name, col_type in self.column_dict.items(): + for fun_name, out_type in self.scalar_funs.items(): sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname}' + 
tdLog.info(sql) self.verify_same_value(sql) sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc' + tdLog.info(sql) self.verify_same_value(sql) @@ -262,8 +259,8 @@ class TDTestCase: ts = 1670000000000 for i in range(self.child_count): for j in range(rows): - ti = j % 128 - cols = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}",now' + tj = j % 128 + cols = f'{tj},{tj},{j},{j},{tj},{tj},{j},{j},{j}.000{j},{j}.000{j},true,"var{j}","nch{j}",now' sql = f'insert into {tbname}{i} values({ts+j},{cols});' tdSql.execute(sql) @@ -284,10 +281,10 @@ class TDTestCase: # var stable = "meters" tbname = "d" - count = 100 + count = 10 # do self.create_table(stable, tbname, count) - self.insert_data(tbname, 1000) + self.insert_data(tbname, 100) # scalar self.create_scalar_udfpy() From 22d92cd3dce06c5202e7c8df3bece27061b09ecf Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 23 Mar 2023 12:55:21 +0800 Subject: [PATCH 09/71] feat: WAL cleanup not affected by topic consumption anymore --- source/libs/wal/src/walWrite.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index a8f9362757..848de4f36d 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -290,14 +290,22 @@ int32_t walEndSnapshot(SWal *pWal) { int ts = taosGetTimestampSec(); ver = TMAX(ver - pWal->vers.logRetention, pWal->vers.firstVer - 1); + + bool hasTopic = false; + int64_t refVer = ver; void *pIter = NULL; while (1) { pIter = taosHashIterate(pWal->pRefHash, pIter); if (pIter == NULL) break; SWalRef *pRef = *(SWalRef **)pIter; if (pRef->refVer == -1) continue; - ver = TMIN(ver, pRef->refVer - 1); + refVer = TMIN(refVer, pRef->refVer - 1); wDebug("vgId:%d, wal found ref %" PRId64 ", refId %" PRId64, pWal->cfg.vgId, pRef->refVer, pRef->refId); + hasTopic = true; + } + // compatible mode + if (pWal->cfg.retentionPeriod == 0 && hasTopic) { + ver = TMIN(ver, refVer); } int deleteCnt = 0; From e2e76f9e287ab828e79118429c76f329942b480f Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Thu, 23 Mar 2023 19:22:40 +0800 Subject: [PATCH 10/71] test: udfpy_main.py case passed ! 
--- .../system-test/0-others/tag_index_cluster.py | 171 ++++++++++++++++++ tests/system-test/0-others/udfpy/af_min.py | 2 +- .../0-others/udfpy/sf_multi_args.py | 5 +- tests/system-test/0-others/udfpy_main.py | 71 +++++--- 4 files changed, 218 insertions(+), 31 deletions(-) create mode 100644 tests/system-test/0-others/tag_index_cluster.py diff --git a/tests/system-test/0-others/tag_index_cluster.py b/tests/system-test/0-others/tag_index_cluster.py new file mode 100644 index 0000000000..b1ae74f567 --- /dev/null +++ b/tests/system-test/0-others/tag_index_cluster.py @@ -0,0 +1,171 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +import time +import socket +import subprocess + +class MyDnodes(TDDnodes): + def __init__(self ,dnodes_lists): + super(MyDnodes,self).__init__() + self.dnodes = dnodes_lists # dnode must be TDDnode instance + self.simDeployed = False + +class TagCluster: + noConn = True + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + self.depoly_cluster(5) + self.master_dnode = self.TDDnodes.dnodes[0] + self.host=self.master_dnode.cfgDict["fqdn"] + conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) + tdSql.init(conn1.cursor()) + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + def depoly_cluster(self ,dnodes_nums): + + testCluster = False + valgrind = 0 + hostname = socket.gethostname() + dnodes = [] + start_port = 6030 + for num in range(1, dnodes_nums+1): + dnode = TDDnode(num) + dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") + dnode.addExtraCfg("fqdn", f"{hostname}") + dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") + dnode.addExtraCfg("monitorFqdn", hostname) + dnode.addExtraCfg("monitorPort", 7043) + dnodes.append(dnode) + + self.TDDnodes = MyDnodes(dnodes) + self.TDDnodes.init("") + self.TDDnodes.setTestCluster(testCluster) + self.TDDnodes.setValgrind(valgrind) + + self.TDDnodes.setAsan(tdDnodes.getAsan()) + self.TDDnodes.stopAll() + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.deploy(dnode.index,{}) + + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.starttaosd(dnode.index) + + # create cluster + for dnode in self.TDDnodes.dnodes[1:]: + # print(dnode.cfgDict) + dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] + dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] + dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] + cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\"" + print(cmd) + os.system(cmd) + + time.sleep(2) + tdLog.info(" create cluster done! 
") + + def five_dnode_one_mnode(self): + tdSql.query("select * from information_schema.ins_dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + tdSql.checkData(0,4,'ready') + tdSql.checkData(4,4,'ready') + tdSql.query("select * from information_schema.ins_mnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + + + tdSql.error("create mnode on dnode 1;") + tdSql.error("drop mnode on dnode 1;") + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db replica 1 duration 300") + tdSql.execute("use db") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + tdSql.query('select * from information_schema.ins_databases;') + tdSql.checkData(2,5,'on') + tdSql.error("alter database db strict 'off'") + # tdSql.execute('alter database db strict 'on'') + # tdSql.query('select * from information_schema.ins_databases;') + # tdSql.checkData(2,5,'on') + + def getConnection(self, dnode): + host = dnode.cfgDict["fqdn"] + port = dnode.cfgDict["serverPort"] + config_dir = dnode.cfgDir + return taos.connect(host=host, port=int(port), config=config_dir) + + def check_alive(self): + # check cluster alive + tdLog.printNoPrefix("======== test cluster alive: ") + tdSql.checkDataLoop(0, 0, 1, "show cluster alive;", 20, 0.5) + + tdSql.query("show db.alive;") + tdSql.checkData(0, 0, 1) + + # stop 3 dnode + self.TDDnodes.stoptaosd(3) + tdSql.checkDataLoop(0, 0, 2, "show cluster alive;", 20, 0.5) + + tdSql.query("show db.alive;") + tdSql.checkData(0, 0, 2) + + # stop 2 dnode + self.TDDnodes.stoptaosd(2) + tdSql.checkDataLoop(0, 0, 0, "show cluster alive;", 20, 0.5) + + tdSql.query("show db.alive;") + tdSql.checkData(0, 0, 0) + + + def run(self): + # print(self.master_dnode.cfgDict) + self.five_dnode_one_mnode() + # check cluster and db alive + self.check_alive() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/0-others/udfpy/af_min.py b/tests/system-test/0-others/udfpy/af_min.py index 0f4e579761..9f1aadf414 100644 --- a/tests/system-test/0-others/udfpy/af_min.py +++ b/tests/system-test/0-others/udfpy/af_min.py @@ -13,7 +13,7 @@ def finish(buf): mins = pickle.loads(buf) min_val = None for min in mins: - if min < min_val: + if min_val is None or (min is not None and min < min_val): min_val = min return min_val diff --git a/tests/system-test/0-others/udfpy/sf_multi_args.py b/tests/system-test/0-others/udfpy/sf_multi_args.py index f5585b4e87..1026661d8d 100644 --- a/tests/system-test/0-others/udfpy/sf_multi_args.py +++ b/tests/system-test/0-others/udfpy/sf_multi_args.py @@ -14,7 +14,10 @@ def process(block): rows = [] for j in range(ncols): val = block.data(i, j) - rows.append(val) + if type(val) is bytes: + rows.append(val.decode('utf-8')) + else: + rows.append(repr(val)) results.append(','.join(rows)) return results diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 07fd64d983..04c1f5061d 100644 --- a/tests/system-test/0-others/udfpy_main.py 
+++ b/tests/system-test/0-others/udfpy_main.py @@ -109,6 +109,7 @@ class TDTestCase: def create_scalar_udfpy(self): # scalar funciton self.scalar_funs = { + 'sf0': 'timestamp', 'sf1': 'tinyint', 'sf2': 'smallint', 'sf3': 'int', @@ -121,8 +122,7 @@ class TDTestCase: 'sf10': 'double', 'sf11': 'bool', 'sf12': 'varchar(20)', - 'sf13': 'nchar(20)', - 'sf14': 'timestamp' + 'sf13': 'nchar(20)' } # agg function self.agg_funs = { @@ -152,8 +152,8 @@ class TDTestCase: tdSql.execute(sql) tdLog.info(sql) - def create_udf_af(self, fun_name, out_type, bufsize): - sql = f'create aggregate function {fun_name} as "{self.udf_path}/{fun_name}.py" outputtype {out_type} bufsize {bufsize} language "Python" ' + def create_udf_af(self, fun_name, filename, out_type, bufsize): + sql = f'create aggregate function {fun_name} as "{self.udf_path}/{filename}" outputtype {out_type} bufsize {bufsize} language "Python" ' tdSql.execute(sql) tdLog.info(sql) @@ -169,7 +169,7 @@ class TDTestCase: tdSql.checkData(i, j, result1[i][j]) # same value like select col1, udf_fun1(col1) from st - def verify_same_value(sql): + def verify_same_value(self, sql): tdSql.query(sql) nrows = tdSql.getRows() for i in range(nrows): @@ -188,10 +188,16 @@ class TDTestCase: # query multi-args def query_multi_args(self): - cols = self.column_dict.keys() + self.tag_dict.keys() + cols = list(self.column_dict.keys()) + list(self.tag_dict.keys()) + cols.remove("col13") + cols.remove("t13") ncols = len(cols) + print(cols) + for i in range(2, ncols): - sample = random.sample(i) + print(i) + sample = random.sample(cols, i) + print(sample) cols_name = ','.join(sample) sql = f'select sf_multi_args({cols_name}),{cols_name} from {self.stbname}' self.verify_same_multi_values(sql) @@ -202,12 +208,13 @@ class TDTestCase: # col for col_name, col_type in self.column_dict.items(): for fun_name, out_type in self.scalar_funs.items(): - sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname}' - tdLog.info(sql) - self.verify_same_value(sql) - sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc' - tdLog.info(sql) - self.verify_same_value(sql) + if col_type == out_type : + sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname}' + tdLog.info(sql) + self.verify_same_value(sql) + sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc' + tdLog.info(sql) + self.verify_same_value(sql) # multi-args @@ -216,42 +223,48 @@ class TDTestCase: # create aggregate def create_aggr_udfpy(self): # all type check null - for col_name, col_type in self.column_dict: - self.create_udf_af(f"af_null_{col_name}", f"{col_type}", 10*1024*1024) + for col_name, col_type in self.column_dict.items(): + self.create_udf_af(f"af_null_{col_name}", "af_null.py", col_type, 10*1024) # min - self.create_udf_af(f"af_min_float", f"float", 10*1024*1024) - self.create_udf_af(f"af_min_int", f"int", 10*1024*1024) + file_name = "af_min.py" + fun_name = "af_min_float" + self.create_udf_af(fun_name, file_name, f"float", 10*1024) + fun_name = "af_min_int" + self.create_udf_af(fun_name, file_name, f"int", 10*1024) # sum - self.create_udf_af(f"af_sum_float", f"float", 100*1024*1024) - self.create_udf_af(f"af_sum_int", f"sum", 100*1024*1024) + file_name = "af_sum.py" + fun_name = "af_sum_float" + self.create_udf_af(fun_name, file_name, f"float", 10*1024) + fun_name = "af_sum_int" + self.create_udf_af(fun_name, file_name, f"int", 10*1024) # query aggregate def 
query_aggr_udfpy(self) : # all type check null - for col_name, col_type in self.column_dict: + for col_name, col_type in self.column_dict.items(): fun_name = f"af_null_{col_name}" - sql = f'select {fun_name}(col_name) from {self.stbname}' + sql = f'select {fun_name}({col_name}) from {self.stbname}' tdSql.query(sql) - tdSql.checkData(0, 0, "NULL") + tdSql.checkData(0, 0, "None") # min sql = f'select min(col3), af_min_int(col3) from {self.stbname}' - self.verfiy_same_value(sql) + self.verify_same_value(sql) sql = f'select min(col7), af_min_int(col7) from {self.stbname}' - self.verfiy_same_value(sql) + self.verify_same_value(sql) sql = f'select min(col9), af_min_float(col9) from {self.stbname}' - self.verfiy_same_value(sql) + self.verify_same_value(sql) # sum sql = f'select sum(col3), af_sum_int(col3) from {self.stbname}' - self.verfiy_same_value(sql) + self.verify_same_value(sql) sql = f'select sum(col7), af_sum_int(col7) from {self.stbname}' - self.verfiy_same_value(sql) + self.verify_same_value(sql) sql = f'select sum(col9), af_sum_float(col9) from {self.stbname}' - self.verfiy_same_value(sql) + self.verify_same_value(sql) # insert to child table d1 data @@ -284,7 +297,7 @@ class TDTestCase: count = 10 # do self.create_table(stable, tbname, count) - self.insert_data(tbname, 100) + self.insert_data(tbname, 10) # scalar self.create_scalar_udfpy() From 2cf21bd134cebf17dfd4e52a1de722ac1c661cc9 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Fri, 24 Mar 2023 10:16:45 +0800 Subject: [PATCH 11/71] test: update code --- tests/system-test/0-others/udfpy_main.py | 50 ++++++++++++++++++------ 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 04c1f5061d..75a7e8b308 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -89,12 +89,15 @@ class TDTestCase: create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict) tdSql.execute(create_table_sql) + batch_size = 1000 # create child table for i in range(count): ti = i % 128 tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}",now' - sql = f'create table {tbname}{i} using {stbname} tags({tags})' - tdSql.execute(sql) + sql = f'create table {tbname}{i} using {stbname} tags({tags});' + tdSql.execute(sql) + if i % batch_size == 0: + tdLog.info(f" create child table {i} ...") tdLog.info(f" create {count} child tables ok.") @@ -144,16 +147,22 @@ class TDTestCase: # files self.create_sf_dicts(self.scalar_funs, "sf_origin.py") - self.create_udf_sf("sf_multi_args", "binary(1024)") + fun_name = "sf_multi_args" + self.create_udf_sf(fun_name, f'{fun_name}.py', "binary(1024)") + + # all type check null + for col_name, col_type in self.column_dict.items(): + self.create_udf_sf(f"sf_null_{col_name}", "sf_null.py", col_type) + # fun_name == fun_name.py - def create_udf_sf(self, fun_name, out_type): - sql = f'create function {fun_name} as "{self.udf_path}/{fun_name}.py" outputtype {out_type} language "Python" ' + def create_udf_sf(self, fun_name, file_name, out_type): + sql = f'create function {fun_name} as "{self.udf_path}/{file_name}" outputtype {out_type} language "Python" ' tdSql.execute(sql) tdLog.info(sql) - def create_udf_af(self, fun_name, filename, out_type, bufsize): - sql = f'create aggregate function {fun_name} as "{self.udf_path}/{filename}" outputtype {out_type} bufsize {bufsize} language "Python" ' + def create_udf_af(self, fun_name, 
file_name, out_type, bufsize): + sql = f'create aggregate function {fun_name} as "{self.udf_path}/{file_name}" outputtype {out_type} bufsize {bufsize} language "Python" ' tdSql.execute(sql) tdLog.info(sql) @@ -216,10 +225,17 @@ class TDTestCase: tdLog.info(sql) self.verify_same_value(sql) - # multi-args self.query_multi_args() + # all type check null + for col_name, col_type in self.column_dict.items(): + fun_name = f"sf_null_{col_name}" + sql = f'select {fun_name}({col_name}) from {self.stbname}' + tdSql.query(sql) + tdSql.checkData(0, 0, "None") + + # create aggregate def create_aggr_udfpy(self): # all type check null @@ -270,12 +286,21 @@ class TDTestCase: # insert to child table d1 data def insert_data(self, tbname, rows): ts = 1670000000000 + sqls = "" + batch_size = 300 for i in range(self.child_count): for j in range(rows): tj = j % 128 cols = f'{tj},{tj},{j},{j},{tj},{tj},{j},{j},{j}.000{j},{j}.000{j},true,"var{j}","nch{j}",now' sql = f'insert into {tbname}{i} values({ts+j},{cols});' - tdSql.execute(sql) + sqls += sql + if j % batch_size == 0: + tdSql.execute(sqls) + tdLog.info(f" child table={i} rows={j} insert data.") + sqls = "" + # end + if sqls != "": + tdSql.execute(sqls) # partial columns upate sql = f'insert into {tbname}0(ts, col1, col9, col11) values(now, 100, 200, 0)' @@ -286,7 +311,7 @@ class TDTestCase: tdSql.execute(sql) sql = f'insert into {tbname}0(ts) values(now)' tdSql.execute(sql) - tdLog.info(f" insert {rows} for each child table.") + tdLog.info(f" insert {rows} to child table {self.child_count} .") # run @@ -294,10 +319,11 @@ class TDTestCase: # var stable = "meters" tbname = "d" - count = 10 + count = 10000 + rows = 1000 # do self.create_table(stable, tbname, count) - self.insert_data(tbname, 10) + self.insert_data(tbname, rows) # scalar self.create_scalar_udfpy() From 235ef431d25ba972e10aa45083a7563784b84463 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 24 Mar 2023 10:35:22 +0800 Subject: [PATCH 12/71] feat: not allowed to set WAL_RETENTION_PERIOD as zero when there are topics defined --- source/dnode/mnode/impl/inc/mndTopic.h | 1 + source/dnode/mnode/impl/src/mndDb.c | 12 ++++++++++++ source/dnode/mnode/impl/src/mndTopic.c | 4 ++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndTopic.h b/source/dnode/mnode/impl/inc/mndTopic.h index 8ed7fc6a11..b2b86775ab 100644 --- a/source/dnode/mnode/impl/inc/mndTopic.h +++ b/source/dnode/mnode/impl/inc/mndTopic.h @@ -32,6 +32,7 @@ bool mndTopicExistsForDb(SMnode *pMnode, SDbObj *pDb); const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]); int32_t mndSetTopicCommitLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic); +int32_t mndGetNumOfTopics(SMnode *pMnode, char *dbName, int32_t *pNumOfTopics); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index eeb78fe576..27813826ba 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -849,6 +849,18 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { goto _OVER; } + int32_t numOfTopics = 0; + if (mndGetNumOfTopics(pMnode, pDb->name, &numOfTopics) != 0) { + goto _OVER; + } + + if (numOfTopics != 0 && alterReq.walRetentionPeriod == 0) { + terrno = TSDB_CODE_MND_INVALID_DB_OPTION; + mError("db:%s, not allowed to set WAL_RETENTION_PERIOD 0 when there are topics defined. 
numOfTopics:%d", pDb->name, + numOfTopics); + goto _OVER; + } + memcpy(&dbObj, pDb, sizeof(SDbObj)); if (dbObj.cfg.pRetensions != NULL) { dbObj.cfg.pRetensions = taosArrayDup(pDb->cfg.pRetensions, NULL); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 991f1099a6..6c284815f9 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -793,7 +793,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { return TSDB_CODE_ACTION_IN_PROGRESS; } -static int32_t mndGetNumOfTopics(SMnode *pMnode, char *dbName, int32_t *pNumOfTopics) { +int32_t mndGetNumOfTopics(SMnode *pMnode, char *dbName, int32_t *pNumOfTopics) { *pNumOfTopics = 0; SSdb *pSdb = pMnode->pSdb; @@ -943,4 +943,4 @@ int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { return code; } -#endif \ No newline at end of file +#endif From 22b904b71100e2d3b0ab36f996fd3285f136d716 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 24 Mar 2023 11:55:02 +0800 Subject: [PATCH 13/71] feat: not allowed to create a topic when WAL_RETENTION_PERIOD is zero --- source/dnode/mnode/impl/src/mndTopic.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 6c284815f9..de2aee6dcc 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -605,6 +605,12 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { goto _OVER; } + if (pDb->cfg.walRetentionPeriod == 0) { + terrno = TSDB_CODE_MND_INVALID_DB_OPTION; + mError("db:%s, not allowed to create topic when WAL_RETENTION_PERIOD is zero", pDb->name); + goto _OVER; + } + code = mndCreateTopic(pMnode, pReq, &createTopicReq, pDb, pReq->info.conn.user); if (code == 0) { code = TSDB_CODE_ACTION_IN_PROGRESS; From 17f948deca8101c74b7f30575b8d2e1dcc55ce5c Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Fri, 24 Mar 2023 18:57:07 +0800 Subject: [PATCH 14/71] test: add string concat function --- tests/system-test/0-others/udfpy/af_count.py | 29 ++++ tests/system-test/0-others/udfpy/af_sum.py | 18 ++- .../0-others/udfpy/sf_concat_nch.py | 23 +++ .../0-others/udfpy/sf_concat_var.py | 22 +++ tests/system-test/0-others/udfpy_main.py | 143 ++++++++++++++---- 5 files changed, 199 insertions(+), 36 deletions(-) create mode 100644 tests/system-test/0-others/udfpy/af_count.py create mode 100644 tests/system-test/0-others/udfpy/sf_concat_nch.py create mode 100644 tests/system-test/0-others/udfpy/sf_concat_var.py diff --git a/tests/system-test/0-others/udfpy/af_count.py b/tests/system-test/0-others/udfpy/af_count.py new file mode 100644 index 0000000000..285ef96b55 --- /dev/null +++ b/tests/system-test/0-others/udfpy/af_count.py @@ -0,0 +1,29 @@ +import pickle + +def init(): + pass + +def destroy(): + pass + +def start(): + return pickle.dumps([]) + +def finish(buf): + counts = pickle.loads(buf) + all_count = 0 + for count in counts: + all_count += count + + return all_count + +def reduce(datablock, buf): + (rows, cols) = datablock.shape() + counts = pickle.loads(buf) + batch_count = 0 + for i in range(rows): + val = datablock.data(i, 0) + if val is not None: + batch_count += 1 + counts.append(batch_count) + return pickle.dumps(counts) diff --git a/tests/system-test/0-others/udfpy/af_sum.py b/tests/system-test/0-others/udfpy/af_sum.py index e32cf3fa31..ac7aa16924 100644 --- a/tests/system-test/0-others/udfpy/af_sum.py +++ b/tests/system-test/0-others/udfpy/af_sum.py @@ 
-11,18 +11,26 @@ def start(): def finish(buf): sums = pickle.loads(buf) - all = 0 + all = None for sum in sums: - all += sum + if all is None: + all = sum + else: + all += sum return all def reduce(datablock, buf): (rows, cols) = datablock.shape() sums = pickle.loads(buf) - sum = 0 + sum = None for i in range(rows): val = datablock.data(i, 0) if val is not None: - sum += val - sums.append(sum) + if sum is None: + sum = val + else: + sum += val + + if sum is not None: + sums.append(sum) return pickle.dumps(sums) diff --git a/tests/system-test/0-others/udfpy/sf_concat_nch.py b/tests/system-test/0-others/udfpy/sf_concat_nch.py new file mode 100644 index 0000000000..c64bfa8ad3 --- /dev/null +++ b/tests/system-test/0-others/udfpy/sf_concat_nch.py @@ -0,0 +1,23 @@ +# init +def init(): + pass + +# destroy +def destroy(): + pass + +def process(block): + (nrows, ncols) = block.shape() + results = [] + for i in range(nrows): + row = [] + for j in range(ncols): + val = block.data(i, j) + if val is None: + return [None] + row.append(val.decode('utf_32_le')) + row_str = ''.join(row) + results.append(row_str.encode('utf_32_le')) + return results + + diff --git a/tests/system-test/0-others/udfpy/sf_concat_var.py b/tests/system-test/0-others/udfpy/sf_concat_var.py new file mode 100644 index 0000000000..0a63821aa7 --- /dev/null +++ b/tests/system-test/0-others/udfpy/sf_concat_var.py @@ -0,0 +1,22 @@ +# init +def init(): + pass + +# destroy +def destroy(): + pass + +def process(block): + (nrows, ncols) = block.shape() + results = [] + for i in range(nrows): + row = [] + for j in range(ncols): + val = block.data(i, j) + if val is None: + return [None] + row.append(val.decode('utf-8')) + results.append(''.join(row)) + return results + + diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 75a7e8b308..b4fd77f93a 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -22,6 +22,33 @@ import random import os +class PerfDB: + def __init__(self): + self.sqls = [] + self.spends = [] + + # execute + def execute(self, sql): + print(f" perfdb execute {sql}") + stime = time.time() + ret = tdSql.execute(sql, 1) + spend = time.time() - stime + + self.sqls.append(sql) + self.spends.append(spend) + return ret + + # query + def query(self, sql): + print(f" perfdb query {sql}") + start = time.time() + ret = tdSql.query(sql, None, 1) + spend = time.time() - start + self.sqls.append(sql) + self.spends.append(spend) + return ret + + class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) @@ -47,8 +74,7 @@ class TDTestCase: 'col10': 'double', 'col11': 'bool', 'col12': 'varchar(20)', - 'col13': 'nchar(20)', - 'col14': 'timestamp' + 'col13': 'nchar(100)', } self.tag_dict = { 't1': 'tinyint', @@ -63,8 +89,7 @@ class TDTestCase: 't10': 'double', 't11': 'bool', 't12': 'varchar(20)', - 't13': 'nchar(20)', - 't14': 'timestamp' + 't13': 'nchar(100)', } def set_stb_sql(self,stbname,column_dict,tag_dict): @@ -93,7 +118,7 @@ class TDTestCase: # create child table for i in range(count): ti = i % 128 - tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}",now' + tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}"' sql = f'create table {tbname}{i} using {stbname} tags({tags});' tdSql.execute(sql) if i % batch_size == 0: @@ -125,7 +150,7 @@ class TDTestCase: 'sf10': 'double', 'sf11': 'bool', 'sf12': 'varchar(20)', - 'sf13': 'nchar(20)' 
+ 'sf13': 'nchar(100)' } # agg function self.agg_funs = { @@ -141,11 +166,11 @@ class TDTestCase: 'af10': 'double', 'af11': 'bool', 'af12': 'varchar(20)', - 'af13': 'nchar(20)', + 'af13': 'nchar(100)', 'af14': 'timestamp' } - # files + # multi_args self.create_sf_dicts(self.scalar_funs, "sf_origin.py") fun_name = "sf_multi_args" self.create_udf_sf(fun_name, f'{fun_name}.py', "binary(1024)") @@ -154,6 +179,12 @@ class TDTestCase: for col_name, col_type in self.column_dict.items(): self.create_udf_sf(f"sf_null_{col_name}", "sf_null.py", col_type) + # concat + fun_name = "sf_concat_var" + self.create_udf_sf(fun_name, f'{fun_name}.py', "varchar(1024)") + fun_name = "sf_concat_nch" + self.create_udf_sf(fun_name, f'{fun_name}.py', "nchar(1024)") + # fun_name == fun_name.py def create_udf_sf(self, fun_name, file_name, out_type): @@ -200,15 +231,14 @@ class TDTestCase: cols = list(self.column_dict.keys()) + list(self.tag_dict.keys()) cols.remove("col13") cols.remove("t13") + cols.remove("ts") ncols = len(cols) print(cols) - for i in range(2, ncols): - print(i) sample = random.sample(cols, i) print(sample) cols_name = ','.join(sample) - sql = f'select sf_multi_args({cols_name}),{cols_name} from {self.stbname}' + sql = f'select sf_multi_args({cols_name}),{cols_name} from {self.stbname} limit 10' self.verify_same_multi_values(sql) @@ -218,10 +248,10 @@ class TDTestCase: for col_name, col_type in self.column_dict.items(): for fun_name, out_type in self.scalar_funs.items(): if col_type == out_type : - sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname}' + sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname} limit 10' tdLog.info(sql) self.verify_same_value(sql) - sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc' + sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc limit 10' tdLog.info(sql) self.verify_same_value(sql) @@ -229,12 +259,22 @@ class TDTestCase: self.query_multi_args() # all type check null - for col_name, col_type in self.column_dict.items(): + for col_name, col_type in self.column_dict.items(): fun_name = f"sf_null_{col_name}" sql = f'select {fun_name}({col_name}) from {self.stbname}' tdSql.query(sql) - tdSql.checkData(0, 0, "None") + if col_type != "timestamp": + tdSql.checkData(0, 0, "None") + else: + val = tdSql.getData(0, 0) + if val is not None: + tdLog.exit(f" check {sql} not expect None.") + # concat + sql = f'select sf_concat_var(col12, t12), concat(col12, t12) from {self.stbname}' + self.verify_same_value(sql) + sql = f'select sf_concat_nch(col13, t13), concat(col13, t13) from {self.stbname}' + self.verify_same_value(sql) # create aggregate def create_aggr_udfpy(self): @@ -255,6 +295,17 @@ class TDTestCase: self.create_udf_af(fun_name, file_name, f"float", 10*1024) fun_name = "af_sum_int" self.create_udf_af(fun_name, file_name, f"int", 10*1024) + fun_name = "af_sum_bigint" + self.create_udf_af(fun_name, file_name, f"bigint", 10*1024) + + # count + file_name = "af_count.py" + fun_name = "af_count_float" + self.create_udf_af(fun_name, file_name, f"float", 10*1024) + fun_name = "af_count_int" + self.create_udf_af(fun_name, file_name, f"int", 10*1024) + fun_name = "af_count_bigint" + self.create_udf_af(fun_name, file_name, f"bigint", 10*1024) # query aggregate @@ -264,7 +315,12 @@ class TDTestCase: fun_name = f"af_null_{col_name}" sql = f'select {fun_name}({col_name}) from {self.stbname}' tdSql.query(sql) - 
tdSql.checkData(0, 0, "None") + if col_type != "timestamp": + tdSql.checkData(0, 0, "None") + else: + val = tdSql.getData(0, 0) + if val is not None: + tdLog.exit(f" check {sql} not expect None.") # min sql = f'select min(col3), af_min_int(col3) from {self.stbname}' @@ -275,32 +331,55 @@ class TDTestCase: self.verify_same_value(sql) # sum - sql = f'select sum(col3), af_sum_int(col3) from {self.stbname}' + sql = f'select sum(col1), af_sum_int(col1) from d0' self.verify_same_value(sql) - sql = f'select sum(col7), af_sum_int(col7) from {self.stbname}' + sql = f'select sum(col3), af_sum_bigint(col3) from {self.stbname}' self.verify_same_value(sql) sql = f'select sum(col9), af_sum_float(col9) from {self.stbname}' self.verify_same_value(sql) - + + # count + sql = f'select count(col1), af_count_int(col1) from {self.stbname}' + self.verify_same_value(sql) + sql = f'select count(col7), af_count_bigint(col7) from {self.stbname}' + self.verify_same_value(sql) + sql = f'select count(col8), af_count_float(col8) from {self.stbname}' + self.verify_same_value(sql) + + # nest + sql = f'select a+1000,b+1000 from (select count(col8) as a, af_count_float(col8) as b from {self.stbname})' + self.verify_same_value(sql) + # group by + sql = f'select a+1000,b+1000 from (select count(col8) as a, af_count_float(col8) as b from {self.stbname} group by tbname)' + self.verify_same_value(sql) + # two filed expr + sql = f'select sum(col1+col2),af_sum_float(col1+col2) from {self.stbname};' + self.verify_same_value(sql) + # interval + sql = f'select af_sum_float(col2+col3),sum(col3+col2) from {self.stbname} interval(1s)' + self.verify_same_value(sql) + # insert to child table d1 data def insert_data(self, tbname, rows): ts = 1670000000000 - sqls = "" + values = "" batch_size = 300 + child_name = "" for i in range(self.child_count): for j in range(rows): tj = j % 128 - cols = f'{tj},{tj},{j},{j},{tj},{tj},{j},{j},{j}.000{j},{j}.000{j},true,"var{j}","nch{j}",now' - sql = f'insert into {tbname}{i} values({ts+j},{cols});' - sqls += sql - if j % batch_size == 0: - tdSql.execute(sqls) + cols = f'{tj},{tj},{j},{j},{tj},{tj},{j},{j},{j}.000{j},{j}.000{j},true,"var{j}","nch{j}涛思数据codepage is utf_32_le"' + value = f'({ts+j},{cols})' + if values == "": + values = value + else: + values += f",{value}" + if j % batch_size == 0 or j + 1 == rows: + sql = f'insert into {tbname}{i} values {values};' + tdSql.execute(sql) tdLog.info(f" child table={i} rows={j} insert data.") - sqls = "" - # end - if sqls != "": - tdSql.execute(sqls) + values = "" # partial columns upate sql = f'insert into {tbname}0(ts, col1, col9, col11) values(now, 100, 200, 0)' @@ -319,8 +398,8 @@ class TDTestCase: # var stable = "meters" tbname = "d" - count = 10000 - rows = 1000 + count = 3 + rows = 1000000 # do self.create_table(stable, tbname, count) self.insert_data(tbname, rows) @@ -333,6 +412,8 @@ class TDTestCase: self.create_aggr_udfpy() self.query_aggr_udfpy() + # show performance + def stop(self): tdSql.close() From d4c63827c2427ba4569bbf664237169e9fd88b4b Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Fri, 24 Mar 2023 19:44:08 +0800 Subject: [PATCH 15/71] test: case all passed --- tests/system-test/0-others/udfpy/sf_concat_nch.py | 10 +++++++--- tests/system-test/0-others/udfpy/sf_concat_var.py | 8 ++++++-- tests/system-test/0-others/udfpy_main.py | 10 +++++----- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/tests/system-test/0-others/udfpy/sf_concat_nch.py b/tests/system-test/0-others/udfpy/sf_concat_nch.py index 
c64bfa8ad3..84d8eb2c96 100644 --- a/tests/system-test/0-others/udfpy/sf_concat_nch.py +++ b/tests/system-test/0-others/udfpy/sf_concat_nch.py @@ -14,10 +14,14 @@ def process(block): for j in range(ncols): val = block.data(i, j) if val is None: - return [None] + row = None + break row.append(val.decode('utf_32_le')) - row_str = ''.join(row) - results.append(row_str.encode('utf_32_le')) + if row is None: + results.append(None) + else: + row_str = ''.join(row) + results.append(row_str.encode('utf_32_le')) return results diff --git a/tests/system-test/0-others/udfpy/sf_concat_var.py b/tests/system-test/0-others/udfpy/sf_concat_var.py index 0a63821aa7..fc8292c718 100644 --- a/tests/system-test/0-others/udfpy/sf_concat_var.py +++ b/tests/system-test/0-others/udfpy/sf_concat_var.py @@ -14,9 +14,13 @@ def process(block): for j in range(ncols): val = block.data(i, j) if val is None: - return [None] + row = None + break row.append(val.decode('utf-8')) - results.append(''.join(row)) + if row is None: + results.append(None) + else: + results.append(''.join(row)) return results diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index b4fd77f93a..a2176a041c 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -73,7 +73,7 @@ class TDTestCase: 'col9': 'float', 'col10': 'double', 'col11': 'bool', - 'col12': 'varchar(20)', + 'col12': 'varchar(120)', 'col13': 'nchar(100)', } self.tag_dict = { @@ -88,7 +88,7 @@ class TDTestCase: 't9': 'float', 't10': 'double', 't11': 'bool', - 't12': 'varchar(20)', + 't12': 'varchar(120)', 't13': 'nchar(100)', } @@ -149,7 +149,7 @@ class TDTestCase: 'sf9': 'float', 'sf10': 'double', 'sf11': 'bool', - 'sf12': 'varchar(20)', + 'sf12': 'varchar(120)', 'sf13': 'nchar(100)' } # agg function @@ -165,7 +165,7 @@ class TDTestCase: 'af9': 'float', 'af10': 'double', 'af11': 'bool', - 'af12': 'varchar(20)', + 'af12': 'varchar(120)', 'af13': 'nchar(100)', 'af14': 'timestamp' } @@ -399,7 +399,7 @@ class TDTestCase: stable = "meters" tbname = "d" count = 3 - rows = 1000000 + rows = 1000 # do self.create_table(stable, tbname, count) self.insert_data(tbname, rows) From f95f6c9f788ef4c362a4f1b3cd668618e43ffe8c Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 25 Mar 2023 09:15:13 +0800 Subject: [PATCH 16/71] feat: auto tab add create function --- tools/shell/src/shellAuto.c | 57 +++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index a8986351b7..318d3574d2 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -92,7 +92,7 @@ SWords shellCommands[] = { {"create qnode on dnode ;", 0, 0, NULL}, {"create stream into as select", 0, 0, NULL}, // 26 append sub sql {"create topic as select", 0, 0, NULL}, // 27 append sub sql - {"create function ", 0, 0, NULL}, + {"create function as outputtype language ", 0, 0, NULL}, {"create user pass sysinfo 0;", 0, 0, NULL}, {"create user pass sysinfo 1;", 0, 0, NULL}, {"describe ", 0, 0, NULL}, @@ -105,7 +105,7 @@ SWords shellCommands[] = { {"drop qnode on dnode ;", 0, 0, NULL}, {"drop user ;", 0, 0, NULL}, // 40 - {"drop function", 0, 0, NULL}, + {"drop function ;", 0, 0, NULL}, {"drop consumer group on ", 0, 0, NULL}, {"drop topic ;", 0, 0, NULL}, {"drop stream ;", 0, 0, NULL}, @@ -272,6 +272,8 @@ char* key_systable[] = { "ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges", 
"perf_connections", "perf_queries", "perf_consumers", "perf_trans", "perf_apps"}; +char* language[] = {"\'Python\'", "\'C\'"} + // // ------- global variant define --------- // @@ -291,25 +293,29 @@ bool waitAutoFill = false; #define WT_VAR_USERNAME 4 #define WT_VAR_TOPIC 5 #define WT_VAR_STREAM 6 -#define WT_VAR_ALLTABLE 7 -#define WT_VAR_FUNC 8 -#define WT_VAR_KEYWORD 9 -#define WT_VAR_TBACTION 10 -#define WT_VAR_DBOPTION 11 -#define WT_VAR_ALTER_DBOPTION 12 -#define WT_VAR_DATATYPE 13 -#define WT_VAR_KEYTAGS 14 -#define WT_VAR_ANYWORD 15 -#define WT_VAR_TBOPTION 16 -#define WT_VAR_USERACTION 17 -#define WT_VAR_KEYSELECT 18 -#define WT_VAR_SYSTABLE 19 +#define WT_VAR_UDFNAME 7 -#define WT_VAR_CNT 20 - -#define WT_FROM_DB_MAX 6 // max get content from db +#define WT_FROM_DB_MAX 7 // max get content from db #define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1) +#define WT_VAR_ALLTABLE 8 +#define WT_VAR_FUNC 9 +#define WT_VAR_KEYWORD 10 +#define WT_VAR_TBACTION 11 +#define WT_VAR_DBOPTION 12 +#define WT_VAR_ALTER_DBOPTION 13 +#define WT_VAR_DATATYPE 14 +#define WT_VAR_KEYTAGS 15 +#define WT_VAR_ANYWORD 16 +#define WT_VAR_TBOPTION 17 +#define WT_VAR_USERACTION 18 +#define WT_VAR_KEYSELECT 19 +#define WT_VAR_SYSTABLE 20 +#define WT_VAR_LANGUAGE 21 + +#define WT_VAR_CNT 22 + + #define WT_TEXT 0xFF char dbName[256] = ""; // save use database name; @@ -319,13 +325,13 @@ TdThreadMutex tiresMutex; // save thread handle obtain var name from db server TdThread* threads[WT_FROM_DB_CNT]; // obtain var name with sql from server -char varTypes[WT_VAR_CNT][64] = {"", "", "", "", "", - "", "", "", "", "", - "", "", "", "", "", - "", "", "", ""}; +char varTypes[WT_VAR_CNT][64] = { + "", "", "", "", "", "", "", + "", "", "", "", "", "", "", + "", "", "", "", "", "", "sys_table", "language"}; char varSqls[WT_FROM_DB_CNT][64] = {"show databases;", "show stables;", "show tables;", "show dnodes;", - "show users;", "show topics;", "show streams;"}; + "show users;", "show topics;", "show streams;", "show functions;"}; // var words current cursor, if user press any one key except tab, cursorVar can be reset to -1 int cursorVar = -1; @@ -390,7 +396,7 @@ void showHelp() { create qnode on dnode ;\n\ create stream into as select ...\n\ create topic as select ...\n\ - create function ...\n\ + create function as outputtype language \'C\' | \'Python\' ;\n\ create user pass ...\n\ ----- D ----- \n\ describe \n\ @@ -401,7 +407,7 @@ void showHelp() { drop mnode on dnode ;\n\ drop qnode on dnode ;\n\ drop user ;\n\ - drop function ;\n\ + drop function ;\n\ drop consumer group ... 
\n\ drop topic ;\n\ drop stream ;\n\ @@ -643,6 +649,7 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_USERACTION, user_actions, sizeof(user_actions) / sizeof(char*)); GenerateVarType(WT_VAR_KEYSELECT, key_select, sizeof(key_select) / sizeof(char*)); GenerateVarType(WT_VAR_SYSTABLE, key_systable, sizeof(key_systable) / sizeof(char*)); + GenerateVarType(WT_VAR_LANGUAGE, key_systable, sizeof(language) / sizeof(char*)); return true; } From ddc1689759a97635af0cb4ad710edead6a2f6580 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 25 Mar 2023 09:20:39 +0800 Subject: [PATCH 17/71] feat: auto tab add create function --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 318d3574d2..14dcef86e2 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -272,7 +272,7 @@ char* key_systable[] = { "ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges", "perf_connections", "perf_queries", "perf_consumers", "perf_trans", "perf_apps"}; -char* language[] = {"\'Python\'", "\'C\'"} +char* language[] = {"\'Python\'", "\'C\'"}; // // ------- global variant define --------- From d6b972c2a143dcbe74007a20d5fe2ca27c0b7b17 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 25 Mar 2023 09:29:01 +0800 Subject: [PATCH 18/71] feat: auto tab add create function1 --- tools/shell/src/shellAuto.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 14dcef86e2..554f3edf1e 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -93,6 +93,7 @@ SWords shellCommands[] = { {"create stream into as select", 0, 0, NULL}, // 26 append sub sql {"create topic as select", 0, 0, NULL}, // 27 append sub sql {"create function as outputtype language ", 0, 0, NULL}, + {"create aggregate function as outputtype bufsize language ", 0, 0, NULL}, {"create user pass sysinfo 0;", 0, 0, NULL}, {"create user pass sysinfo 1;", 0, 0, NULL}, {"describe ", 0, 0, NULL}, @@ -397,6 +398,7 @@ void showHelp() { create stream into as select ...\n\ create topic as select ...\n\ create function as outputtype language \'C\' | \'Python\' ;\n\ + create aggregate function as outputtype bufsize language \'C\' | \'Python\';\n\ create user pass ...\n\ ----- D ----- \n\ describe \n\ From 6a16926098bd66982539c0b36abd0d76c05aa395 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 25 Mar 2023 09:35:06 +0800 Subject: [PATCH 19/71] feat: auto tab add create function1 --- tools/shell/src/shellAuto.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 554f3edf1e..7b00984734 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -92,8 +92,8 @@ SWords shellCommands[] = { {"create qnode on dnode ;", 0, 0, NULL}, {"create stream into as select", 0, 0, NULL}, // 26 append sub sql {"create topic as select", 0, 0, NULL}, // 27 append sub sql - {"create function as outputtype language ", 0, 0, NULL}, - {"create aggregate function as outputtype bufsize language ", 0, 0, NULL}, + {"create function as outputtype language ", 0, 0, NULL}, + {"create aggregate function as outputtype bufsize language ", 0, 0, NULL}, {"create user pass sysinfo 0;", 0, 0, NULL}, {"create user pass sysinfo 1;", 0, 0, NULL}, {"describe ", 0, 0, NULL}, @@ -273,7 +273,7 @@ char* key_systable[] = { 
"ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges", "perf_connections", "perf_queries", "perf_consumers", "perf_trans", "perf_apps"}; -char* language[] = {"\'Python\'", "\'C\'"}; +char* udf_language[] = {"\'Python\'", "\'C\'"}; // // ------- global variant define --------- @@ -651,7 +651,7 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_USERACTION, user_actions, sizeof(user_actions) / sizeof(char*)); GenerateVarType(WT_VAR_KEYSELECT, key_select, sizeof(key_select) / sizeof(char*)); GenerateVarType(WT_VAR_SYSTABLE, key_systable, sizeof(key_systable) / sizeof(char*)); - GenerateVarType(WT_VAR_LANGUAGE, key_systable, sizeof(language) / sizeof(char*)); + GenerateVarType(WT_VAR_LANGUAGE, udf_language, sizeof(udf_language) / sizeof(char*)); return true; } From a52af4a07e64b59e84e2422f1891f606f9cde375 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 25 Mar 2023 10:21:12 +0800 Subject: [PATCH 20/71] feat: auto tab fix udf_language --- tools/shell/src/shellAuto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 7b00984734..b2b9d122fa 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -109,7 +109,7 @@ SWords shellCommands[] = { {"drop function ;", 0, 0, NULL}, {"drop consumer group on ", 0, 0, NULL}, {"drop topic ;", 0, 0, NULL}, - {"drop stream ;", 0, 0, NULL}, + {"drop stream ;", 0, {"explain select", 0, 0, NULL}, // 44 append sub sql {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, @@ -329,7 +329,7 @@ TdThread* threads[WT_FROM_DB_CNT]; char varTypes[WT_VAR_CNT][64] = { "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "sys_table", "language"}; + "", "", "", "", "", "", "sys_table", "udf_language"}; char varSqls[WT_FROM_DB_CNT][64] = {"show databases;", "show stables;", "show tables;", "show dnodes;", "show users;", "show topics;", "show streams;", "show functions;"}; From 11be6c6f85a9be39ab93b29086fe7e0cf1ef5601 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 25 Mar 2023 10:25:14 +0800 Subject: [PATCH 21/71] feat: auto tab fix udf_language1 --- tools/shell/src/shellAuto.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index b2b9d122fa..b28855ad2e 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -84,8 +84,7 @@ SWords shellCommands[] = { {"create table using tags(", 0, 0, NULL}, {"create database " " " - " ;", - 0, 0, NULL}, + " ;", 0, 0, NULL}, {"create dnode ", 0, 0, NULL}, {"create index on ()", 0, 0, NULL}, {"create mnode on dnode ;", 0, 0, NULL}, @@ -109,7 +108,7 @@ SWords shellCommands[] = { {"drop function ;", 0, 0, NULL}, {"drop consumer group on ", 0, 0, NULL}, {"drop topic ;", 0, 0, NULL}, - {"drop stream ;", 0, + {"drop stream ;", 0, NULL}, {"explain select", 0, 0, NULL}, // 44 append sub sql {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, @@ -329,7 +328,7 @@ TdThread* threads[WT_FROM_DB_CNT]; char varTypes[WT_VAR_CNT][64] = { "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "sys_table", "udf_language"}; + "", "", "", "", "", "", "", ""}; char varSqls[WT_FROM_DB_CNT][64] = {"show databases;", "show stables;", "show tables;", "show dnodes;", "show users;", "show topics;", "show streams;", "show functions;"}; From 018f922661e9cff36c062e069edf8459ae77c962 Mon Sep 17 00:00:00 2001 From: Alex Duan 
<417921451@qq.com> Date: Sat, 25 Mar 2023 10:26:31 +0800 Subject: [PATCH 22/71] feat: auto tab fix udf_language2 --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index b28855ad2e..140720af81 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -108,7 +108,7 @@ SWords shellCommands[] = { {"drop function ;", 0, 0, NULL}, {"drop consumer group on ", 0, 0, NULL}, {"drop topic ;", 0, 0, NULL}, - {"drop stream ;", 0, NULL}, + {"drop stream ;", 0, 0, NULL}, {"explain select", 0, 0, NULL}, // 44 append sub sql {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, From ffc3b7b27acd831d5d363ced1464ff20fbc6f47a Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Sat, 25 Mar 2023 11:03:23 +0800 Subject: [PATCH 23/71] enh: add the errorcode TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO --- include/util/taoserror.h | 1 + source/dnode/mnode/impl/src/mndDb.c | 2 +- source/dnode/mnode/impl/src/mndTopic.c | 2 +- source/util/src/terror.c | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 75b71409a8..ad8d4244b5 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -288,6 +288,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) // internal #define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // #define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B) +#define TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO TAOS_DEF_ERROR_CODE(0, 0x038C) // #define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) // 2.x // #define TSDB_CODE_MND_INVALID_DB_OPTION_KEEP TAOS_DEF_ERROR_CODE(0, 0x0391) // 2.x // #define TSDB_CODE_MND_INVALID_TOPIC TAOS_DEF_ERROR_CODE(0, 0x0392) // 2.x diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 27813826ba..65516781fc 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -855,7 +855,7 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { } if (numOfTopics != 0 && alterReq.walRetentionPeriod == 0) { - terrno = TSDB_CODE_MND_INVALID_DB_OPTION; + terrno = TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO; mError("db:%s, not allowed to set WAL_RETENTION_PERIOD 0 when there are topics defined. 
numOfTopics:%d", pDb->name, numOfTopics);
     goto _OVER;

diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index de2aee6dcc..f6da370916 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -606,7 +606,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
   }

   if (pDb->cfg.walRetentionPeriod == 0) {
-    terrno = TSDB_CODE_MND_INVALID_DB_OPTION;
+    terrno = TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO;
     mError("db:%s, not allowed to create topic when WAL_RETENTION_PERIOD is zero", pDb->name);
     goto _OVER;
   }
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 1f49f8f8b5..f586e54491 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -224,6 +224,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, "Invalid database name
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, "Too many databases for account")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, "Database in dropping status")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_EXIST, "Database not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO, "WAL retention period is zero")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_ACCT, "Invalid database account")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_OPTION_UNCHANGED, "Database options not changed")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_INDEX_NOT_EXIST, "Index not exist")

From 19883fdeedc3ede2173c90be56f5d215c07052c8 Mon Sep 17 00:00:00 2001
From: Benguang Zhao
Date: Fri, 24 Mar 2023 17:17:26 +0800
Subject: [PATCH 24/71] feat: alter database before creating topic in tests

---
 docs/examples/c/tmq_example.c | 2 +-
 docs/examples/csharp/optsJSON/Program.cs | 2 +-
 docs/examples/go/sub/main.go | 2 +-
 .../main/java/com/taos/example/SubscribeDemo.java | 2 +-
 .../com/taos/example/WebsocketSubscribeDemo.java | 2 +-
 docs/examples/python/conn_websocket_pandas.py | 2 +-
 docs/examples/python/connect_websocket_examples.py | 2 +-
 docs/examples/python/kafka_example_common.py | 2 +-
 docs/examples/python/tmq_example.py | 2 +-
 tests/docs-examples-test/python.sh | 2 +-
 tests/script/tsim/tmq/basic1.sim | 12 ++++++++++++
 tests/script/tsim/tmq/basic1Of2Cons.sim | 12 ++++++++++++
 tests/script/tsim/tmq/basic2.sim | 9 +++++++++
 tests/script/tsim/tmq/basic2Of2Cons.sim | 9 +++++++++
 tests/script/tsim/tmq/basic2Of2ConsOverlap.sim | 9 +++++++++
 tests/script/tsim/tmq/basic3.sim | 12 ++++++++++++
 tests/script/tsim/tmq/basic3Of2Cons.sim | 12 ++++++++++++
 tests/script/tsim/tmq/basic4.sim | 9 +++++++++
 tests/script/tsim/tmq/basic4Of2Cons.sim | 9 +++++++++
 tests/script/tsim/tmq/snapshot.sim | 12 ++++++++++++
 tests/script/tsim/tmq/snapshot1.sim | 9 +++++++++
 tests/script/tsim/tmq/topic.sim | 2 ++
 tests/script/tsim/user/privilege_db.sim | 6 +++---
 tests/script/tsim/user/privilege_topic.sim | 6 +++---
 tests/system-test/0-others/backquote_check.py | 8 ++++----
 tests/system-test/0-others/compatibility.py | 1 +
 tests/system-test/0-others/performance_schema.py | 8 ++++----
 tests/system-test/0-others/taosdShell.py | 4 ++--
 tests/system-test/0-others/testRoll.py | 6 +++---
 tests/system-test/0-others/user_manage.py | 1 +
 tests/system-test/0-others/walFileIdex.py | 2 +-
 tests/system-test/1-insert/drop.py | 8 ++++----
 tests/system-test/7-tmq/basic5.py | 3 ++-
 tests/system-test/7-tmq/create_wrong_topic.py | 3 ++-
 .../system-test/7-tmq/dataFromTsdbNWal-multiCtb.py | 1 +
 tests/system-test/7-tmq/dataFromTsdbNWal.py | 1 +
 tests/system-test/7-tmq/db.py | 2 +-
 .../7-tmq/dropDbR3ConflictTransaction.py | 1 +
 tests/system-test/7-tmq/schema.py | 4 ++--
 tests/system-test/7-tmq/stbFilter.py | 1 +
 tests/system-test/7-tmq/stbTagFilter-1ctb.py | 1 +
 tests/system-test/7-tmq/stbTagFilter-multiCtb.py | 1 +
 tests/system-test/7-tmq/subscribeDb.py | 12 ++++++------
 tests/system-test/7-tmq/subscribeDb0.py | 8 ++++----
 tests/system-test/7-tmq/subscribeDb1.py | 14 +++++++-------
 tests/system-test/7-tmq/subscribeDb2.py | 8 ++++----
 tests/system-test/7-tmq/subscribeDb3.py | 8 ++++----
 tests/system-test/7-tmq/subscribeDb4.py | 1 +
 tests/system-test/7-tmq/subscribeStb.py | 4 ++--
 tests/system-test/7-tmq/subscribeStb0.py | 4 ++--
 tests/system-test/7-tmq/subscribeStb1.py | 4 ++--
 tests/system-test/7-tmq/subscribeStb2.py | 4 ++--
 tests/system-test/7-tmq/subscribeStb3.py | 4 ++--
 tests/system-test/7-tmq/subscribeStb4.py | 4 ++--
 tests/system-test/7-tmq/tmq3mnodeSwitch.py | 1 +
 tests/system-test/7-tmq/tmqAlterSchema.py | 2 ++
 tests/system-test/7-tmq/tmqAutoCreateTbl.py | 1 +
 tests/system-test/7-tmq/tmqCheckData.py | 1 +
 tests/system-test/7-tmq/tmqCheckData1.py | 1 +
 .../7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py | 1 +
 tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py | 1 +
 ...tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py | 1 +
 .../7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py | 1 +
 tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py | 1 +
 tests/system-test/7-tmq/tmqConsFromTsdb.py | 1 +
 .../7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py | 1 +
 tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py | 1 +
 ...mqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py | 1 +
 .../7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py | 1 +
 .../system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py | 1 +
 tests/system-test/7-tmq/tmqConsFromTsdb1.py | 1 +
 tests/system-test/7-tmq/tmqConsumerGroup.py | 1 +
 tests/system-test/7-tmq/tmqDelete-1ctb.py | 1 +
 tests/system-test/7-tmq/tmqDelete-multiCtb.py | 1 +
 tests/system-test/7-tmq/tmqDnode.py | 2 +-
 tests/system-test/7-tmq/tmqDnodeRestart.py | 2 ++
 tests/system-test/7-tmq/tmqDnodeRestart1.py | 1 +
 tests/system-test/7-tmq/tmqDropNtb-snapshot0.py | 2 ++
 tests/system-test/7-tmq/tmqDropNtb-snapshot1.py | 2 ++
 tests/system-test/7-tmq/tmqDropStb.py | 1 +
 tests/system-test/7-tmq/tmqDropStbCtb.py | 1 +
 tests/system-test/7-tmq/tmqError.py | 2 +-
 tests/system-test/7-tmq/tmqModule.py | 2 +-
 tests/system-test/7-tmq/tmqShow.py | 1 +
 tests/system-test/7-tmq/tmqSubscribeStb-r3.py | 1 +
 .../system-test/7-tmq/tmqUdf-multCtb-snapshot0.py | 3 +++
 .../system-test/7-tmq/tmqUdf-multCtb-snapshot1.py | 3 +++
 tests/system-test/7-tmq/tmqUdf.py | 3 +++
 tests/system-test/7-tmq/tmqUpdate-1ctb.py | 1 +
 .../7-tmq/tmqUpdate-multiCtb-snapshot0.py | 1 +
 .../7-tmq/tmqUpdate-multiCtb-snapshot1.py | 1 +
 tests/system-test/7-tmq/tmqUpdate-multiCtb.py | 1 +
 tests/system-test/7-tmq/tmqUpdateWithConsume.py | 1 +
 tests/system-test/99-TDcase/TD-15517.py | 2 +-
 tests/system-test/99-TDcase/TD-15554.py | 2 +-
 tests/system-test/99-TDcase/TD-15557.py | 8 ++++----
 tests/system-test/99-TDcase/TD-15563.py | 10 +++++-----
 tests/system-test/99-TDcase/TD-16025.py | 4 ++--
 tests/system-test/99-TDcase/TD-16821.py | 1 +
 tests/system-test/99-TDcase/TD-17255.py | 4 ++++
 tests/system-test/99-TDcase/TD-17699.py | 1 +
 101 files changed, 271 insertions(+), 92 deletions(-)

diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c
index a3bade308a..d958428b8f 100644
--- a/docs/examples/c/tmq_example.c
+++ b/docs/examples/c/tmq_example.c
@@ -70,7 +70,7 @@ static int32_t init_env() {
   taos_free_result(pRes);

   // create database
-  pRes = taos_query(pConn, "create
database tmqdb wal_retention_period 3600"); if (taos_errno(pRes) != 0) { printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/docs/examples/csharp/optsJSON/Program.cs b/docs/examples/csharp/optsJSON/Program.cs index b67b5af62b..8dcc1dce92 100644 --- a/docs/examples/csharp/optsJSON/Program.cs +++ b/docs/examples/csharp/optsJSON/Program.cs @@ -54,7 +54,7 @@ namespace TDengineExample static void PrepareDatabase(IntPtr conn) { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE test"); + IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); if (TDengine.ErrorNo(res) != 0) { throw new Exception("failed to create database, reason: " + TDengine.Error(res)); diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index 1f7218936f..01bf5e6421 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -15,7 +15,7 @@ func main() { panic(err) } defer db.Close() - _, err = db.Exec("create database if not exists example_tmq") + _, err = db.Exec("create database if not exists example_tmq wal_retention_period 3600") if err != nil { panic(err) } diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java index e9af5e9ce0..8da6f77bae 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java @@ -35,7 +35,7 @@ public class SubscribeDemo { try (Statement statement = connection.createStatement()) { statement.executeUpdate("drop topic if exists " + TOPIC); statement.executeUpdate("drop database if exists " + DB_NAME); - statement.executeUpdate("create database " + DB_NAME); + statement.executeUpdate("create database " + DB_NAME + " wal_retention_period 3600"); statement.executeUpdate("use " + DB_NAME); statement.executeUpdate( "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))"); diff --git a/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java index d953a73641..83cb04f552 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java @@ -35,7 +35,7 @@ public class WebsocketSubscribeDemo { Statement statement = connection.createStatement()) { statement.executeUpdate("drop topic if exists " + TOPIC); statement.executeUpdate("drop database if exists " + DB_NAME); - statement.executeUpdate("create database " + DB_NAME); + statement.executeUpdate("create database " + DB_NAME + " wal_retention_period 3600"); statement.executeUpdate("use " + DB_NAME); statement.executeUpdate( "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))"); diff --git a/docs/examples/python/conn_websocket_pandas.py b/docs/examples/python/conn_websocket_pandas.py index eac386732c..5cad5384b2 100644 --- a/docs/examples/python/conn_websocket_pandas.py +++ b/docs/examples/python/conn_websocket_pandas.py @@ -4,7 +4,7 @@ import taos taos_conn = taos.connect() taos_conn.execute('drop database if exists power') -taos_conn.execute('create database if not exists power') +taos_conn.execute('create database if not exists power wal_retention_period 3600') taos_conn.execute("use power") taos_conn.execute( "CREATE STABLE 
power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") diff --git a/docs/examples/python/connect_websocket_examples.py b/docs/examples/python/connect_websocket_examples.py index c50976efbf..29452bbf9d 100644 --- a/docs/examples/python/connect_websocket_examples.py +++ b/docs/examples/python/connect_websocket_examples.py @@ -6,7 +6,7 @@ conn = taosws.connect("taosws://root:taosdata@localhost:6041") # ANCHOR: basic conn.execute("drop database if exists connwspy") -conn.execute("create database if not exists connwspy") +conn.execute("create database if not exists connwspy wal_retention_period 3600") conn.execute("use connwspy") conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)") conn.execute("create table if not exists tb1 using stb tags (1)") diff --git a/docs/examples/python/kafka_example_common.py b/docs/examples/python/kafka_example_common.py index 566748c94e..1c735abfc0 100644 --- a/docs/examples/python/kafka_example_common.py +++ b/docs/examples/python/kafka_example_common.py @@ -5,7 +5,7 @@ LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanD 'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale', 'California.SantaClara', 'California.Cupertino'] -CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1' +CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1 wal_retention_period 3600' USE_DATABASE_SQL = 'use {}' DROP_TABLE_SQL = 'drop table if exists meters' DROP_DATABASE_SQL = 'drop database if exists {}' diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index 6f7fb87c89..5b462fa153 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -6,7 +6,7 @@ def init_tmq_env(db, topic): conn = taos.connect() conn.execute("drop topic if exists {}".format(topic)) conn.execute("drop database if exists {}".format(db)) - conn.execute("create database if not exists {}".format(db)) + conn.execute("create database if not exists {} wal_retention_period 3600".format(db)) conn.select_db(db) conn.execute( "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh index a7501b54ed..2a44ee7552 100644 --- a/tests/docs-examples-test/python.sh +++ b/tests/docs-examples-test/python.sh @@ -52,7 +52,7 @@ python3 conn_rest_pandas.py taos -s "drop database if exists power" # 11 -taos -s "create database if not exists test" +taos -s "create database if not exists test wal_retention_period 3600" python3 connect_native_reference.py # 12 diff --git a/tests/script/tsim/tmq/basic1.sim b/tests/script/tsim/tmq/basic1.sim index 6880f290f5..b296290214 100644 --- a/tests/script/tsim/tmq/basic1.sim +++ b/tests/script/tsim/tmq/basic1.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -83,6 +86,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table 
consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -155,6 +161,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -226,6 +235,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic1Of2Cons.sim b/tests/script/tsim/tmq/basic1Of2Cons.sim index 11b645c4d1..4c966c370e 100644 --- a/tests/script/tsim/tmq/basic1Of2Cons.sim +++ b/tests/script/tsim/tmq/basic1Of2Cons.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -83,6 +86,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for stb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -186,6 +192,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ctb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -288,6 +297,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ntb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic2.sim b/tests/script/tsim/tmq/basic2.sim index dce73be592..6d49b46c85 100644 --- a/tests/script/tsim/tmq/basic2.sim +++ 
b/tests/script/tsim/tmq/basic2.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -118,6 +121,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -175,6 +181,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic2Of2Cons.sim b/tests/script/tsim/tmq/basic2Of2Cons.sim index 87559305ba..db660a0c93 100644 --- a/tests/script/tsim/tmq/basic2Of2Cons.sim +++ b/tests/script/tsim/tmq/basic2Of2Cons.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -147,6 +150,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ctb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -234,6 +240,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ntb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim index dda5e0059e..54e10126f1 100644 --- a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim +++ b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -168,6 +171,9 @@ sql create database $cdbName vgroups 1 
sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ctb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -259,6 +265,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ntb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic3.sim b/tests/script/tsim/tmq/basic3.sim index 8d677766d7..1e95fa90a5 100644 --- a/tests/script/tsim/tmq/basic3.sim +++ b/tests/script/tsim/tmq/basic3.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -83,6 +86,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -154,6 +160,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -225,6 +234,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic3Of2Cons.sim b/tests/script/tsim/tmq/basic3Of2Cons.sim index 4921c86c45..be0292c57b 100644 --- a/tests/script/tsim/tmq/basic3Of2Cons.sim +++ b/tests/script/tsim/tmq/basic3Of2Cons.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ 
-82,6 +85,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -197,6 +203,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -299,6 +308,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic4.sim b/tests/script/tsim/tmq/basic4.sim index 9b418f12f2..33a66628d0 100644 --- a/tests/script/tsim/tmq/basic4.sim +++ b/tests/script/tsim/tmq/basic4.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -115,6 +118,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -172,6 +178,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/basic4Of2Cons.sim b/tests/script/tsim/tmq/basic4Of2Cons.sim index f1755f732b..fdee3f633e 100644 --- a/tests/script/tsim/tmq/basic4Of2Cons.sim +++ b/tests/script/tsim/tmq/basic4Of2Cons.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all 
as select ts, c1, c2, c3 from stb @@ -156,6 +159,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -244,6 +250,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/snapshot.sim b/tests/script/tsim/tmq/snapshot.sim index de0468e6f2..81fff35224 100644 --- a/tests/script/tsim/tmq/snapshot.sim +++ b/tests/script/tsim/tmq/snapshot.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -83,6 +86,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -152,6 +158,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -223,6 +232,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/snapshot1.sim b/tests/script/tsim/tmq/snapshot1.sim index e586719db2..c79892ae1d 100644 --- a/tests/script/tsim/tmq/snapshot1.sim +++ b/tests/script/tsim/tmq/snapshot1.sim @@ -34,6 +34,9 @@ $showRow = 0 sql connect sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 + print == create topics from super table sql create topic topic_stb_column as select ts, c3 from stb 
sql create topic topic_stb_all as select ts, c1, c2, c3 from stb @@ -147,6 +150,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ctb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) @@ -224,6 +230,9 @@ sql create database $cdbName vgroups 1 sleep 500 sql use $cdbName +print == alter database +sql alter database $cdbName wal_retention_period 3600 + print == create consume info table and consume result table for ntb sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) diff --git a/tests/script/tsim/tmq/topic.sim b/tests/script/tsim/tmq/topic.sim index cb1e74798e..0bf0873e9f 100644 --- a/tests/script/tsim/tmq/topic.sim +++ b/tests/script/tsim/tmq/topic.sim @@ -39,6 +39,8 @@ endi sql use $dbName +print == alter database +sql alter database $dbName wal_retention_period 3600 print == create super table sql create table $stbPrefix (ts timestamp, c1 int, c2 float, c3 binary(16)) tags (t1 int) diff --git a/tests/script/tsim/user/privilege_db.sim b/tests/script/tsim/user/privilege_db.sim index b708fdab64..50eaa12108 100644 --- a/tests/script/tsim/user/privilege_db.sim +++ b/tests/script/tsim/user/privilege_db.sim @@ -4,13 +4,13 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create db -sql create database d1 vgroups 1; +sql create database d1 vgroups 1 wal_retention_period 3600; sql use d1 sql create table d1_stb (ts timestamp, i int) tags (j int) sql create topic d1_topic_1 as select ts, i from d1_stb -sql create database d2 vgroups 1; -sql create database d3 vgroups 1; +sql create database d2 vgroups 1 wal_retention_period 3600; +sql create database d3 vgroups 1 wal_retention_period 3600; sql select * from information_schema.ins_databases if $rows != 5 then return -1 diff --git a/tests/script/tsim/user/privilege_topic.sim b/tests/script/tsim/user/privilege_topic.sim index 9ce5bebec3..9d096a9780 100644 --- a/tests/script/tsim/user/privilege_topic.sim +++ b/tests/script/tsim/user/privilege_topic.sim @@ -4,9 +4,9 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create db -sql create database root_d1 vgroups 1; -sql create database root_d2 vgroups 1; -sql create database root_d3 vgroups 1; +sql create database root_d1 vgroups 1 wal_retention_period 3600; +sql create database root_d2 vgroups 1 wal_retention_period 3600; +sql create database root_d3 vgroups 1 wal_retention_period 3600; sql show user privileges if $rows != 1 then diff --git a/tests/system-test/0-others/backquote_check.py b/tests/system-test/0-others/backquote_check.py index 3357723253..be8590f913 100644 --- a/tests/system-test/0-others/backquote_check.py +++ b/tests/system-test/0-others/backquote_check.py @@ -29,7 +29,7 @@ class TDTestCase: self.streamname = 'stm' self.streamtb = 'stm_stb' def topic_name_check(self): - tdSql.execute(f'create database if not exists {self.dbname}') + tdSql.execute(f'create database if not exists {self.dbname} wal_retention_period 
3600') tdSql.execute(f'use {self.dbname}') tdSql.execute(f'create stable {self.stbname} (ts timestamp,c0 int) tags(t0 int)') for name in [self.dbname,self.stbname]: @@ -56,12 +56,12 @@ class TDTestCase: tdSql.execute(f'drop topic `{name}`') def db_name_check(self): - tdSql.execute(f'create database if not exists `{self.dbname}`') + tdSql.execute(f'create database if not exists `{self.dbname}` wal_retention_period 3600') tdSql.execute(f'use `{self.dbname}`') tdSql.execute(f'drop database {self.dbname}') def stream_name_check(self): - tdSql.execute(f'create database if not exists {self.dbname}') + tdSql.execute(f'create database if not exists {self.dbname} wal_retention_period 3600') tdSql.execute(f'use {self.dbname}') tdSql.execute(f'create stable {self.stbname} (ts timestamp,c0 int) tags(t0 int)') tdSql.execute(f'create stream `{self.streamname}` into `{self.streamtb}` as select count(*) from {self.stbname} interval(10s);') @@ -84,4 +84,4 @@ class TDTestCase: tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index d7dd4e20c0..f89b47e6e6 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -181,6 +181,7 @@ class TDTestCase: tdsql.execute("drop database if exists db") tdsql.execute("create database db") tdsql.execute("use db") + tdsql.execute("alter database db wal_retention_period 3600") tdsql.execute("create stable db.stb1 (ts timestamp, c1 int) tags (t1 int);") tdsql.execute("insert into db.ct1 using db.stb1 TAGS(1) values(now(),11);") tdsql.error(" insert into `db.ct2` using db.stb1 TAGS(9) values(now(),11);") diff --git a/tests/system-test/0-others/performance_schema.py b/tests/system-test/0-others/performance_schema.py index 70e86009a6..9d2a362254 100755 --- a/tests/system-test/0-others/performance_schema.py +++ b/tests/system-test/0-others/performance_schema.py @@ -75,7 +75,7 @@ class TDTestCase: def prepare_data(self): tdSql.execute(f"create database if not exists {self.dbname} vgroups 2") #1 query tdSql.execute(f'use {self.dbname}') #1 query - + tdsql.execute(f"alter database {self.dbname} wal_retention_period 3600") tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) #1 query for i in range(self.tbnum): #self.tbnum query @@ -92,12 +92,12 @@ class TDTestCase: def run(self): tdSqlTran = TDSql() tdSqlTran.init(self.obj.conn.cursor()) - tdSqlTran.execute(f"create database if not exists %s vgroups 20"%(self.obj.transTestDBName)) + tdSqlTran.execute(f"create database if not exists %s vgroups 20 wal_retention_period 3600"%(self.obj.transTestDBName)) tdSqlTran.execute(f"DROP DATABASE %s"%(self.obj.transTestDBName)) def init_tmq_env(self, db, topic): self.conn.execute("drop topic if exists {}".format(topic)) - self.conn.execute("create database if not exists {}".format(db)) + self.conn.execute("create database if not exists {} wal_retention_period 3600".format(db)) self.conn.select_db(db) self.conn.execute( "create stable if not exists stb_sub (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") @@ -211,4 +211,4 @@ class TDTestCase: tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file 
+tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/taosdShell.py b/tests/system-test/0-others/taosdShell.py index ce049b8515..3b9eb66705 100644 --- a/tests/system-test/0-others/taosdShell.py +++ b/tests/system-test/0-others/taosdShell.py @@ -129,7 +129,7 @@ class TDTestCase: # database\stb\tb\chiild-tb\rows\topics tdSql.execute("create user testpy pass 'testpy'") tdSql.execute("drop database if exists db0;") - tdSql.execute("create database db0;") + tdSql.execute("create database db0 wal_retention_period 3600;") tdSql.execute("use db0;") tdSql.execute("create table if not exists db0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") tdSql.execute("create table db0.ct1 using db0.stb tags(1000);") @@ -145,7 +145,7 @@ class TDTestCase: #stream tdSql.execute("drop database if exists source_db;") - tdSql.query("create database source_db vgroups 3;") + tdSql.query("create database source_db vgroups 3 wal_retention_period 3600;") tdSql.query("use source_db") tdSql.query("create table if not exists source_db.stb (ts timestamp, k int) tags (a int);") tdSql.query("create table source_db.ct1 using source_db.stb tags(1000);create table source_db.ct2 using source_db.stb tags(2000);create table source_db.ct3 using source_db.stb tags(3000);") diff --git a/tests/system-test/0-others/testRoll.py b/tests/system-test/0-others/testRoll.py index 56e5b3630a..c5489146dc 100644 --- a/tests/system-test/0-others/testRoll.py +++ b/tests/system-test/0-others/testRoll.py @@ -13,7 +13,7 @@ def init_tmq_env(db, topic): conn.execute("drop topic if exists {}".format(topic)) conn.execute("drop database if exists {}".format(db)) - conn.execute("create database if not exists {} replica 1 ".format(db)) + conn.execute("create database if not exists {} replica 1 wal_retention_period 3600".format(db)) conn.select_db(db) conn.execute( "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") @@ -37,7 +37,7 @@ def init_tmq_rest_env(db, topic): conn.execute("drop topic if exists {}".format(topic)) conn.execute("drop database if exists {}".format(db)) - conn.execute("create database if not exists {} replica 3 ".format(db)) + conn.execute("create database if not exists {} replica 3 wal_retention_period 3600".format(db)) conn.select_db(db) conn.execute( "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") @@ -80,4 +80,4 @@ if __name__ == '__main__': val = res.value() for block in val: - print(block.fetchall()) \ No newline at end of file + print(block.fetchall()) diff --git a/tests/system-test/0-others/user_manage.py b/tests/system-test/0-others/user_manage.py index 6f90a2873a..1e33d4bb1c 100644 --- a/tests/system-test/0-others/user_manage.py +++ b/tests/system-test/0-others/user_manage.py @@ -115,6 +115,7 @@ class TDTestCase: jiacy0_read_conn = taos.connect(user='jiacy0_read', password='123') jiacy0_write_conn = taos.connect(user='jiacy0_write', password='123') jiacy0_none_conn = taos.connect(user='jiacy0_none', password='123') + tdSql.execute('alter database db wal_retention_period 3600') tdSql.execute('create topic root_db as select * from db.stb') for user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: user.execute(f'create topic db_jiacy as select * from db.stb') diff --git a/tests/system-test/0-others/walFileIdex.py b/tests/system-test/0-others/walFileIdex.py index cd34c7e5e3..f8309519cd 100644 --- 
a/tests/system-test/0-others/walFileIdex.py +++ b/tests/system-test/0-others/walFileIdex.py @@ -58,7 +58,7 @@ class TDTestCase: #stream tdSql.execute("drop database if exists source_db;") - tdSql.query("create database source_db vgroups 3;") + tdSql.query("create database source_db vgroups 3 wal_retention_period 3600;") tdSql.query("use source_db") tdSql.query("create table if not exists source_db.stb (ts timestamp, k int) tags (a int);") tdSql.query("create table source_db.ct1 using source_db.stb tags(1000);create table source_db.ct2 using source_db.stb tags(2000);create table source_db.ct3 using source_db.stb tags(3000);") diff --git a/tests/system-test/1-insert/drop.py b/tests/system-test/1-insert/drop.py index a8bfea2741..9954b3557e 100644 --- a/tests/system-test/1-insert/drop.py +++ b/tests/system-test/1-insert/drop.py @@ -54,7 +54,7 @@ class TDTestCase: insert_list = [] self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts) def drop_ntb_check(self): - tdSql.execute(f'create database if not exists {self.dbname} replica {self.replicaVar}') + tdSql.execute(f'create database if not exists {self.dbname} replica {self.replicaVar} wal_retention_period 3600') tdSql.execute(f'use {self.dbname}') tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) self.insert_data(self.column_dict,self.ntbname,self.rowNum) @@ -80,7 +80,7 @@ class TDTestCase: tag_values = [ f'1' ] - tdSql.execute(f"create database if not exists {self.dbname} replica {self.replicaVar}") + tdSql.execute(f"create database if not exists {self.dbname} replica {self.replicaVar} wal_retention_period 3600") tdSql.execute(f'use {self.dbname}') tdSql.execute(self.setsql.set_create_stable_sql(stbname,self.column_dict,tag_dict)) for i in range(self.tbnum): @@ -116,7 +116,7 @@ class TDTestCase: tdSql.checkRows(self.tbnum) tdSql.execute(f'drop database {self.dbname}') def drop_topic_check(self): - tdSql.execute(f'create database {self.dbname} replica {self.replicaVar}') + tdSql.execute(f'create database {self.dbname} replica {self.replicaVar} wal_retention_period 3600') tdSql.execute(f'use {self.dbname}') stbname = tdCom.getLongName(5,"letters") topic_name = tdCom.getLongName(5,"letters") @@ -132,7 +132,7 @@ class TDTestCase: tdSql.execute(f'drop database {self.dbname}') def drop_stream_check(self): - tdSql.execute(f'create database {self.dbname} replica 1') + tdSql.execute(f'create database {self.dbname} replica 1 wal_retention_period 3600') tdSql.execute(f'use {self.dbname}') stbname = tdCom.getLongName(5,"letters") stream_name = tdCom.getLongName(5,"letters") diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py index 69cf378da3..080b431ffe 100644 --- a/tests/system-test/7-tmq/basic5.py +++ b/tests/system-test/7-tmq/basic5.py @@ -57,7 +57,7 @@ class TDTestCase: return cur def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -149,6 +149,7 @@ class TDTestCase: topicFromStb = 'topic_stb_column' topicFromCtb = 'topic_ctb_column' + tdSql.execute("alter database %s wal_retention_period 3600" % (parameterDict['dbName'])) tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, 
parameterDict['dbName'], parameterDict['stbName'])) tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName'])) diff --git a/tests/system-test/7-tmq/create_wrong_topic.py b/tests/system-test/7-tmq/create_wrong_topic.py index 39d519fec1..77d43149cd 100644 --- a/tests/system-test/7-tmq/create_wrong_topic.py +++ b/tests/system-test/7-tmq/create_wrong_topic.py @@ -44,6 +44,7 @@ class TDTestCase: def wrong_topic(self): tdSql.prepare() tdSql.execute('use db') + tdSql.execute('alter database db wal_retention_period 3600') stbname = f'db.{tdCom.getLongName(5, "letters")}' tag_dict = { 't0':'int' @@ -75,4 +76,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py index 808a4935e3..44f58279be 100644 --- a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py +++ b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py @@ -67,6 +67,7 @@ class TDTestCase: tdLog.info("flush db to let data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) + tdSql.execute("alter database %s wal_retention_period 3600"%(paraDict['dbName'])) return def tmqCase1(self): diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal.py b/tests/system-test/7-tmq/dataFromTsdbNWal.py index 8386c22cd0..0f4f1694c1 100644 --- a/tests/system-test/7-tmq/dataFromTsdbNWal.py +++ b/tests/system-test/7-tmq/dataFromTsdbNWal.py @@ -67,6 +67,7 @@ class TDTestCase: tdLog.info("flush db to let data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) + tdSql.execute("alter database %s wal_retention_period 3600"%(paraDict['dbName'])) return def tmqCase1(self): diff --git a/tests/system-test/7-tmq/db.py b/tests/system-test/7-tmq/db.py index 04f5aac559..e0d1e2c5b6 100644 --- a/tests/system-test/7-tmq/db.py +++ b/tests/system-test/7-tmq/db.py @@ -60,7 +60,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("drop database if exists %s "%(cdbName)) - tdSql.query("create database %s vgroups 1"%(cdbName)) + tdSql.query("create database %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) diff --git a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py index 4371a909c2..fa22cad726 100644 --- a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py +++ b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py @@ -134,6 +134,7 @@ class TDTestCase: paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl + tdSql.execute("alter database dbt wal_retention_period 3600") tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName']) diff --git a/tests/system-test/7-tmq/schema.py b/tests/system-test/7-tmq/schema.py index 04224fbc7e..95c1839964 100644 --- a/tests/system-test/7-tmq/schema.py +++ b/tests/system-test/7-tmq/schema.py @@ -60,7 +60,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): 
tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("drop database if exists %s "%(cdbName)) - tdSql.query("create database %s vgroups 1"%(cdbName)) + tdSql.query("create database %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -115,7 +115,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/stbFilter.py b/tests/system-test/7-tmq/stbFilter.py index 6b48a6d570..3f862ae047 100644 --- a/tests/system-test/7-tmq/stbFilter.py +++ b/tests/system-test/7-tmq/stbFilter.py @@ -45,6 +45,7 @@ class TDTestCase: tmqCom.initConsumerTable() tmqCom.create_database(tsql=tdSql, dbName=paraDict["dbName"],dropFlag=paraDict["dropFlag"], vgroups=paraDict['vgroups'],replica=paraDict['replica']) + tdSql.execute("alter database %s wal_retention_period 3600"%(paraDict["dbName"])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py index c4a7c8cae5..7ee5fce5a8 100644 --- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py +++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py @@ -106,6 +106,7 @@ class TDTestCase: # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdSql.execute("alter database dbt wal_retention_period 3600") tdLog.info("create topics from stb1") topicFromStb1 = 'topic_UpperCase_stb1' # queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) diff --git a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py index c380d201b2..71b7fdef5d 100644 --- a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py +++ b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" %(paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index 0fa9bcfbd4..9f01f20470 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -52,7 +52,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -99,7 +99,7 @@ class TDTestCase: os.system(shellCmd) def 
create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -180,7 +180,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -278,7 +278,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -345,7 +345,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) tdSql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict['dbName'], parameterDict['stbName'])) tdLog.info("create topics from db") @@ -415,7 +415,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py index 50ef52cb15..ed13fcbe06 100644 --- a/tests/system-test/7-tmq/subscribeDb0.py +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -52,7 +52,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -99,7 +99,7 @@ class TDTestCase: os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) 
pre_create = "create table" @@ -180,7 +180,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -262,7 +262,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index c5ae44214a..9e0faa1776 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -52,7 +52,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -99,7 +99,7 @@ class TDTestCase: os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -179,8 +179,8 @@ class TDTestCase: parameterDict['cfg'] = cfgPath self.initConsumerTable() - tdLog.info("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdLog.info("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -196,7 +196,7 @@ class TDTestCase: 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict2['dbName'], parameterDict2['vgroups'], parameterDict2['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict2['dbName'], parameterDict2['vgroups'], parameterDict2['replica'])) prepareEnvThread2 = 
threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) prepareEnvThread2.start() @@ -267,7 +267,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -283,7 +283,7 @@ class TDTestCase: 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - tdSql.execute("create database if not exists %s vgroups %d replica %d" %(parameterDict2['dbName'], parameterDict2['vgroups'], parameterDict2['replica'])) + tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict2['dbName'], parameterDict2['vgroups'], parameterDict2['replica'])) prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) prepareEnvThread2.start() diff --git a/tests/system-test/7-tmq/subscribeDb2.py b/tests/system-test/7-tmq/subscribeDb2.py index d045842e45..60cde8d7f0 100644 --- a/tests/system-test/7-tmq/subscribeDb2.py +++ b/tests/system-test/7-tmq/subscribeDb2.py @@ -53,7 +53,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -100,7 +100,7 @@ class TDTestCase: os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -185,7 +185,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -263,7 +263,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py index 747ea3ba86..f82f89f1b9 100644 --- a/tests/system-test/7-tmq/subscribeDb3.py +++ b/tests/system-test/7-tmq/subscribeDb3.py @@ -52,7 +52,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and 
consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) @@ -122,7 +122,7 @@ class TDTestCase: os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -203,7 +203,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -280,7 +280,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() diff --git a/tests/system-test/7-tmq/subscribeDb4.py b/tests/system-test/7-tmq/subscribeDb4.py index c14d3b27b1..764362c708 100644 --- a/tests/system-test/7-tmq/subscribeDb4.py +++ b/tests/system-test/7-tmq/subscribeDb4.py @@ -65,6 +65,7 @@ class TDTestCase: tmqCom.initConsumerTable(self.cdbName) tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"]) + tdSql.execute("alter database %s wal_retention_period 3600" % (self.paraDict['dbName'])) self.paraDict["stbName"] = 'stb1' tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"]) diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py index 3ff0b25ff6..c8b66adfa2 100644 --- a/tests/system-test/7-tmq/subscribeStb.py +++ b/tests/system-test/7-tmq/subscribeStb.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -114,7 +114,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git 
a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py index 1463cad627..717cf05bdc 100644 --- a/tests/system-test/7-tmq/subscribeStb0.py +++ b/tests/system-test/7-tmq/subscribeStb0.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -114,7 +114,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py index edbe1bc3c6..67dee363b3 100644 --- a/tests/system-test/7-tmq/subscribeStb1.py +++ b/tests/system-test/7-tmq/subscribeStb1.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -114,7 +114,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py index 6c3e122902..422cb23ffd 100644 --- a/tests/system-test/7-tmq/subscribeStb2.py +++ b/tests/system-test/7-tmq/subscribeStb2.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -114,7 +114,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py index 025f403282..7205e84620 100644 --- a/tests/system-test/7-tmq/subscribeStb3.py +++ b/tests/system-test/7-tmq/subscribeStb3.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): 
tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -114,7 +114,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py index 6aa3da66a4..bb8afcf14e 100644 --- a/tests/system-test/7-tmq/subscribeStb4.py +++ b/tests/system-test/7-tmq/subscribeStb4.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -114,7 +114,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/tmq3mnodeSwitch.py b/tests/system-test/7-tmq/tmq3mnodeSwitch.py index cdcdadbcbb..6f556382bb 100644 --- a/tests/system-test/7-tmq/tmq3mnodeSwitch.py +++ b/tests/system-test/7-tmq/tmq3mnodeSwitch.py @@ -200,6 +200,7 @@ class TDTestCase: tdLog.info("async insert data") pThread = tmqCom.asyncInsertData(paraDict) + tdSql.execute("alter database %s wal_retention_period 3600" %(paraDict['dbName'])) tdLog.info("create topics from stb with filter") # queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) diff --git a/tests/system-test/7-tmq/tmqAlterSchema.py b/tests/system-test/7-tmq/tmqAlterSchema.py index a70678219f..1a8b0693b8 100644 --- a/tests/system-test/7-tmq/tmqAlterSchema.py +++ b/tests/system-test/7-tmq/tmqAlterSchema.py @@ -65,6 +65,7 @@ class TDTestCase: queryStringList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" %(paraDict['dbName'])) tdLog.info("create stb") tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) tdLog.info("create ctb") @@ -175,6 +176,7 @@ class TDTestCase: queryStringList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" %(paraDict['dbName'])) tdLog.info("create stb") tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], 
column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) tdLog.info("create ntb") diff --git a/tests/system-test/7-tmq/tmqAutoCreateTbl.py b/tests/system-test/7-tmq/tmqAutoCreateTbl.py index 41073d83ae..5d0af636b2 100644 --- a/tests/system-test/7-tmq/tmqAutoCreateTbl.py +++ b/tests/system-test/7-tmq/tmqAutoCreateTbl.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1, wal_retention_size=-1,wal_retention_period=-1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) # tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqCheckData.py b/tests/system-test/7-tmq/tmqCheckData.py index 04d0744ab5..a9671241a9 100644 --- a/tests/system-test/7-tmq/tmqCheckData.py +++ b/tests/system-test/7-tmq/tmqCheckData.py @@ -80,6 +80,7 @@ class TDTestCase: tdLog.info("insert data") tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create topics from stb with filter") queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) diff --git a/tests/system-test/7-tmq/tmqCheckData1.py b/tests/system-test/7-tmq/tmqCheckData1.py index b9dac62833..e06c29c5a2 100644 --- a/tests/system-test/7-tmq/tmqCheckData1.py +++ b/tests/system-test/7-tmq/tmqCheckData1.py @@ -73,6 +73,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py index f372a2b742..3ad1d097e1 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py index c7f95f6f41..fdd683d08d 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) 
tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py index 26dacf514d..f05f0abeff 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py index d6f100041b..75b49a34fc 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py index 87832ac0ef..2862bcb09b 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py index 8ed4a6df97..cca29c178d 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replicaVar) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py index 6a03f0f751..00d2491c97 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py 
b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py index c11159c6e5..3b1dbae443 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py index 37946d0c22..a799fa5719 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py index 439845aa54..f0bedbb187 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py index 53ff020b08..a63927dd8b 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py index 4bb6cf463f..8fcc991d4e 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replicaVar) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py index 02093a2d88..b1aef9d762 100644 --- a/tests/system-test/7-tmq/tmqConsumerGroup.py +++ 
b/tests/system-test/7-tmq/tmqConsumerGroup.py @@ -73,6 +73,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py index b09efdd1e6..6a62247541 100644 --- a/tests/system-test/7-tmq/tmqDelete-1ctb.py +++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqDelete-multiCtb.py b/tests/system-test/7-tmq/tmqDelete-multiCtb.py index e59e3d6ecd..7a47cd6025 100644 --- a/tests/system-test/7-tmq/tmqDelete-multiCtb.py +++ b/tests/system-test/7-tmq/tmqDelete-multiCtb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replicaVar,wal_retention_size=-1, wal_retention_period=-1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py index e1d6d91e2d..b96ca24ac5 100644 --- a/tests/system-test/7-tmq/tmqDnode.py +++ b/tests/system-test/7-tmq/tmqDnode.py @@ -110,7 +110,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py index a44ff916e5..afd54c9d02 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart.py @@ -55,6 +55,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -186,6 +187,7 @@ class TDTestCase: tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) # tdLog.info("create 
ctb") diff --git a/tests/system-test/7-tmq/tmqDnodeRestart1.py b/tests/system-test/7-tmq/tmqDnodeRestart1.py index 982cc0a631..cff55a1239 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart1.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart1.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py index 6c49fae299..198a5ed6df 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py @@ -57,6 +57,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("start create normal tables....") tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") @@ -143,6 +144,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("start create normal tables....") tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py index 3fc5a2fdc7..0b9cb7e66a 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py @@ -57,6 +57,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("start create normal tables....") tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") @@ -143,6 +144,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("start create normal tables....") tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', 
tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") diff --git a/tests/system-test/7-tmq/tmqDropStb.py b/tests/system-test/7-tmq/tmqDropStb.py index 3789632984..a94747e574 100644 --- a/tests/system-test/7-tmq/tmqDropStb.py +++ b/tests/system-test/7-tmq/tmqDropStb.py @@ -64,6 +64,7 @@ class TDTestCase: tmqCom.initConsumerTable(self.cdbName) tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"]) + tdSql.execute("alter database %s wal_retention_period 3600" % (self.paraDict['dbName'])) self.paraDict["stbName"] = 'stb1' tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"]) diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py index c9e34136cc..587baf12aa 100644 --- a/tests/system-test/7-tmq/tmqDropStbCtb.py +++ b/tests/system-test/7-tmq/tmqDropStbCtb.py @@ -54,6 +54,7 @@ class TDTestCase: # tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqError.py b/tests/system-test/7-tmq/tmqError.py index a39bac8dd1..b2038f1644 100644 --- a/tests/system-test/7-tmq/tmqError.py +++ b/tests/system-test/7-tmq/tmqError.py @@ -116,7 +116,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py index d6b4aff938..187152c9ce 100644 --- a/tests/system-test/7-tmq/tmqModule.py +++ b/tests/system-test/7-tmq/tmqModule.py @@ -110,7 +110,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py index 31ddc1b0f8..e9234f6c7a 100644 --- a/tests/system-test/7-tmq/tmqShow.py +++ b/tests/system-test/7-tmq/tmqShow.py @@ -51,6 +51,7 @@ class TDTestCase: consumerIdList = [0, 1, 2, 3] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict['vgroups'],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py index 85222a941b..7f322dc258 100644 --- a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py +++ 
b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py @@ -94,6 +94,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replica) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py index 297429b495..3edae98746 100644 --- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py @@ -116,6 +116,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -163,6 +164,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) # tdLog.info("create ctb") @@ -265,6 +267,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) # tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py index 9c139b50de..3c6a700a9b 100644 --- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py @@ -116,6 +116,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -163,6 +164,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) # tdLog.info("create ctb") @@ -265,6 +267,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tdCom.create_stable(tdSql, 
dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) # tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUdf.py b/tests/system-test/7-tmq/tmqUdf.py index 8593fd4f1e..9aa2c8483a 100644 --- a/tests/system-test/7-tmq/tmqUdf.py +++ b/tests/system-test/7-tmq/tmqUdf.py @@ -116,6 +116,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -163,6 +164,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) # tdLog.info("create ctb") @@ -266,6 +268,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + # tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) # tdLog.info("create stb") # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) # tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index 2fa6600fb9..a517354989 100644 --- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py index e2ba47c3fd..a90802e591 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py @@ -55,6 +55,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py index 6b8c10de27..5a35c4f5ee 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py @@ -55,6 +55,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) 
tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py index 3975013e74..84617efae4 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py @@ -55,6 +55,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/7-tmq/tmqUpdateWithConsume.py b/tests/system-test/7-tmq/tmqUpdateWithConsume.py index 4f595788da..6a9bb0ae92 100644 --- a/tests/system-test/7-tmq/tmqUpdateWithConsume.py +++ b/tests/system-test/7-tmq/tmqUpdateWithConsume.py @@ -54,6 +54,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replicaVar, wal_retention_size=-1, wal_retention_period=-1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") diff --git a/tests/system-test/99-TDcase/TD-15517.py b/tests/system-test/99-TDcase/TD-15517.py index db06700284..aeb28063e4 100644 --- a/tests/system-test/99-TDcase/TD-15517.py +++ b/tests/system-test/99-TDcase/TD-15517.py @@ -51,7 +51,7 @@ class TDTestCase: return cur def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" diff --git a/tests/system-test/99-TDcase/TD-15554.py b/tests/system-test/99-TDcase/TD-15554.py index 51934ccd44..c8ddeca3c3 100644 --- a/tests/system-test/99-TDcase/TD-15554.py +++ b/tests/system-test/99-TDcase/TD-15554.py @@ -50,7 +50,7 @@ class TDTestCase: return cur def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" diff --git a/tests/system-test/99-TDcase/TD-15557.py b/tests/system-test/99-TDcase/TD-15557.py index 884c028a65..f9e676f4f8 100644 --- a/tests/system-test/99-TDcase/TD-15557.py +++ b/tests/system-test/99-TDcase/TD-15557.py @@ -64,7 +64,7 @@ class TDTestCase: os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create 
table" @@ -141,7 +141,7 @@ class TDTestCase: 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -214,7 +214,7 @@ class TDTestCase: 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -298,7 +298,7 @@ class TDTestCase: 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() diff --git a/tests/system-test/99-TDcase/TD-15563.py b/tests/system-test/99-TDcase/TD-15563.py index 5ea652b4fb..cc4c3f3b39 100644 --- a/tests/system-test/99-TDcase/TD-15563.py +++ b/tests/system-test/99-TDcase/TD-15563.py @@ -52,7 +52,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -95,7 +95,7 @@ class TDTestCase: os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -176,7 +176,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -238,7 +238,7 @@ class TDTestCase: self.initConsumerTable() - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() @@ -304,7 +304,7 @@ class TDTestCase: self.initConsumerTable() - 
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'])) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() diff --git a/tests/system-test/99-TDcase/TD-16025.py b/tests/system-test/99-TDcase/TD-16025.py index 8c9fa9319f..3e25b8c833 100644 --- a/tests/system-test/99-TDcase/TD-16025.py +++ b/tests/system-test/99-TDcase/TD-16025.py @@ -59,7 +59,7 @@ class TDTestCase: def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") - tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) @@ -110,7 +110,7 @@ class TDTestCase: if dropFlag == 1: tsql.execute("drop database if exists %s"%(dbName)) - tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica)) tdLog.debug("complete to create database %s"%(dbName)) return diff --git a/tests/system-test/99-TDcase/TD-16821.py b/tests/system-test/99-TDcase/TD-16821.py index f57fae752f..78ac172f30 100644 --- a/tests/system-test/99-TDcase/TD-16821.py +++ b/tests/system-test/99-TDcase/TD-16821.py @@ -73,6 +73,7 @@ class TDTestCase: expectRowsList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) tdLog.info("create ctb") diff --git a/tests/system-test/99-TDcase/TD-17255.py b/tests/system-test/99-TDcase/TD-17255.py index 0f83468754..5f68a5b738 100644 --- a/tests/system-test/99-TDcase/TD-17255.py +++ b/tests/system-test/99-TDcase/TD-17255.py @@ -53,6 +53,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -97,6 +98,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -181,6 +183,7 @@ class TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("create ctb") @@ -267,6 +270,7 @@ class 
TDTestCase: tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) tdLog.info("create stb") tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) tdLog.info("insert data by auto create ctb") diff --git a/tests/system-test/99-TDcase/TD-17699.py b/tests/system-test/99-TDcase/TD-17699.py index 2862f4a78d..6956e88aec 100644 --- a/tests/system-test/99-TDcase/TD-17699.py +++ b/tests/system-test/99-TDcase/TD-17699.py @@ -65,6 +65,7 @@ class TDTestCase: tmqCom.initConsumerTable(self.cdbName) tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"]) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) self.paraDict["stbName"] = 'stb1' tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"]) From bac441b317760123930a731cd508d5d8da51b79d Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Sat, 25 Mar 2023 13:21:50 +0800 Subject: [PATCH 25/71] fix: create database with wal_retention_period in tmq_taosx_ci.c --- utils/test/c/tmq_taosx_ci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index 1f25eae366..1cc2a48469 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -441,7 +441,7 @@ int32_t init_env() { taos_free_result(pRes); char sql[128] = {0}; - snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", g_conf.dstVgroups); + snprintf(sql, 128, "create database if not exists db_taosx vgroups %d wal_retention_period 3600", g_conf.dstVgroups); pRes = taos_query(pConn, sql); if (taos_errno(pRes) != 0) { printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); @@ -470,7 +470,7 @@ int32_t init_env() { } taos_free_result(pRes); - snprintf(sql, 128, "create database if not exists abc1 vgroups %d", g_conf.srcVgroups); + snprintf(sql, 128, "create database if not exists abc1 vgroups %d wal_retention_period 3600", g_conf.srcVgroups); pRes = taos_query(pConn, sql); if (taos_errno(pRes) != 0) { printf("error in create db, reason:%s\n", taos_errstr(pRes)); From 6e1eed8843de73a512f095a79916b1157ddb0acc Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Sat, 25 Mar 2023 13:59:17 +0800 Subject: [PATCH 26/71] fix: create database with wal_retention_period in docs/examples/csharp --- docs/examples/csharp/influxdbLine/Program.cs | 2 +- docs/examples/csharp/optsTelnet/Program.cs | 2 +- docs/examples/csharp/sqlInsert/Program.cs | 2 +- docs/examples/csharp/stmtInsert/Program.cs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/examples/csharp/influxdbLine/Program.cs b/docs/examples/csharp/influxdbLine/Program.cs index fa3cb21fe0..a620c01609 100644 --- a/docs/examples/csharp/influxdbLine/Program.cs +++ b/docs/examples/csharp/influxdbLine/Program.cs @@ -48,7 +48,7 @@ namespace TDengineExample static void PrepareDatabase(IntPtr conn) { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE test"); + IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); if (TDengine.ErrorNo(res) != 0) { throw new Exception("failed to create database, reason: " + TDengine.Error(res)); diff --git a/docs/examples/csharp/optsTelnet/Program.cs 
b/docs/examples/csharp/optsTelnet/Program.cs index e73ceb041a..ccd29d0cfc 100644 --- a/docs/examples/csharp/optsTelnet/Program.cs +++ b/docs/examples/csharp/optsTelnet/Program.cs @@ -58,7 +58,7 @@ namespace TDengineExample static void PrepareDatabase(IntPtr conn) { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE test"); + IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); if (TDengine.ErrorNo(res) != 0) { throw new Exception("failed to create database, reason: " + TDengine.Error(res)); diff --git a/docs/examples/csharp/sqlInsert/Program.cs b/docs/examples/csharp/sqlInsert/Program.cs index f23a6e1663..25a945a459 100644 --- a/docs/examples/csharp/sqlInsert/Program.cs +++ b/docs/examples/csharp/sqlInsert/Program.cs @@ -11,7 +11,7 @@ namespace TDengineExample IntPtr conn = GetConnection(); try { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE power"); + IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600"); CheckRes(conn, res, "failed to create database"); res = TDengine.Query(conn, "USE power"); CheckRes(conn, res, "failed to change database"); diff --git a/docs/examples/csharp/stmtInsert/Program.cs b/docs/examples/csharp/stmtInsert/Program.cs index 80cadb2ff8..2e856a49bb 100644 --- a/docs/examples/csharp/stmtInsert/Program.cs +++ b/docs/examples/csharp/stmtInsert/Program.cs @@ -76,7 +76,7 @@ namespace TDengineExample static void PrepareSTable() { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE power"); + IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600"); CheckResPtr(res, "failed to create database"); res = TDengine.Query(conn, "USE power"); CheckResPtr(res, "failed to change database"); From c35f2ca8e3bd4fc89e30e7877af7bfbf6ae48e35 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Sat, 25 Mar 2023 17:34:04 +0800 Subject: [PATCH 27/71] test: modify tmq cases --- tests/system-test/7-tmq/tmqUpdate-1ctb.py | 2 +- tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index a517354989..8fdf7748a3 100644 --- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -208,7 +208,7 @@ class TDTestCase: paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 if self.snapshot == 0: - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1/2)) + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1+ 1/2 + 1/2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1)) diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py index a90802e591..8b67f6f825 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py @@ -214,7 +214,7 @@ class TDTestCase: paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 if self.snapshot == 0: - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1/2) * (1/2*3)) + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] *(2 + 1/2*1/2*2 + 1/2 + 1/4)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2)) From 1b3f9f4a08f0f69bb3e35361a8ecab6e1f2abd7c Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 26 Mar 2023 21:36:59 +0800 Subject: [PATCH 28/71] test: change batch size --- 
tests/system-test/0-others/udfpy_main.py | 38 +++++++++++++----------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index a2176a041c..55a60d4314 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -240,6 +240,7 @@ class TDTestCase: cols_name = ','.join(sample) sql = f'select sf_multi_args({cols_name}),{cols_name} from {self.stbname} limit 10' self.verify_same_multi_values(sql) + tdLog.info(sql) # query_udfpy @@ -251,7 +252,7 @@ class TDTestCase: sql = f'select {col_name}, {fun_name}({col_name}) from {self.stbname} limit 10' tdLog.info(sql) self.verify_same_value(sql) - sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} ) order by b,a desc limit 10' + sql = f'select * from (select {col_name} as a, {fun_name}({col_name}) as b from {self.stbname} limit 100) order by b,a desc' tdLog.info(sql) self.verify_same_value(sql) @@ -271,41 +272,43 @@ class TDTestCase: tdLog.exit(f" check {sql} not expect None.") # concat - sql = f'select sf_concat_var(col12, t12), concat(col12, t12) from {self.stbname}' + sql = f'select sf_concat_var(col12, t12), concat(col12, t12) from {self.stbname} limit 1000' self.verify_same_value(sql) - sql = f'select sf_concat_nch(col13, t13), concat(col13, t13) from {self.stbname}' + sql = f'select sf_concat_nch(col13, t13), concat(col13, t13) from {self.stbname} limit 1000' self.verify_same_value(sql) # create aggregate def create_aggr_udfpy(self): + + bufsize = 200 * 1024 # all type check null for col_name, col_type in self.column_dict.items(): - self.create_udf_af(f"af_null_{col_name}", "af_null.py", col_type, 10*1024) + self.create_udf_af(f"af_null_{col_name}", "af_null.py", col_type, bufsize) # min file_name = "af_min.py" fun_name = "af_min_float" - self.create_udf_af(fun_name, file_name, f"float", 10*1024) + self.create_udf_af(fun_name, file_name, f"float", bufsize) fun_name = "af_min_int" - self.create_udf_af(fun_name, file_name, f"int", 10*1024) + self.create_udf_af(fun_name, file_name, f"int", bufsize) # sum file_name = "af_sum.py" fun_name = "af_sum_float" - self.create_udf_af(fun_name, file_name, f"float", 10*1024) + self.create_udf_af(fun_name, file_name, f"float", bufsize) fun_name = "af_sum_int" - self.create_udf_af(fun_name, file_name, f"int", 10*1024) + self.create_udf_af(fun_name, file_name, f"int", bufsize) fun_name = "af_sum_bigint" - self.create_udf_af(fun_name, file_name, f"bigint", 10*1024) + self.create_udf_af(fun_name, file_name, f"bigint", bufsize) # count file_name = "af_count.py" fun_name = "af_count_float" - self.create_udf_af(fun_name, file_name, f"float", 10*1024) + self.create_udf_af(fun_name, file_name, f"float", bufsize) fun_name = "af_count_int" - self.create_udf_af(fun_name, file_name, f"int", 10*1024) + self.create_udf_af(fun_name, file_name, f"int", bufsize) fun_name = "af_count_bigint" - self.create_udf_af(fun_name, file_name, f"bigint", 10*1024) + self.create_udf_af(fun_name, file_name, f"bigint", bufsize) # query aggregate @@ -364,7 +367,7 @@ class TDTestCase: def insert_data(self, tbname, rows): ts = 1670000000000 values = "" - batch_size = 300 + batch_size = 500 child_name = "" for i in range(self.child_count): for j in range(rows): @@ -399,17 +402,16 @@ class TDTestCase: stable = "meters" tbname = "d" count = 3 - rows = 1000 + rows = 3000000 # do self.create_table(stable, tbname, count) self.insert_data(tbname, rows) - # scalar + # create 
self.create_scalar_udfpy() - self.query_scalar_udfpy() - - # aggregate self.create_aggr_udfpy() + # query + self.query_scalar_udfpy() self.query_aggr_udfpy() # show performance From bdd04732ffa1ff3dcf953141e71a4a1041ef0476 Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Mon, 27 Mar 2023 13:20:02 +0800 Subject: [PATCH 29/71] Update 20-keywords.md --- docs/zh/12-taos-sql/20-keywords.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index 8fd704ef55..d416febd55 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -269,7 +269,7 @@ description: TDengine 保留关键字的详细列表 - SPLIT - STABLE - STABLES -- STAR +- START - STATE - STATE_WINDOW - STATEMENT From 2495d0781d077bf66986fe53e4dc745ca7c91a2b Mon Sep 17 00:00:00 2001 From: cadem Date: Mon, 27 Mar 2023 15:11:20 +0800 Subject: [PATCH 30/71] wrong field logging --- source/dnode/mnode/impl/src/mndMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index ef94eb536d..5c20887cf5 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -653,7 +653,7 @@ _OVER: pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER || pMsg->msgType == TDMT_MND_UPTIME_TIMER) { mTrace("timer not process since mnode restored:%d stopped:%d, sync restored:%d role:%s ", pMnode->restored, - pMnode->stopped, state.restored, syncStr(state.restored)); + pMnode->stopped, state.restored, syncStr(state.state)); return -1; } From a5d4dfe2816b2f0b5a4b84f203eb6103deaab558 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 27 Mar 2023 16:30:29 +0800 Subject: [PATCH 31/71] fix: taosbenchmark codacy complain (#20640) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 897ccdd158..40fa48b815 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 04296a5 + GIT_TAG 2864326 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 319ec95b9d7678edeaf5064b58b117176dd4ebee Mon Sep 17 00:00:00 2001 From: Adam Ji Date: Mon, 27 Mar 2023 17:07:33 +0800 Subject: [PATCH 32/71] docs: add content about req-id (#20650) --- .../14-reference/03-connector/07-python.mdx | 80 +++++++++++++++++++ .../connect_rest_with_req_id_examples.py | 44 ++++++++++ .../connect_websocket_with_req_id_examples.py | 29 +++++++ ...tion_usage_native_reference_with_req_id.py | 45 +++++++++++ ...rsor_usage_native_reference_with_req_id.py | 32 ++++++++ .../python/rest_client_with_req_id_example.py | 9 +++ .../python/result_set_with_req_id_examples.py | 33 ++++++++ docs/zh/08-connector/30-python.mdx | 79 ++++++++++++++++++ 8 files changed, 351 insertions(+) create mode 100644 docs/examples/python/connect_rest_with_req_id_examples.py create mode 100644 docs/examples/python/connect_websocket_with_req_id_examples.py create mode 100644 docs/examples/python/connection_usage_native_reference_with_req_id.py create mode 100644 docs/examples/python/cursor_usage_native_reference_with_req_id.py create mode 100644 docs/examples/python/rest_client_with_req_id_example.py create mode 100644 docs/examples/python/result_set_with_req_id_examples.py diff 
--git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 69be15f9e8..bfbdd929c2 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -353,6 +353,86 @@ For a more detailed description of the `sql()` method, please refer to [RestClie +### Usage with req_id + +By using the optional req_id parameter, you can specify a request ID that can be used for tracing. + + + + +##### TaosConnection class + +The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods). + +```python title="execute method" +{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}} +``` + +```python title="query method" +{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}} +``` + +:::tip +The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list. +::: + +##### Use of TaosResult class + +In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data. + +```python title="blocks_iter method" +{{#include docs/examples/python/result_set_with_req_id_examples.py}} +``` +##### Use of the TaosCursor class + +The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class. + +```python title="Use of TaosCursor" +{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}} +``` + +:::note +The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results. + +::: + + + + +##### Use of TaosRestCursor class + +The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface. + +```python title="Use of TaosRestCursor" +{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}} +``` +- `cursor.execute`: Used to execute arbitrary SQL statements. +- `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set. +- `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information. + +##### Use of the RestClient class + +The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. 
+ +```python title="Use of RestClient" +{{#include docs/examples/python/rest_client_with_req_id_example.py}} +``` + +For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html). + + + + +```python +{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}} +``` + +- `conn.execute`: can use to execute arbitrary SQL statements, and return the number of rows affected. +- `conn.query`: can use to execute query SQL statements, and return the query results. + + + + ### Used with pandas diff --git a/docs/examples/python/connect_rest_with_req_id_examples.py b/docs/examples/python/connect_rest_with_req_id_examples.py new file mode 100644 index 0000000000..3feb574fa6 --- /dev/null +++ b/docs/examples/python/connect_rest_with_req_id_examples.py @@ -0,0 +1,44 @@ +# ANCHOR: connect +from taosrest import connect, TaosRestConnection, TaosRestCursor + +conn = connect(url="http://localhost:6041", + user="root", + password="taosdata", + timeout=30) + +# ANCHOR_END: connect +# ANCHOR: basic +# create STable +cursor = conn.cursor() +cursor.execute("DROP DATABASE IF EXISTS power", req_id=1) +cursor.execute("CREATE DATABASE power", req_id=2) +cursor.execute( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)", req_id=3) + +# insert data +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""", req_id=4) +print("inserted row count:", cursor.rowcount) + +# query data +cursor.execute("SELECT * FROM power.meters LIMIT 3", req_id=5) +# get total rows +print("queried row count:", cursor.rowcount) +# get column names from cursor +column_names = [meta[0] for meta in cursor.description] +# get rows +data = cursor.fetchall() +print(column_names) +for row in data: + print(row) + +# output: +# inserted row count: 8 +# queried row count: 3 +# ['ts', 'current', 'voltage', 'phase', 'location', 'groupid'] +# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3] +# ANCHOR_END: basic diff --git a/docs/examples/python/connect_websocket_with_req_id_examples.py b/docs/examples/python/connect_websocket_with_req_id_examples.py new file mode 100644 index 0000000000..f5f76c8446 --- /dev/null +++ b/docs/examples/python/connect_websocket_with_req_id_examples.py @@ -0,0 +1,29 @@ +# ANCHOR: connect +import taosws + +conn = 
taosws.connect("taosws://root:taosdata@localhost:6041") +# ANCHOR_END: connect + +# ANCHOR: basic +conn.execute("drop database if exists connwspy", req_id=1) +conn.execute("create database if not exists connwspy", req_id=2) +conn.execute("use connwspy", req_id=3) +conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)", req_id=4) +conn.execute("create table if not exists tb1 using stb tags (1)", req_id=5) +conn.execute("insert into tb1 values (now, 1)", req_id=6) +conn.execute("insert into tb1 values (now, 2)", req_id=7) +conn.execute("insert into tb1 values (now, 3)", req_id=8) + +r = conn.execute("select * from stb", req_id=9) +result = conn.query("select * from stb", req_id=10) +num_of_fields = result.field_count +print(num_of_fields) + +for row in result: + print(row) + +# output: +# 3 +# ('2023-02-28 15:56:13.329 +08:00', 1, 1) +# ('2023-02-28 15:56:13.333 +08:00', 2, 1) +# ('2023-02-28 15:56:13.337 +08:00', 3, 1) diff --git a/docs/examples/python/connection_usage_native_reference_with_req_id.py b/docs/examples/python/connection_usage_native_reference_with_req_id.py new file mode 100644 index 0000000000..24d0914ad5 --- /dev/null +++ b/docs/examples/python/connection_usage_native_reference_with_req_id.py @@ -0,0 +1,45 @@ +import taos + +# ANCHOR: insert +conn = taos.connect() +# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. +conn.execute("DROP DATABASE IF EXISTS test", req_id=1) +conn.execute("CREATE DATABASE test", req_id=2) +# change database. same as execute "USE db" +conn.select_db("test") +conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3) +affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)", req_id=4) +print("affected_row", affected_row) +# output: +# affected_row 3 +# ANCHOR_END: insert + +# ANCHOR: query +# Execute a sql and get its result set. 
It's useful for SELECT statement +result = conn.query("SELECT * from weather", req_id=5) + +# Get fields from result +fields = result.fields +for field in fields: + print(field) # {name: ts, type: 9, bytes: 8} + +# output: +# {name: ts, type: 9, bytes: 8} +# {name: temperature, type: 6, bytes: 4} +# {name: location, type: 4, bytes: 4} + +# Get data from result as list of tuple +data = result.fetch_all() +print(data) +# output: +# [(datetime.datetime(2022, 4, 27, 9, 4, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 5, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 6, 25, 367000), 24.399999618530273, 1)] + +# Or get data from result as a list of dict +# map_data = result.fetch_all_into_dict() +# print(map_data) +# output: +# [{'ts': datetime.datetime(2022, 4, 27, 9, 1, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 2, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 3, 15, 343000), 'temperature': 24.399999618530273, 'location': 1}] +# ANCHOR_END: query + + +conn.close() \ No newline at end of file diff --git a/docs/examples/python/cursor_usage_native_reference_with_req_id.py b/docs/examples/python/cursor_usage_native_reference_with_req_id.py new file mode 100644 index 0000000000..15207ee6bc --- /dev/null +++ b/docs/examples/python/cursor_usage_native_reference_with_req_id.py @@ -0,0 +1,32 @@ +import taos + +conn = taos.connect() +cursor = conn.cursor() + +cursor.execute("DROP DATABASE IF EXISTS test", req_id=1) +cursor.execute("CREATE DATABASE test", req_id=2) +cursor.execute("USE test", req_id=3) +cursor.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=4) + +for i in range(1000): + location = str(i % 10) + tb = "t" + location + cursor.execute(f"INSERT INTO {tb} USING weather TAGS({location}) VALUES (now+{i}a, 23.5) (now+{i + 1}a, 23.5)", req_id=5+i) + +cursor.execute("SELECT count(*) FROM weather", req_id=1005) +data = cursor.fetchall() +print("count:", data[0][0]) +cursor.execute("SELECT tbname, * FROM weather LIMIT 2", req_id=1006) +col_names = [meta[0] for meta in cursor.description] +print(col_names) +rows = cursor.fetchall() +print(rows) + +cursor.close() +conn.close() + +# output: +# count: 2000 +# ['tbname', 'ts', 'temperature', 'location'] +# row_count: -1 +# [('t0', datetime.datetime(2022, 4, 27, 14, 54, 24, 392000), 23.5, 0), ('t0', datetime.datetime(2022, 4, 27, 14, 54, 24, 393000), 23.5, 0)] diff --git a/docs/examples/python/rest_client_with_req_id_example.py b/docs/examples/python/rest_client_with_req_id_example.py new file mode 100644 index 0000000000..918398e51e --- /dev/null +++ b/docs/examples/python/rest_client_with_req_id_example.py @@ -0,0 +1,9 @@ +from taosrest import RestClient + +client = RestClient("http://localhost:6041", user="root", password="taosdata") +res: dict = client.sql("SELECT ts, current FROM power.meters LIMIT 1", req_id=1) +print(res) + +# output: +# {'status': 'succ', 'head': ['ts', 'current'], 'column_meta': [['ts', 9, 8], ['current', 6, 4]], 'data': [[datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3]], 'rows': 1} + diff --git a/docs/examples/python/result_set_with_req_id_examples.py b/docs/examples/python/result_set_with_req_id_examples.py new file mode 100644 index 0000000000..90ae2f4f26 --- /dev/null +++ b/docs/examples/python/result_set_with_req_id_examples.py @@ -0,0 +1,33 @@ +import taos + +conn = taos.connect() 
+conn.execute("DROP DATABASE IF EXISTS test", req_id=1) +conn.execute("CREATE DATABASE test", req_id=2) +conn.select_db("test") +conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3) +# prepare data +for i in range(2000): + location = str(i % 10) + tb = "t" + location + conn.execute(f"INSERT INTO {tb} USING weather TAGS({location}) VALUES (now+{i}a, 23.5) (now+{i + 1}a, 23.5)", req_id=4+i) + +result: taos.TaosResult = conn.query("SELECT * FROM weather", req_id=2004) + +block_index = 0 +blocks: taos.TaosBlocks = result.blocks_iter() +for rows, length in blocks: + print("block ", block_index, " length", length) + print("first row in this block:", rows[0]) + block_index += 1 + +conn.close() + +# possible output: +# block 0 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 46000), 23.5, 0) +# block 1 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 76000), 23.5, 3) +# block 2 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 99000), 23.5, 6) +# block 3 length 400 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 122000), 23.5, 9) diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index fdfb141e11..5395610df3 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -353,6 +353,85 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 +### 与 req_id 一起使用 + +使用可选的 req_id 参数,指定请求 id,可以用于 tracing + + + + +##### TaosConnection 类的使用 + +`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor`方法和 `close` 方法),也包含很多扩展功能(如: `execute`、 `query`、`schemaless_insert` 和 `subscribe` 方法。 + +```python title="execute 方法" +{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}} +``` + +```python title="query 方法" +{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}} +``` + +:::tip +查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。 +::: + +##### TaosResult 类的使用 + +上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效。 + +```python title="blocks_iter 方法" +{{#include docs/examples/python/result_set_with_req_id_examples.py}} +``` +##### TaosCursor 类的使用 + +`TaosConnection` 类和 `TaosResult` 类已经实现了原生接口的所有功能。如果你对 PEP249 规范中的接口比较熟悉也可以使用 `TaosCursor` 类提供的方法。 + +```python title="TaosCursor 的使用" +{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}} +``` + +:::note +TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。 + +::: + + + + +##### TaosRestCursor 类的使用 + +`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。 + +```python title="TaosRestCursor 的使用" +{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}} +``` +- `cursor.execute` : 用来执行任意 SQL 语句。 +- `cursor.rowcount`: 对于写入操作返回写入成功记录数。对于查询操作,返回结果集行数。 +- `cursor.description` : 返回字段的描述信息。关于描述信息的具体格式请参考[TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html)。 + +##### RestClient 类的使用 + +`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 + +```python title="RestClient 的使用" +{{#include docs/examples/python/rest_client_with_req_id_example.py}} +``` + +对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。 + + + +```python +{{#include 
docs/examples/python/connect_websocket_with_req_id_examples.py:basic}} +``` + +- `conn.execute`: 用来执行任意 SQL 语句,返回影响的行数 +- `conn.query`: 用来执行查询 SQL 语句,返回查询结果 + + + + ### 与 pandas 一起使用 From 11f39d2fbcd17ba30e276d94d2a4a389c4e85545 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 27 Mar 2023 19:38:47 +0800 Subject: [PATCH 33/71] fix: taosbenchmark telnet_tcp case (#20659) * fix: taosbenchmark support same min/max * fix: taosbenchmark support same min/max * fix: telnet_tcp.py case * fix: enable sml_json_all_types.py --- cmake/taostools_CMakeLists.txt.in | 2 +- .../5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json | 2 +- tests/parallel_test/cases.task | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 40fa48b815..ac3c1e2bca 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 2864326 + GIT_TAG 7651857 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json index 9e1241397f..e609fcfebd 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json @@ -16,7 +16,7 @@ "num_of_records_per_req": 10, "databases": [{ "dbinfo": { - "name": "db", + "name": "opentsdb_telnet", "drop": "yes" }, "super_tables": [{ diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 6e0b180ed8..2ebe4b52a2 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1102,9 +1102,9 @@ ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py -#,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R -#,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R +,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R #docs-examples test ,,n,docs-examples-test,bash python.sh From 6d42ddb7f9828f1929469418eb0ccd47f8a123da Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Mon, 27 Mar 2023 19:58:47 +0800 Subject: [PATCH 34/71] test: add udfpy_main.py to ci --- tests/parallel_test/cases.task | 1 + tests/system-test/0-others/udfpy/af_count.py | 20 ++++++-------------- tests/system-test/0-others/udfpy_main.py | 4 ++-- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 6e0b180ed8..00e7c6ddd1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -120,6 +120,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py +,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py 
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py diff --git a/tests/system-test/0-others/udfpy/af_count.py b/tests/system-test/0-others/udfpy/af_count.py index 285ef96b55..226e02235f 100644 --- a/tests/system-test/0-others/udfpy/af_count.py +++ b/tests/system-test/0-others/udfpy/af_count.py @@ -1,5 +1,3 @@ -import pickle - def init(): pass @@ -7,23 +5,17 @@ def destroy(): pass def start(): - return pickle.dumps([]) + return pickle.dumps(0) def finish(buf): - counts = pickle.loads(buf) - all_count = 0 - for count in counts: - all_count += count - - return all_count + count = pickle.loads(buf) + return count def reduce(datablock, buf): (rows, cols) = datablock.shape() - counts = pickle.loads(buf) - batch_count = 0 + count = pickle.loads(buf) for i in range(rows): val = datablock.data(i, 0) if val is not None: - batch_count += 1 - counts.append(batch_count) - return pickle.dumps(counts) + count += 1 + return pickle.dumps(count) \ No newline at end of file diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 55a60d4314..eaadfbdbd6 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -401,8 +401,8 @@ class TDTestCase: # var stable = "meters" tbname = "d" - count = 3 - rows = 3000000 + count = 10 + rows = 50000 # do self.create_table(stable, tbname, count) self.insert_data(tbname, rows) From b6542fb286a6cb153c50809ea12bef893f28585c Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 28 Mar 2023 08:43:34 +0800 Subject: [PATCH 35/71] fix: add language and definition body to ins_functions --- source/common/src/systable.c | 2 ++ source/dnode/mnode/impl/src/mndFunc.c | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 919a09962b..06fe7482e2 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -114,6 +114,8 @@ static const SSysDbTableSchema userFuncSchema[] = { {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "language", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "body", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userIdxSchema[] = { diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 8d006f1029..7a475c61b6 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -519,6 +519,7 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)b2, false); + taosMemoryFree(b2); } else { pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, NULL, true); @@ -545,6 +546,26 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->bufSize, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + char* language = ""; + if 
(pFunc->scriptType == TSDB_FUNC_SCRIPT_BIN_LIB) { + language = "C"; + } else if (pFunc->scriptType == TSDB_FUNC_SCRIPT_PYTHON) { + language = "Python"; + } + char varLang[TSDB_TYPE_STR_MAX_LEN + 1] = {0}; + varDataSetLen(varLang, strlen(language)); + strcpy(varDataVal(varLang), language); + colDataSetVal(pColInfo, numOfRows, (const char *)varLang, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + int32_t varCodeLen = (pFunc->codeSize + VARSTR_HEADER_SIZE) > TSDB_MAX_BINARY_LEN ? TSDB_MAX_BINARY_LEN : pFunc->codeSize + VARSTR_HEADER_SIZE; + char *b4 = taosMemoryMalloc(varCodeLen); + memcpy(varDataVal(b4), pFunc->pCode, varCodeLen - VARSTR_HEADER_SIZE); + varDataSetLen(b4, varCodeLen - VARSTR_HEADER_SIZE); + colDataSetVal(pColInfo, numOfRows, (const char*)b4, false); + taosMemoryFree(b4); + numOfRows++; sdbRelease(pSdb, pFunc); } From 70043090dcd89668a14cd13b4e3bae2b5410e3b9 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Tue, 28 Mar 2023 10:55:27 +0800 Subject: [PATCH 36/71] test: change sum to high performance version --- tests/system-test/0-others/udfpy/af_count.py | 2 ++ tests/system-test/0-others/udfpy/af_sum.py | 22 ++++-------- tests/system-test/0-others/udfpy_main.py | 37 +++++++++++++++++--- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/tests/system-test/0-others/udfpy/af_count.py b/tests/system-test/0-others/udfpy/af_count.py index 226e02235f..ce29abca13 100644 --- a/tests/system-test/0-others/udfpy/af_count.py +++ b/tests/system-test/0-others/udfpy/af_count.py @@ -1,3 +1,5 @@ +import pickle + def init(): pass diff --git a/tests/system-test/0-others/udfpy/af_sum.py b/tests/system-test/0-others/udfpy/af_sum.py index ac7aa16924..8b88aba56c 100644 --- a/tests/system-test/0-others/udfpy/af_sum.py +++ b/tests/system-test/0-others/udfpy/af_sum.py @@ -7,30 +7,20 @@ def destroy(): pass def start(): - return pickle.dumps([]) + return pickle.dumps(None) def finish(buf): - sums = pickle.loads(buf) - all = None - for sum in sums: - if all is None: - all = sum - else: - all += sum - return all + sum = pickle.loads(buf) + return sum def reduce(datablock, buf): (rows, cols) = datablock.shape() - sums = pickle.loads(buf) - sum = None + sum = pickle.loads(buf) for i in range(rows): val = datablock.data(i, 0) if val is not None: if sum is None: sum = val else: - sum += val - - if sum is not None: - sums.append(sum) - return pickle.dumps(sums) + sum += val + return pickle.dumps(sum) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index eaadfbdbd6..e76795ac28 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -209,12 +209,12 @@ class TDTestCase: tdSql.checkData(i, j, result1[i][j]) # same value like select col1, udf_fun1(col1) from st - def verify_same_value(self, sql): + def verify_same_value(self, sql, col=0): tdSql.query(sql) nrows = tdSql.getRows() for i in range(nrows): - val = tdSql.getData(i, 0) - tdSql.checkData(i, 1, val) + val = tdSql.getData(i, col) + tdSql.checkData(i, col + 1, val) # verify multi values def verify_same_multi_values(self, sql): @@ -395,6 +395,24 @@ class TDTestCase: tdSql.execute(sql) tdLog.info(f" insert {rows} to child table {self.child_count} .") + + # create stream + def create_stream(self): + sql = f"create stream ma into sta subtable(concat('sta_',tbname)) \ + as select _wstart,count(col1),af_count_bigint(col1) from {self.stbname} partition by tbname interval(1s);" + tdSql.execute(sql) + tdLog.info(sql) + + # query 
stream + def verify_stream(self): + sql = f"select * from sta limit 10" + self.verify_same_value(sql, 1) + + # create tmq + def create_tmq(self): + sql = f"create topic topa as select concat(col12,t12),sf_concat_var(col12,t12) from {self.stbname};" + tdSql.execute(sql) + tdLog.info(sql) # run def run(self): @@ -402,14 +420,23 @@ class TDTestCase: stable = "meters" tbname = "d" count = 10 - rows = 50000 + rows = 5000 # do self.create_table(stable, tbname, count) - self.insert_data(tbname, rows) # create self.create_scalar_udfpy() self.create_aggr_udfpy() + + # create stream + self.create_stream() + + # create tmq + self.create_tmq() + + # insert data + self.insert_data(tbname, rows) + # query self.query_scalar_udfpy() self.query_aggr_udfpy() From 169d0ee13cd6451a11de9984707ea3b47e4f591a Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 28 Mar 2023 11:18:14 +0800 Subject: [PATCH 37/71] fix: scalar return and log error of udf execution --- source/libs/function/src/tudf.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 611344063b..fad118297e 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -1070,8 +1070,15 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, if (code != 0) { return code; } + SUdfcUvSession *session = handle; code = doCallUdfScalarFunc(handle, input, numOfCols, output); + if (code != TSDB_CODE_SUCCESS) { + fnError("udfc scalar function execution failure"); + releaseUdfFuncHandle(udfName); + return code; + } + if (output->columnData == NULL) { fnError("udfc scalar function calculate error. no column data"); code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; From 9b729c289bc268dc3fdef8dc1f76d3b6fb9cd2a1 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Tue, 28 Mar 2023 12:54:57 +0800 Subject: [PATCH 38/71] test: add install taospyudf --- tests/system-test/0-others/udfpy_main.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index e76795ac28..2dd97ea2c5 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -20,6 +20,7 @@ from util.sqlset import * import random import os +import subprocess class PerfDB: @@ -414,8 +415,17 @@ class TDTestCase: tdSql.execute(sql) tdLog.info(sql) + def install_taospy(self): + tdLog.info("install taospyudf...") + packs = ["taospyudf"] + for pack in packs: + subprocess.check_call([sys.executable, '-m', 'pip', 'install', pack]) + tdLog.info("install taospyudf successfully.") + # run def run(self): + self.install_taospy() + # var stable = "meters" tbname = "d" From 9289f7de8bdde5f9eb3fdfe46775e1597276ed87 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Tue, 28 Mar 2023 13:58:40 +0800 Subject: [PATCH 39/71] test: add retentsion for topic --- tests/system-test/0-others/udfpy_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 2dd97ea2c5..23fbfd1e09 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -105,7 +105,7 @@ class TDTestCase: # create stable and child tables def create_table(self, stbname, tbname, count): - tdSql.prepare() + tdSql.execute("create database db wal_retention_period 4") tdSql.execute('use db') self.child_count = count self.stbname = stbname From 
e98fe94eb2ee2a9b4570a489225870efbcd8af48 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Tue, 28 Mar 2023 15:08:38 +0800 Subject: [PATCH 40/71] test : add ldconfig to call --- tests/system-test/0-others/udfpy_main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index 23fbfd1e09..c6b2af0f83 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -420,6 +420,8 @@ class TDTestCase: packs = ["taospyudf"] for pack in packs: subprocess.check_call([sys.executable, '-m', 'pip', 'install', pack]) + tdLog.info("call ldconfig...") + os.system("ldconfig") tdLog.info("install taospyudf successfully.") # run From 764521f898e00ad1b29f5bfefebf38c2c7d8d0dc Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 28 Mar 2023 16:19:01 +0800 Subject: [PATCH 41/71] docs: refine the description about disaster recovery --- docs/zh/17-operation/03-tolerance.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md index 79cf10c39a..bf2a651c1a 100644 --- a/docs/zh/17-operation/03-tolerance.md +++ b/docs/zh/17-operation/03-tolerance.md @@ -19,12 +19,8 @@ TDengine 接收到应用的请求数据包时,先将请求的原始数据包 ## 灾备 -TDengine 的集群通过多个副本的机制,来提供系统的高可用性,同时具备一定的灾备能力。 +TDengine 灾备是通过在异地的两个数据中心中设置两个 TDengine 集群并利用 taosX 的数据复制能力来实现的。假定两个集群为集群 A 和集群 B,其中集群 A 为源集群,承担写入请求并提供查询服务。则在集群 A 所在数据中心中可以配置 taosX 利用 TDengine 提供的数据订阅能力,实时消费集群 A 中新写入的数据,并同步到集群 B。如果发生了灾难导致集群 A 所在数据中心不可用,则可以启用集群 B 作为数据写入和查询的主节点,并在集群 B 所处数据中心中配置 taosX 将数据复制到已经恢复的集群 A 或者新建的集群 C。 -TDengine 集群是由 mnode 负责管理的,为保证 mnode 的高可靠,可以配置 三个 mnode 副本。为保证元数据的强一致性,mnode 副本之间通过同步方式进行数据复制,保证了元数据的强一致性。 +利用 taosX 的数据复制能力也可以构造出更复杂的灾备方案。 -TDengine 集群中的时序数据的副本数是与数据库关联的,一个集群里可以有多个数据库,每个数据库可以配置不同的副本数。创建数据库时,通过参数 replica 指定副本数。为了支持高可靠,需要设置副本数为 3。 - -TDengine 集群的节点数必须大于等于副本数,否则创建表时将报错。 - -当 TDengine 集群中的节点部署在不同的物理机上,并设置多个副本数时,就实现了系统的高可靠性,无需再使用其他软件或工具。TDengine 企业版还可以将副本部署在不同机房,从而实现异地容灾。 +taosX 只在 TDengine 企业版中提供,关于其具体细节,请联系 business@taosdata.com From cb77ca212de1188389967c55772025cfa7078a96 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 28 Mar 2023 17:04:08 +0800 Subject: [PATCH 42/71] docs: refine disaster recovery --- docs/en/13-operation/03-tolerance.md | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md index 4f33748e68..b800e21a90 100644 --- a/docs/en/13-operation/03-tolerance.md +++ b/docs/en/13-operation/03-tolerance.md @@ -18,14 +18,8 @@ To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0 ## Disaster Recovery -TDengine uses replication to provide high availability. +TDengine provides disaster recovery by using taosX to replicate data between two TDengine clusters which are deployed in two distant data centers. Assume there are two TDengine clusters, A and B, A is the source and B is the target, and A takes the workload of writing and querying. You can deploy `taosX` in the data center where cluster A resides in, `taosX` consumes the data written into cluster A and writes into cluster B. If the data center of cluster A is disrupted because of disaster, you can switch to cluster to take the workload of data writing and querying, and deploy a `taosX` in the data center of cluster B to replicate data from cluster B to cluster A if cluster A has been recovered, or another cluster C if cluster A has not been recovered. 
-A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency. +You can use the data replication feature of `taosX` to build more complicated disaster recovery solution. -The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3. - -The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table. - -As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers. - -Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com. +taosX is only provided in TDengine enterprise edition, for more detail please contact business@tdengine.com. From 3702852b2cc0d3846e4330055089b62eaa551d64 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Tue, 28 Mar 2023 17:33:56 +0800 Subject: [PATCH 43/71] Update 03-tolerance.md --- docs/en/13-operation/03-tolerance.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md index b800e21a90..c8d2c3f3f6 100644 --- a/docs/en/13-operation/03-tolerance.md +++ b/docs/en/13-operation/03-tolerance.md @@ -18,8 +18,8 @@ To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0 ## Disaster Recovery -TDengine provides disaster recovery by using taosX to replicate data between two TDengine clusters which are deployed in two distant data centers. Assume there are two TDengine clusters, A and B, A is the source and B is the target, and A takes the workload of writing and querying. You can deploy `taosX` in the data center where cluster A resides in, `taosX` consumes the data written into cluster A and writes into cluster B. If the data center of cluster A is disrupted because of disaster, you can switch to cluster to take the workload of data writing and querying, and deploy a `taosX` in the data center of cluster B to replicate data from cluster B to cluster A if cluster A has been recovered, or another cluster C if cluster A has not been recovered. +TDengine provides disaster recovery by using taosX to replicate data between two TDengine clusters which are deployed in two distant data centers. Assume there are two TDengine clusters, A and B, A is the source and B is the target, and A takes the workload of writing and querying. You can deploy `taosX` in the data center where cluster A resides in, `taosX` consumes the data written into cluster A and writes into cluster B. 
If the data center of cluster A is disrupted because of disaster, you can switch to cluster B to take the workload of data writing and querying, and deploy a `taosX` in the data center of cluster B to replicate data from cluster B to cluster A if cluster A has been recovered, or another cluster C if cluster A has not been recovered. You can use the data replication feature of `taosX` to build more complicated disaster recovery solution. -taosX is only provided in TDengine enterprise edition, for more detail please contact business@tdengine.com. +taosX is only provided in TDengine enterprise edition, for more details please contact business@tdengine.com. From cebe53a48368598565a11b03faf8b471f2a7a40a Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Tue, 28 Mar 2023 19:08:43 +0800 Subject: [PATCH 44/71] fix: fix test `cast.py` for windows (#20676) --- tests/pytest/util/sql.py | 24 +++++-- tests/system-test/2-query/cast.py | 109 +++++++++--------------------- 2 files changed, 49 insertions(+), 84 deletions(-) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 414b8b2099..783ee476cb 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -251,7 +251,7 @@ class TDSql: if self.queryResult[row][col] != data: if self.cursor.istype(col, "TIMESTAMP"): - # suppose user want to check nanosecond timestamp if a longer data passed`` + # suppose user want to check nanosecond timestamp if a longer data passed`` if isinstance(data,str) : if (len(data) >= 28): if self.queryResult[row][col] == _parse_ns_timestamp(data): @@ -260,7 +260,7 @@ class TDSql: else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) - tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) else: if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc): # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") @@ -270,12 +270,12 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return - elif isinstance(data,int) : - if len(str(data)) == 16 : + elif isinstance(data,int): + if len(str(data)) == 16: precision = 'us' - elif len(str(data)) == 13 : + elif len(str(data)) == 13: precision = 'ms' - elif len(str(data)) == 19 : + elif len(str(data)) == 19: precision = 'ns' else: caller = inspect.getframeinfo(inspect.stack()[1][0]) @@ -303,11 +303,21 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return + elif isinstance(data,datetime.datetime): + dt_obj = self.queryResult[row][col] + delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo) + delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo) + if delt_data == delt_result: + tdLog.info("check successfully") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + return else: caller = 
inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) - if str(self.queryResult[row][col]) == str(data): # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") diff --git a/tests/system-test/2-query/cast.py b/tests/system-test/2-query/cast.py index de434eed7d..ede1f28324 100644 --- a/tests/system-test/2-query/cast.py +++ b/tests/system-test/2-query/cast.py @@ -38,11 +38,8 @@ class TDTestCase: if data_tb_col[i] is None: tdSql.checkData( i, 0 , None ) if col_name not in ["c2", "double"] or tbname != f"{self.dbname}.t1" or i != 10: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_tb_col[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = datetime.datetime.fromtimestamp(data_tb_col[i]/1000) + tdSql.checkData( i, 0, date_init_stamp) def __range_to_timestamp(self, cols, tables): for col in cols: @@ -60,7 +57,7 @@ class TDTestCase: self.__range_to_timestamp(cols=__col_list, tables=__table_list) def all_test(self): - + _datetime_epoch = datetime.datetime.fromtimestamp(0) tdSql.query(f"select c1 from {self.dbname}.ct4") data_ct4_c1 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)] tdSql.query(f"select c1 from {self.dbname}.t1") @@ -99,22 +96,16 @@ class TDTestCase: if data_ct4_c1[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c1[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c1[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c1 as timestamp) as b from {self.dbname}.t1") for i in range(len(data_t1_c1)): if data_t1_c1[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c1[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c1[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdLog.printNoPrefix("==========step8: cast bigint to bigint, expect no changes") @@ -156,11 +147,8 @@ class TDTestCase: if data_ct4_c2[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c2[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c2[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c2 as timestamp) as b from {self.dbname}.t1") @@ -170,11 +158,8 @@ class TDTestCase: elif i == 10: continue else: - utc_zone = datetime.timezone.utc - utc_8 = 
datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c2[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c2[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdLog.printNoPrefix("==========step12: cast smallint to bigint, expect no changes") @@ -216,22 +201,16 @@ class TDTestCase: if data_ct4_c3[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c3[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c3[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c3 as timestamp) as b from {self.dbname}.t1") for i in range(len(data_t1_c3)): if data_t1_c3[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c3[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c3[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdLog.printNoPrefix("==========step16: cast tinyint to bigint, expect no changes") @@ -273,22 +252,16 @@ class TDTestCase: if data_ct4_c4[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c4[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c4[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c4 as timestamp) as b from {self.dbname}.t1") for i in range(len(data_t1_c4)): if data_t1_c4[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c4[i]/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c4[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdLog.printNoPrefix("==========step20: cast float to bigint, expect no changes") @@ -326,21 +299,15 @@ class TDTestCase: if data_ct4_c5[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c5[i])/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c5[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c5 as 
timestamp) as b from {self.dbname}.t1") for i in range(len(data_t1_c5)): if data_t1_c5[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_t1_c5[i])/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c5[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdLog.printNoPrefix("==========step24: cast double to bigint, expect no changes") tdSql.query(f"select c6 from {self.dbname}.ct4") @@ -382,11 +349,8 @@ class TDTestCase: if data_ct4_c6[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c6[i])/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c6[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c6 as timestamp) as b from {self.dbname}.t1") for i in range(len(data_t1_c6)): @@ -395,11 +359,8 @@ class TDTestCase: elif i == 10: continue else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_t1_c6[i])/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c6[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdLog.printNoPrefix("==========step28: cast bool to bigint, expect no changes") tdSql.query(f"select c7 from {self.dbname}.ct4") @@ -436,21 +397,15 @@ class TDTestCase: if data_ct4_c7[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c7[i])/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c7[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select cast(c7 as timestamp) as b from {self.dbname}.t1") for i in range(len(data_t1_c7)): if data_t1_c7[i] is None: tdSql.checkData( i, 0 , None ) else: - utc_zone = datetime.timezone.utc - utc_8 = datetime.timezone(datetime.timedelta(hours=8)) - date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_t1_c7[i])/1000) - date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.checkData( i, 0, date_data) + date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c7[i]) / 1000.0) + tdSql.checkData( i, 0, date_init_stamp) tdSql.query(f"select c8 from {self.dbname}.ct4") @@ -694,7 +649,7 @@ class TDTestCase: tdSql.query(f"select cast('123' as tinyint unsigned) as b from {self.dbname}.stb1 partition by tbname") # uion with cast and common cols - + tdSql.query(f"select cast(c2 as int) as b from {self.dbname}.stb1 union all select c1 from {self.dbname}.stb1 ") tdSql.query(f"select 
cast(c3 as bool) as b from {self.dbname}.stb1 union all select c7 from {self.dbname}.ct1 ") tdSql.query(f"select cast(c4 as tinyint) as b from {self.dbname}.stb1 union all select c4 from {self.dbname}.stb1") From eb1612483c84ea74982d5984dae433e255ce7000 Mon Sep 17 00:00:00 2001 From: slzhou Date: Wed, 29 Mar 2023 08:56:49 +0800 Subject: [PATCH 45/71] fix: pass odbc.py test. add two columns --- tests/system-test/2-query/odbc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/odbc.py b/tests/system-test/2-query/odbc.py index 9bbff4af21..f9232dddf8 100644 --- a/tests/system-test/2-query/odbc.py +++ b/tests/system-test/2-query/odbc.py @@ -22,7 +22,7 @@ class TDTestCase: tdSql.execute("insert into db.ctb using db.stb tags(1) (ts, c1) values (now, 1)") tdSql.query("select count(*) from information_schema.ins_columns") - tdSql.checkData(0, 0, 272) + tdSql.checkData(0, 0, 274) tdSql.query("select * from information_schema.ins_columns where table_name = 'ntb'") tdSql.checkRows(14) From 30e8191f58395e0ea1c52e606b73aad9246f293b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 29 Mar 2023 10:02:55 +0800 Subject: [PATCH 46/71] docs: update taoskeeper docs installation part (#20680) --- docs/en/14-reference/14-taosKeeper.md | 22 ++++++++++------------ docs/zh/14-reference/14-taosKeeper.md | 17 ++++++++--------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md index 8176b70e3d..3c91cc15b3 100644 --- a/docs/en/14-reference/14-taosKeeper.md +++ b/docs/en/14-reference/14-taosKeeper.md @@ -13,14 +13,12 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep ## Installation - +There are two ways to install taosKeeper: Methods of installing taosKeeper: - - -- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details. --> -You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details. +- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](/operation/pkg-install) for details. +- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details. ## Configuration and Launch ### Configuration @@ -110,7 +108,7 @@ The following `launchctl` commands can help you manage taoskeeper service: #### Launch With Configuration File -You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used. +You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used. 
```shell $ taoskeeper -c @@ -188,19 +186,19 @@ $ curl http://127.0.0.1:6043/metrics Sample result set (excerpt): ```shell -# HELP taos_cluster_info_connections_total +# HELP taos_cluster_info_connections_total # TYPE taos_cluster_info_connections_total counter taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16 -# HELP taos_cluster_info_dbs_total +# HELP taos_cluster_info_dbs_total # TYPE taos_cluster_info_dbs_total counter taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2 -# HELP taos_cluster_info_dnodes_alive +# HELP taos_cluster_info_dnodes_alive # TYPE taos_cluster_info_dnodes_alive counter taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1 -# HELP taos_cluster_info_dnodes_total +# HELP taos_cluster_info_dnodes_total # TYPE taos_cluster_info_dnodes_total counter taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 -# HELP taos_cluster_info_first_ep +# HELP taos_cluster_info_first_ep # TYPE taos_cluster_info_first_ep gauge taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 -``` \ No newline at end of file +``` diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md index 66c21dc1a1..2cdc24dfef 100644 --- a/docs/zh/14-reference/14-taosKeeper.md +++ b/docs/zh/14-reference/14-taosKeeper.md @@ -13,12 +13,11 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的 ## 安装 - +taosKeeper 有两种安装方式: taosKeeper 安装方式: - +- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](/operation/pkg-install)。 - - 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。 ## 配置和运行方式 @@ -112,7 +111,7 @@ Active: inactive (dead) #### 配置文件启动 -执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。 +执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。 ```shell $ taoskeeper -c @@ -190,19 +189,19 @@ $ curl http://127.0.0.1:6043/metrics 部分结果集: ```shell -# HELP taos_cluster_info_connections_total +# HELP taos_cluster_info_connections_total # TYPE taos_cluster_info_connections_total counter taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16 -# HELP taos_cluster_info_dbs_total +# HELP taos_cluster_info_dbs_total # TYPE taos_cluster_info_dbs_total counter taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2 -# HELP taos_cluster_info_dnodes_alive +# HELP taos_cluster_info_dnodes_alive # TYPE taos_cluster_info_dnodes_alive counter taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1 -# HELP taos_cluster_info_dnodes_total +# HELP taos_cluster_info_dnodes_total # TYPE taos_cluster_info_dnodes_total counter taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 -# HELP taos_cluster_info_first_ep +# HELP taos_cluster_info_first_ep # TYPE taos_cluster_info_first_ep gauge taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 ``` From 29603a82219ff523e74d1e323abadf6b91833bde Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 29 Mar 2023 10:10:41 +0800 Subject: [PATCH 47/71] docs: add taoskeeper note for monitor parameter (#20633) --- docs/en/14-reference/12-config/index.md | 3 +++ docs/zh/14-reference/12-config/index.md | 3 +++ 2 files changed, 6 insertions(+) diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index a76074f507..430487a3af 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -99,6 +99,9 @@ The parameters described in this 
document by the effect that they have on the sy ## Monitoring Parameters +:::note +Please note the `taoskeeper` needs to be installed and running to create the `log` database and receiving metrics sent by `taosd` as the full monitoring solution. + ### monitor | Attribute | Description | diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index 6eeb577ab5..e5efd77f80 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -99,6 +99,9 @@ taos --dump-config ## 监控相关 +:::note +请注意,完整的监控功能需要安装并运行 `taoskeeper` 服务。taoskeeper 负责接收监控指标数据并创建 `log` 库。 + ### monitor | 属性 | 说明 | From 883734a6b833a8f5c0dd81e37ee6e6083c564d83 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 29 Mar 2023 11:33:18 +0800 Subject: [PATCH 48/71] fix: use offical pypi repository --- tests/system-test/0-others/udfpy_main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/0-others/udfpy_main.py b/tests/system-test/0-others/udfpy_main.py index c6b2af0f83..916b032edb 100644 --- a/tests/system-test/0-others/udfpy_main.py +++ b/tests/system-test/0-others/udfpy_main.py @@ -419,7 +419,7 @@ class TDTestCase: tdLog.info("install taospyudf...") packs = ["taospyudf"] for pack in packs: - subprocess.check_call([sys.executable, '-m', 'pip', 'install', pack]) + subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-i', 'https://pypi.org/simple', '-U', pack]) tdLog.info("call ldconfig...") os.system("ldconfig") tdLog.info("install taospyudf successfully.") @@ -461,4 +461,4 @@ class TDTestCase: tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) From 8fdc687097a4bda652a6dc3944ec7b5548d6c61a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 29 Mar 2023 11:47:23 +0800 Subject: [PATCH 49/71] fix: taosdump free tbname in loose mode (#20681) * fix: taosbenchmark support same min/max * fix: taosbenchmark support same min/max * fix: telnet_tcp.py case * fix: enable sml_json_all_types.py * fix: taos-tools update * fix: taosdump free tbname in loose mode --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index ac3c1e2bca..3f7a43ab2d 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 7651857 + GIT_TAG e82b9fc SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From dcf5f98b4b83b452fd1837d1548a545fe3a73fd7 Mon Sep 17 00:00:00 2001 From: slzhou Date: Wed, 29 Mar 2023 15:52:08 +0800 Subject: [PATCH 50/71] fix: change name and body to func_name and func_body --- source/common/src/systable.c | 4 ++-- tests/script/tsim/query/udfpy.sim | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 06fe7482e2..2c15980167 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -114,8 +114,8 @@ static const SSysDbTableSchema userFuncSchema[] = { {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "bufsize", .bytes = 
4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "language", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "body", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "func_language", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "func_body", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userIdxSchema[] = { diff --git a/tests/script/tsim/query/udfpy.sim b/tests/script/tsim/query/udfpy.sim index 2340235daa..025df7984b 100644 --- a/tests/script/tsim/query/udfpy.sim +++ b/tests/script/tsim/query/udfpy.sim @@ -42,6 +42,25 @@ sql show functions; if $rows != 4 then return -1 endi + +sql select func_language, func_body,name from information_schema.ins_functions order by name +if $rows != 4 then + return -1 +endi + +if $data00 != @C@ then + return -1 +endi +if $data10 != @C@ then + return -1 +endi +if $data20 != @Python@ then + return -1 +endi +if $data30 != @Python@ then + return -1 +endi + sql select bit_and(f, f) from t; if $rows != 2 then return -1 From df2ecea34ff3173086aadec468670cc474edf747 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 29 Mar 2023 16:57:15 +0800 Subject: [PATCH 51/71] test: tag_index_cluster.py file was added by mistake --- .../system-test/0-others/tag_index_cluster.py | 171 ------------------ 1 file changed, 171 deletions(-) delete mode 100644 tests/system-test/0-others/tag_index_cluster.py diff --git a/tests/system-test/0-others/tag_index_cluster.py b/tests/system-test/0-others/tag_index_cluster.py deleted file mode 100644 index b1ae74f567..0000000000 --- a/tests/system-test/0-others/tag_index_cluster.py +++ /dev/null @@ -1,171 +0,0 @@ -from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE -import taos -import sys -import time -import os - -from util.log import * -from util.sql import * -from util.cases import * -from util.dnodes import * -from util.dnodes import TDDnodes -from util.dnodes import TDDnode -import time -import socket -import subprocess - -class MyDnodes(TDDnodes): - def __init__(self ,dnodes_lists): - super(MyDnodes,self).__init__() - self.dnodes = dnodes_lists # dnode must be TDDnode instance - self.simDeployed = False - -class TagCluster: - noConn = True - def init(self, conn, logSql, replicaVar=1): - tdLog.debug(f"start to excute {__file__}") - self.TDDnodes = None - self.depoly_cluster(5) - self.master_dnode = self.TDDnodes.dnodes[0] - self.host=self.master_dnode.cfgDict["fqdn"] - conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) - tdSql.init(conn1.cursor()) - - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files or "taosd.exe" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - return buildPath - - - def depoly_cluster(self ,dnodes_nums): - - testCluster = False - valgrind = 0 - hostname = socket.gethostname() - dnodes = [] - start_port = 6030 - for num in range(1, dnodes_nums+1): - dnode = TDDnode(num) - dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") - 
dnode.addExtraCfg("fqdn", f"{hostname}") - dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") - dnode.addExtraCfg("monitorFqdn", hostname) - dnode.addExtraCfg("monitorPort", 7043) - dnodes.append(dnode) - - self.TDDnodes = MyDnodes(dnodes) - self.TDDnodes.init("") - self.TDDnodes.setTestCluster(testCluster) - self.TDDnodes.setValgrind(valgrind) - - self.TDDnodes.setAsan(tdDnodes.getAsan()) - self.TDDnodes.stopAll() - for dnode in self.TDDnodes.dnodes: - self.TDDnodes.deploy(dnode.index,{}) - - for dnode in self.TDDnodes.dnodes: - self.TDDnodes.starttaosd(dnode.index) - - # create cluster - for dnode in self.TDDnodes.dnodes[1:]: - # print(dnode.cfgDict) - dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] - dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] - dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] - cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\"" - print(cmd) - os.system(cmd) - - time.sleep(2) - tdLog.info(" create cluster done! ") - - def five_dnode_one_mnode(self): - tdSql.query("select * from information_schema.ins_dnodes;") - tdSql.checkData(0,1,'%s:6030'%self.host) - tdSql.checkData(4,1,'%s:6430'%self.host) - tdSql.checkData(0,4,'ready') - tdSql.checkData(4,4,'ready') - tdSql.query("select * from information_schema.ins_mnodes;") - tdSql.checkData(0,1,'%s:6030'%self.host) - tdSql.checkData(0,2,'leader') - tdSql.checkData(0,3,'ready') - - - tdSql.error("create mnode on dnode 1;") - tdSql.error("drop mnode on dnode 1;") - - tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db replica 1 duration 300") - tdSql.execute("use db") - tdSql.execute( - '''create table stb1 - (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) - tags (t1 int) - ''' - ) - tdSql.execute( - ''' - create table t1 - (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) - ''' - ) - for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - - tdSql.query('select * from information_schema.ins_databases;') - tdSql.checkData(2,5,'on') - tdSql.error("alter database db strict 'off'") - # tdSql.execute('alter database db strict 'on'') - # tdSql.query('select * from information_schema.ins_databases;') - # tdSql.checkData(2,5,'on') - - def getConnection(self, dnode): - host = dnode.cfgDict["fqdn"] - port = dnode.cfgDict["serverPort"] - config_dir = dnode.cfgDir - return taos.connect(host=host, port=int(port), config=config_dir) - - def check_alive(self): - # check cluster alive - tdLog.printNoPrefix("======== test cluster alive: ") - tdSql.checkDataLoop(0, 0, 1, "show cluster alive;", 20, 0.5) - - tdSql.query("show db.alive;") - tdSql.checkData(0, 0, 1) - - # stop 3 dnode - self.TDDnodes.stoptaosd(3) - tdSql.checkDataLoop(0, 0, 2, "show cluster alive;", 20, 0.5) - - tdSql.query("show db.alive;") - tdSql.checkData(0, 0, 2) - - # stop 2 dnode - self.TDDnodes.stoptaosd(2) - tdSql.checkDataLoop(0, 0, 0, "show cluster alive;", 20, 0.5) - - tdSql.query("show db.alive;") - tdSql.checkData(0, 0, 0) - - - def run(self): - # print(self.master_dnode.cfgDict) - self.five_dnode_one_mnode() - # check cluster and db alive - self.check_alive() - - def stop(self): - tdSql.close() - tdLog.success(f"{__file__} successfully executed") From 7568a89553d955cc763d336ce50f32bf971c6f8d Mon Sep 17 
00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 29 Mar 2023 18:44:24 +0800 Subject: [PATCH 52/71] test: tmqDnodeRestart.py set query retry times from 10 to 50 --- tests/system-test/7-tmq/tmqDnodeRestart.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py index afd54c9d02..3ad7d7692d 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart.py @@ -146,7 +146,7 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - tdSql.query(queryString) + tdSql.query(queryString, None, 50) totalRowsFromQury = tdSql.getRows() tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQury)) @@ -236,7 +236,7 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - tdSql.query(queryString) + tdSql.query(queryString, None, 50) totalRowsFromQuery = tdSql.getRows() tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery)) From 5e2b26f1467a528a9c2d72df627e083f87d4283a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 30 Mar 2023 19:30:54 +0800 Subject: [PATCH 53/71] chore: refactor cus prompt (#20688) * chore: refactor cus prompt * fix: client name in install.sh * fix: -Wno-reserved-user-defined-literal * fix: update taos-tools commit * fix: include/os/osDir.h * fix: check cus name * fix: makepkg.sh * chore: update taos-tools d194dc9 --------- Co-authored-by: chenhaoran --- cmake/cmake.define | 2 +- cmake/taostools_CMakeLists.txt.in | 2 +- include/os/osDir.h | 28 ++++++++++++++++++++++ include/util/cus_name.h | 31 ++++++++++++++++++++++++ packaging/cfg/taos.cfg | 15 ++++++------ packaging/tools/makepkg.sh | 4 +++- source/client/src/clientEnv.c | 14 +++++++++-- source/common/src/tglobal.c | 4 ++++ source/os/src/osSysinfo.c | 12 ++++++++++ tools/shell/CMakeLists.txt | 4 ---- tools/shell/src/shellArguments.c | 39 +++++++++++++++++-------------- 11 files changed, 121 insertions(+), 34 deletions(-) create mode 100644 include/util/cus_name.h diff --git a/cmake/cmake.define b/cmake/cmake.define index 10f2172541..5b65738c70 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -121,7 +121,7 @@ ELSE () MESSAGE(STATUS "Compile with Address Sanitizer!") ELSE () SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") ENDIF () # disable all assert diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 3f7a43ab2d..aef89a2d42 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG e82b9fc + GIT_TAG d194dc9 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/include/os/osDir.h b/include/os/osDir.h index 73871602c5..55c7a15764 100644 --- a/include/os/osDir.h +++ b/include/os/osDir.h @@ -31,21 +31,49 @@ 
extern "C" { #endif +#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) +#include "cus_name.h" +#endif + #ifdef WINDOWS + #define TD_TMP_DIR_PATH "C:\\Windows\\Temp\\" +#ifdef CUS_NAME +#define TD_CFG_DIR_PATH "C:\\"CUS_NAME"\\cfg\\" +#define TD_DATA_DIR_PATH "C:\\"CUS_NAME"\\data\\" +#define TD_LOG_DIR_PATH "C:\\"CUS_NAME"\\log\\" +#else #define TD_CFG_DIR_PATH "C:\\TDengine\\cfg\\" #define TD_DATA_DIR_PATH "C:\\TDengine\\data\\" #define TD_LOG_DIR_PATH "C:\\TDengine\\log\\" +#endif // CUS_NAME + #elif defined(_TD_DARWIN_64) + +#ifdef CUS_PROMPT +#define TD_TMP_DIR_PATH "/tmp/"CUS_PROMPT"d/" +#define TD_CFG_DIR_PATH "/etc/"CUS_PROMPT"/" +#define TD_DATA_DIR_PATH "/var/lib/"CUS_PROMPT"/" +#define TD_LOG_DIR_PATH "/var/log/"CUS_PROMPT"/" +#else #define TD_TMP_DIR_PATH "/tmp/taosd/" #define TD_CFG_DIR_PATH "/etc/taos/" #define TD_DATA_DIR_PATH "/var/lib/taos/" #define TD_LOG_DIR_PATH "/var/log/taos/" +#endif // CUS_PROMPT + #else + #define TD_TMP_DIR_PATH "/tmp/" +#ifdef CUS_PROMPT +#define TD_CFG_DIR_PATH "/etc/"CUS_PROMPT"/" +#define TD_DATA_DIR_PATH "/var/lib/"CUS_PROMPT"/" +#define TD_LOG_DIR_PATH "/var/log/"CUS_PROMPT"/" +#else #define TD_CFG_DIR_PATH "/etc/taos/" #define TD_DATA_DIR_PATH "/var/lib/taos/" #define TD_LOG_DIR_PATH "/var/log/taos/" +#endif // CUS_PROMPT #endif typedef struct TdDir *TdDirPtr; diff --git a/include/util/cus_name.h b/include/util/cus_name.h new file mode 100644 index 0000000000..16f677f855 --- /dev/null +++ b/include/util/cus_name.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _CUS_NAME_H_ +#define _CUS_NAME_H_ + +#ifndef CUS_NAME +#define CUS_NAME "TDengine" +#endif + +#ifndef CUS_PROMPT +#define CUS_PROMPT "taos" +#endif + +#ifndef CUS_EMAIL +#define CUS_EMAIL "" +#endif + +#endif // _CUS_NAME_H_ diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index a98dc5a236..2159899aa2 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -1,7 +1,6 @@ ######################################################## # # # Configuration # -# Any questions, please email support@taosdata.com # # # ######################################################## @@ -13,7 +12,7 @@ ############### 1. Cluster End point ############################ -# The end point of the first dnode in the cluster to be connected to when this dnode or a CLI `taos` is started +# The end point of the first dnode in the cluster to be connected to when this dnode or the CLI utility is started # firstEp hostname:6030 # The end point of the second dnode to be connected to if the firstEp is not available @@ -25,7 +24,7 @@ # The FQDN of the host on which this dnode will be started. 
It can be IP address # fqdn hostname -# The port for external access after this dnode is started +# The port for external access after this dnode is started # serverPort 6030 # The maximum number of connections a dnode can accept @@ -96,7 +95,7 @@ # if free disk space is less than this value, this dnode will fail to start # minimalDataDirGB 2.0 -# enable/disable system monitor +# enable/disable system monitor # monitor 1 # The following parameter is used to limit the maximum number of lines in log files. @@ -114,8 +113,8 @@ # The following parameters are used for debug purpose only by this dnode. # debugFlag is a 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR -# Available debug levels are: -# 131: output warning and error +# Available debug levels are: +# 131: output warning and error # 135: output debug, warning and error # 143: output trace, debug, warning and error to log # 199: output debug, warning and error to both screen and file @@ -130,7 +129,7 @@ # debug flag for util # uDebugFlag 131 -# debug flag for rpc +# debug flag for rpc # rpcDebugFlag 131 # debug flag for jni @@ -139,7 +138,7 @@ # debug flag for query # qDebugFlag 131 -# debug flag for taosc driver +# debug flag for client driver # cDebugFlag 131 # debug flag for dnode messages diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 29160238ce..f30ec23b9f 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -234,7 +234,9 @@ if [ "$verMode" == "cluster" ]; then sed -i "s/serverName2=\"taosd\"/serverName2=\"${serverName2}\"/g" remove_temp.sh sed -i "s/clientName2=\"taos\"/clientName2=\"${clientName2}\"/g" remove_temp.sh sed -i "s/productName2=\"TDengine\"/productName2=\"${productName2}\"/g" remove_temp.sh - sed -i "s/emailName2=\"taosdata.com\"/emailName2=\"${cusEmail2}\"/g" remove_temp.sh + cusDomain=`echo "${cusEmail2}" | sed 's/^[^@]*@//'` + echo "domain is ${cusDomain}" + sed -i "s/emailName2=\"taosdata.com\"/emailName2=\"${cusDomain}\"/g" remove_temp.sh mv remove_temp.sh ${install_dir}/bin/remove.sh fi if [ "$verMode" == "cloud" ]; then diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index de08ba66cc..874ac12f5c 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -30,6 +30,10 @@ #include "tsched.h" #include "ttime.h" +#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) +#include "cus_name.h" +#endif + #define TSC_VAR_NOT_RELEASE 1 #define TSC_VAR_RELEASED 0 @@ -541,9 +545,15 @@ void taos_init_imp(void) { deltaToUtcInitOnce(); - if (taosCreateLog("taoslog", 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) { + char logDirName[64] = {0}; +#ifdef CUS_PROMPT + snprintf(logDirName, 64, "%slog", CUS_PROMPT); +#else + snprintf(logDirName, 64, "taoslog"); +#endif + if (taosCreateLog(logDirName, 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) { // ignore create log failed, only print - printf(" WARING: Create taoslog failed:%s. configDir=%s\n", strerror(errno), configDir); + printf(" WARING: Create %s failed:%s. 
configDir=%s\n", logDirName, strerror(errno), configDir); } if (taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1) != 0) { diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index aeeec1d61c..1c2d533977 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -228,7 +228,11 @@ static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *input taosExpandDir(inputCfgDir, cfgDir, PATH_MAX); if (taosIsDir(cfgDir)) { +#ifdef CUS_PROMPT + snprintf(cfgFile, sizeof(cfgFile), "%s" TD_DIRSEP "%s.cfg", CUS_PROMPT, cfgDir); +#else snprintf(cfgFile, sizeof(cfgFile), "%s" TD_DIRSEP "taos.cfg", cfgDir); +#endif } else { tstrncpy(cfgFile, cfgDir, sizeof(cfgDir)); } diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 52309a7b35..84004ed3c1 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -17,6 +17,10 @@ #include "os.h" #include "taoserror.h" +#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) +#include "cus_name.h" +#endif + #define PROCESS_ITEM 12 #define UUIDLEN37 37 @@ -252,7 +256,11 @@ int32_t taosGetEmail(char *email, int32_t maxLen) { #ifdef WINDOWS // ASSERT(0); #elif defined(_TD_DARWIN_64) +#ifdef CUS_PROMPT + const char *filepath = "/usr/local/"CUS_PROMPT"/email"; +#else const char *filepath = "/usr/local/taos/email"; +#endif // CUS_PROMPT TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ); if (pFile == NULL) return false; @@ -264,8 +272,12 @@ int32_t taosGetEmail(char *email, int32_t maxLen) { taosCloseFile(&pFile); return 0; +#else +#ifdef CUS_PROMPT + const char *filepath = "/usr/local/"CUS_PROMPT"/email"; #else const char *filepath = "/usr/local/taos/email"; +#endif // CUS_PROMPT TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ); if (pFile == NULL) return false; diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index 600e33feab..7b1dc3a541 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -26,10 +26,6 @@ ELSE () SET(LINK_WEBSOCKET "") ENDIF () -IF (CUS_NAME OR CUS_PROMPT OR CUS_EMAIL) - ADD_DEFINITIONS(-I${CMAKE_CURRENT_SOURCE_DIR}/../../../enterprise/packaging) -ENDIF (CUS_NAME OR CUS_PROMPT OR CUS_EMAIL) - IF (TD_LINUX AND TD_ALPINE) SET(LINK_ARGP "/usr/lib/libargp.a") ELSE () diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 2b7d829652..f7008548f6 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -19,18 +19,6 @@ #include "shellInt.h" -#ifndef CUS_NAME - char cusName[] = "TDengine"; -#endif - -#ifndef CUS_PROMPT - char cusPrompt[] = "taos"; -#endif - -#ifndef CUS_EMAIL - char cusEmail[] = ""; -#endif - #if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) #include "cus_name.h" #endif @@ -92,7 +80,11 @@ void shellPrintHelp() { #endif printf("%s%s%s%s\r\n", indent, "-w,", indent, SHELL_WIDTH); printf("%s%s%s%s\r\n", indent, "-V,", indent, SHELL_VERSION); - printf("\r\n\r\nReport bugs to %s.\r\n", cusEmail); +#ifdef CUS_EMAIL + printf("\r\n\r\nReport bugs to %s.\r\n", CUS_EMAIL); +#else + printf("\r\n\r\nReport bugs to %s.\r\n", "support@taosdata.com"); +#endif } #ifdef LINUX @@ -104,7 +96,11 @@ void shellPrintHelp() { #endif const char *argp_program_version = version; -const char *argp_program_bug_address = cusEmail; +#ifdef CUS_EMAIL +const char *argp_program_bug_address = CUS_EMAIL; +#else +const char *argp_program_bug_address = "support@taosdata.com"; +#endif static struct argp_option shellOptions[] = { {"host", 'h', "HOST", 0, 
SHELL_HOST}, @@ -414,10 +410,19 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { shell.info.clientVersion = "Welcome to the %s Command Line Interface, Client Version:%s\r\n" "Copyright (c) 2022 by %s, all rights reserved.\r\n\r\n"; - strcpy(shell.info.cusName, cusName); - sprintf(shell.info.promptHeader, "%s> ", cusPrompt); +#ifdef CUS_NAME + strcpy(shell.info.cusName, CUS_NAME); +#else + strcpy(shell.info.cusName, "TDengine"); +#endif char promptContinueFormat[32] = {0}; - sprintf(promptContinueFormat, "%%%zus> ", strlen(cusPrompt)); +#ifdef CUS_PROMPT + sprintf(shell.info.promptHeader, "%s> ", CUS_PROMPT); + sprintf(promptContinueFormat, "%%%zus> ", strlen(CUS_PROMPT)); +#else + sprintf(shell.info.promptHeader, "taos> "); + sprintf(promptContinueFormat, "%%%zus> ", strlen("taos")); +#endif sprintf(shell.info.promptContinue, promptContinueFormat, " "); shell.info.promptSize = strlen(shell.info.promptHeader); snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), From 1e2d3a78963bae6695a2c9c57f0e634fc79fa398 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Fri, 31 Mar 2023 09:41:06 +0800 Subject: [PATCH 54/71] enh(taosAdapter): TMQ parameter adjustment (#20711) --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index 1c401ae80e..b2f335e1f7 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG d8059ff + GIT_TAG cb1e89c SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 171d360cac593c889432a4c9c77e1d627a00eece Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 31 Mar 2023 15:25:00 +0800 Subject: [PATCH 55/71] Revert "current from binary to json" --- include/util/tjson.h | 2 +- include/util/tutil.h | 28 +-- source/dnode/vnode/src/tsdb/tsdbFS.c | 262 +++----------------- source/dnode/vnode/src/tsdb/tsdbFile.c | 321 +------------------------ 4 files changed, 45 insertions(+), 568 deletions(-) diff --git a/include/util/tjson.h b/include/util/tjson.h index af0b163986..6922930c13 100644 --- a/include/util/tjson.h +++ b/include/util/tjson.h @@ -25,7 +25,7 @@ extern "C" { #define tjsonGetNumberValue(pJson, pName, val, code) \ do { \ - int64_t _tmp = 0; \ + uint64_t _tmp = 0; \ code = tjsonGetBigIntValue(pJson, pName, &_tmp); \ val = _tmp; \ } while (0) diff --git a/include/util/tutil.h b/include/util/tutil.h index c8f1b29e10..e0801e5295 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -29,7 +29,7 @@ extern "C" { int32_t strdequote(char *src); size_t strtrim(char *src); char *strnchr(const char *haystack, char needle, int32_t len, bool skipquote); -TdUcs4 *wcsnchr(const TdUcs4 *haystack, TdUcs4 needle, size_t len); +TdUcs4* wcsnchr(const TdUcs4* haystack, TdUcs4 needle, size_t len); char **strsplit(char *src, const char *delim, int32_t *num); char *strtolower(char *dst, const char *src); @@ -37,11 +37,11 @@ char *strntolower(char *dst, const char *src, int32_t n); char *strntolower_s(char *dst, const char *src, int32_t n); int64_t strnatoi(char *num, int32_t len); -size_t tstrncspn(const char *str, size_t ssize, const char *reject, size_t rsize); -size_t twcsncspn(const TdUcs4 *wcs, size_t size, const TdUcs4 *reject, size_t rsize); +size_t tstrncspn(const char *str, size_t ssize, const char *reject, size_t 
rsize); +size_t twcsncspn(const TdUcs4 *wcs, size_t size, const TdUcs4 *reject, size_t rsize); -char *strbetween(char *string, char *begin, char *end); -char *paGetToken(char *src, char **token, int32_t *tokenLen); +char *strbetween(char *string, char *begin, char *end); +char *paGetToken(char *src, char **token, int32_t *tokenLen); int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]); int32_t taosHexStrToByteArray(char hexstr[], char bytes[]); @@ -92,26 +92,12 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, } } -#define TSDB_CHECK(condition, CODE, LINO, LABEL, ERRNO) \ - if (!(condition)) { \ - (CODE) = (ERRNO); \ - (LINO) = __LINE__; \ - goto LABEL; \ - } - #define TSDB_CHECK_CODE(CODE, LINO, LABEL) \ - if ((CODE)) { \ - (LINO) = __LINE__; \ + if (CODE) { \ + LINO = __LINE__; \ goto LABEL; \ } -#define TSDB_CHECK_NULL(ptr, CODE, LINO, LABEL, ERRNO) \ - if ((ptr) == NULL) { \ - (CODE) = (ERRNO); \ - (LINO) = __LINE__; \ - goto LABEL; \ - } - #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index b85d8d7746..5519d43012 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -88,98 +88,6 @@ _exit: return code; } -extern int32_t tsdbDelFileToJson(const SDelFile *pDelFile, cJSON *pJson); -extern int32_t tsdbJsonToDelFile(const cJSON *pJson, SDelFile *pDelFile); -extern int32_t tsdbDFileSetToJson(const SDFileSet *pSet, cJSON *pJson); -extern int32_t tsdbJsonToDFileSet(const cJSON *pJson, SDFileSet *pDelFile); - -static int32_t tsdbFSToJsonStr(STsdbFS *pFS, char **ppStr) { - int32_t code = 0; - int32_t lino = 0; - cJSON *pJson; - - ppStr[0] = NULL; - - pJson = cJSON_CreateObject(); - TSDB_CHECK_NULL(pJson, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - // format version - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "format", 1), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - // SDelFile - if (pFS->pDelFile) { - code = tsdbDelFileToJson(pFS->pDelFile, cJSON_AddObjectToObject(pJson, "del")); - TSDB_CHECK_CODE(code, lino, _exit); - } - - // aDFileSet - cJSON *aSetJson = cJSON_AddArrayToObject(pJson, "file set"); - TSDB_CHECK_NULL(aSetJson, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - for (int32_t iSet = 0; iSet < taosArrayGetSize(pFS->aDFileSet); iSet++) { - cJSON *pSetJson = cJSON_CreateObject(); - TSDB_CHECK_NULL(pSetJson, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - cJSON_AddItemToArray(aSetJson, pSetJson); - - code = tsdbDFileSetToJson(taosArrayGet(pFS->aDFileSet, iSet), pSetJson); - TSDB_CHECK_CODE(code, lino, _exit); - } - - // print - ppStr[0] = cJSON_Print(pJson); - TSDB_CHECK_NULL(ppStr[0], code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - -_exit: - cJSON_Delete(pJson); - if (code) tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - return code; -} - -static int32_t tsdbJsonStrToFS(const char *pStr, STsdbFS *pFS) { - int32_t code = 0; - int32_t lino; - - cJSON *pJson = cJSON_Parse(pStr); - TSDB_CHECK(pJson, code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - - const cJSON *pItem; - - // format version - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "format")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - - // SDelFile - if (cJSON_IsObject(pItem = cJSON_GetObjectItem(pJson, "del"))) { - pFS->pDelFile = (SDelFile *)taosMemoryCalloc(1, sizeof(SDelFile)); - TSDB_CHECK_NULL(pFS->pDelFile, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - code = tsdbJsonToDelFile(pItem, pFS->pDelFile); 
- TSDB_CHECK_CODE(code, lino, _exit); - - pFS->pDelFile->nRef = 1; - } else { - pFS->pDelFile = NULL; - } - - // aDFileSet - taosArrayClear(pFS->aDFileSet); - - const cJSON *pSetJson; - TSDB_CHECK(cJSON_IsArray(pItem = cJSON_GetObjectItem(pJson, "file set")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - cJSON_ArrayForEach(pSetJson, pItem) { - SDFileSet *pSet = (SDFileSet *)taosArrayReserve(pFS->aDFileSet, 1); - TSDB_CHECK_NULL(pSet, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - code = tsdbJsonToDFileSet(pSetJson, pSet); - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - cJSON_Delete(pJson); - if (code) tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - return code; -} - static int32_t tsdbSaveFSToFile(STsdbFS *pFS, const char *fname) { int32_t code = 0; int32_t lino = 0; @@ -224,84 +132,6 @@ _exit: return code; } -static int32_t tsdbSaveFSToJsonFile(STsdbFS *pFS, const char *fname) { - int32_t code; - int32_t lino; - char *pData; - - code = tsdbFSToJsonStr(pFS, &pData); - if (code) return code; - - TdFilePtr pFD = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - TSDB_CHECK_CODE(code, lino, _exit); - } - - int64_t n = taosWriteFile(pFD, pData, strlen(pData) + 1); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - taosCloseFile(&pFD); - TSDB_CHECK_CODE(code, lino, _exit); - } - - if (taosFsyncFile(pFD) < 0) { - code = TAOS_SYSTEM_ERROR(errno); - taosCloseFile(&pFD); - TSDB_CHECK_CODE(code, lino, _exit); - } - - taosCloseFile(&pFD); - -_exit: - taosMemoryFree(pData); - if (code) { - tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - } - - return code; -} - -static int32_t tsdbLoadFSFromJsonFile(const char *fname, STsdbFS *pFS) { - int32_t code = 0; - int32_t lino = 0; - char *pData = NULL; - - TdFilePtr pFD = taosOpenFile(fname, TD_FILE_READ); - if (pFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - TSDB_CHECK_CODE(code, lino, _exit); - } - - int64_t size; - if (taosFStatFile(pFD, &size, NULL) < 0) { - code = TAOS_SYSTEM_ERROR(errno); - taosCloseFile(&pFD); - TSDB_CHECK_CODE(code, lino, _exit); - } - - if ((pData = taosMemoryMalloc(size)) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - taosCloseFile(&pFD); - TSDB_CHECK_CODE(code, lino, _exit); - } - - if (taosReadFile(pFD, pData, size) < 0) { - code = TAOS_SYSTEM_ERROR(errno); - taosCloseFile(&pFD); - TSDB_CHECK_CODE(code, lino, _exit); - } - - taosCloseFile(&pFD); - - TSDB_CHECK_CODE(code = tsdbJsonStrToFS(pData, pFS), lino, _exit); - -_exit: - if (pData) taosMemoryFree(pData); - if (code) tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - return code; -} - int32_t tsdbFSCreate(STsdbFS *pFS) { int32_t code = 0; @@ -439,8 +269,7 @@ int32_t tDFileSetCmprFn(const void *p1, const void *p2) { return 0; } -static void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t, char *current_json, - char *current_json_t) { +static void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t) { SVnode *pVnode = pTsdb->pVnode; if (pVnode->pTfs) { if (current) { @@ -451,14 +280,6 @@ static void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t, ch snprintf(current_t, TSDB_FILENAME_LEN - 1, "%s%s%s%sCURRENT.t", tfsGetPrimaryPath(pTsdb->pVnode->pTfs), TD_DIRSEP, pTsdb->path, TD_DIRSEP); } - if (current_json) { - snprintf(current_json, TSDB_FILENAME_LEN - 1, "%s%s%s%scurrent.json", tfsGetPrimaryPath(pTsdb->pVnode->pTfs), - TD_DIRSEP, pTsdb->path, 
TD_DIRSEP); - } - if (current_json_t) { - snprintf(current_json_t, TSDB_FILENAME_LEN - 1, "%s%s%s%scurrent.json.t", tfsGetPrimaryPath(pTsdb->pVnode->pTfs), - TD_DIRSEP, pTsdb->path, TD_DIRSEP); - } } else { if (current) { snprintf(current, TSDB_FILENAME_LEN - 1, "%s%sCURRENT", pTsdb->path, TD_DIRSEP); @@ -466,12 +287,6 @@ static void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t, ch if (current_t) { snprintf(current_t, TSDB_FILENAME_LEN - 1, "%s%sCURRENT.t", pTsdb->path, TD_DIRSEP); } - if (current_json) { - snprintf(current_json, TSDB_FILENAME_LEN - 1, "%s%scurrent.json", pTsdb->path, TD_DIRSEP); - } - if (current_json_t) { - snprintf(current_json_t, TSDB_FILENAME_LEN - 1, "%s%scurrent.json.t", pTsdb->path, TD_DIRSEP); - } } } @@ -887,15 +702,20 @@ _exit: return code; } -static int32_t tsdbFSCommitImpl(STsdb *pTsdb, const char *fname, const char *tfname, bool isJson) { +// EXPOSED APIS ==================================================================================== +int32_t tsdbFSCommit(STsdb *pTsdb) { int32_t code = 0; int32_t lino = 0; STsdbFS fs = {0}; - if (!taosCheckExistFile(tfname)) goto _exit; + char current[TSDB_FILENAME_LEN] = {0}; + char current_t[TSDB_FILENAME_LEN] = {0}; + tsdbGetCurrentFName(pTsdb, current, current_t); + + if (!taosCheckExistFile(current_t)) goto _exit; // rename the file - if (taosRenameFile(tfname, fname) < 0) { + if (taosRenameFile(current_t, current) < 0) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -904,11 +724,7 @@ static int32_t tsdbFSCommitImpl(STsdb *pTsdb, const char *fname, const char *tfn code = tsdbFSCreate(&fs); TSDB_CHECK_CODE(code, lino, _exit); - if (isJson) { - code = tsdbLoadFSFromJsonFile(fname, &fs); - } else { - code = tsdbLoadFSFromFile(fname, &fs); - } + code = tsdbLoadFSFromFile(current, &fs); TSDB_CHECK_CODE(code, lino, _exit); // apply file change @@ -923,19 +739,18 @@ _exit: return code; } -// EXPOSED APIS ==================================================================================== -int32_t tsdbFSCommit(STsdb *pTsdb) { - char current_json[TSDB_FILENAME_LEN] = {0}; - char current_json_t[TSDB_FILENAME_LEN] = {0}; - tsdbGetCurrentFName(pTsdb, NULL, NULL, current_json, current_json_t); - return tsdbFSCommitImpl(pTsdb, current_json, current_json_t, true); -} - int32_t tsdbFSRollback(STsdb *pTsdb) { int32_t code = 0; - char current_json_t[TSDB_FILENAME_LEN] = {0}; - tsdbGetCurrentFName(pTsdb, NULL, NULL, NULL, current_json_t); - (void)taosRemoveFile(current_json_t); + int32_t lino = 0; + + char current_t[TSDB_FILENAME_LEN] = {0}; + tsdbGetCurrentFName(pTsdb, NULL, current_t); + (void)taosRemoveFile(current_t); + +_exit: + if (code) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(errno)); + } return code; } @@ -951,33 +766,13 @@ int32_t tsdbFSOpen(STsdb *pTsdb, int8_t rollback) { // open impl char current[TSDB_FILENAME_LEN] = {0}; char current_t[TSDB_FILENAME_LEN] = {0}; - char current_json[TSDB_FILENAME_LEN] = {0}; - char current_json_t[TSDB_FILENAME_LEN] = {0}; - tsdbGetCurrentFName(pTsdb, current, current_t, current_json, current_json_t); + tsdbGetCurrentFName(pTsdb, current, current_t); if (taosCheckExistFile(current)) { - // CURRENT file exists code = tsdbLoadFSFromFile(current, &pTsdb->fs); TSDB_CHECK_CODE(code, lino, _exit); if (taosCheckExistFile(current_t)) { - if (rollback) { - (void)taosRemoveFile(current_t); - } else { - code = tsdbFSCommitImpl(pTsdb, current, current_t, false); - TSDB_CHECK_CODE(code, lino, 
_exit); - } - } - - code = tsdbSaveFSToJsonFile(&pTsdb->fs, current_json); - TSDB_CHECK_CODE(code, lino, _exit); - (void)taosRemoveFile(current); - } else if (taosCheckExistFile(current_json)) { - // current.json exists - code = tsdbLoadFSFromJsonFile(current_json, &pTsdb->fs); - TSDB_CHECK_CODE(code, lino, _exit); - - if (taosCheckExistFile(current_json_t)) { if (rollback) { code = tsdbFSRollback(pTsdb); TSDB_CHECK_CODE(code, lino, _exit); @@ -987,10 +782,11 @@ int32_t tsdbFSOpen(STsdb *pTsdb, int8_t rollback) { } } } else { - // empty TSDB - ASSERT(!rollback); - code = tsdbSaveFSToJsonFile(&pTsdb->fs, current_json); + // empty one + code = tsdbSaveFSToFile(&pTsdb->fs, current); TSDB_CHECK_CODE(code, lino, _exit); + + ASSERT(!rollback); } // scan and fix FS @@ -1228,12 +1024,12 @@ _exit: int32_t tsdbFSPrepareCommit(STsdb *pTsdb, STsdbFS *pFSNew) { int32_t code = 0; int32_t lino = 0; - char current_json_t[TSDB_FILENAME_LEN]; + char tfname[TSDB_FILENAME_LEN]; - tsdbGetCurrentFName(pTsdb, NULL, NULL, NULL, current_json_t); + tsdbGetCurrentFName(pTsdb, NULL, tfname); - // generate current.json - code = tsdbSaveFSToJsonFile(pFSNew, current_json_t); + // gnrt CURRENT.t + code = tsdbSaveFSToFile(pFSNew, tfname); TSDB_CHECK_CODE(code, lino, _exit); _exit: diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index 8b2b2caec7..d91475376b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -92,11 +92,11 @@ static int32_t tGetSmaFile(uint8_t *p, SSmaFile *pSmaFile) { } // EXPOSED APIS ================================================== -static char *getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t commitId, char fname[]) { - const char *p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did); - int32_t len = strlen(p1); +static char* getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t commitId, char fname[]) { + const char* p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did); + int32_t len = strlen(p1); - char *p = memcpy(fname, p1, len); + char* p = memcpy(fname, p1, len); p += len; *(p++) = TD_DIRSEP[0]; @@ -121,25 +121,25 @@ static char *getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t } void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) { - char *p = getFileNamePrefix(pTsdb, did, fid, pHeadF->commitID, fname); + char* p = getFileNamePrefix(pTsdb, did, fid, pHeadF->commitID, fname); memcpy(p, ".head", 5); p[5] = 0; } void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]) { - char *p = getFileNamePrefix(pTsdb, did, fid, pDataF->commitID, fname); + char* p = getFileNamePrefix(pTsdb, did, fid, pDataF->commitID, fname); memcpy(p, ".data", 5); p[5] = 0; } void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) { - char *p = getFileNamePrefix(pTsdb, did, fid, pSttF->commitID, fname); + char* p = getFileNamePrefix(pTsdb, did, fid, pSttF->commitID, fname); memcpy(p, ".stt", 4); p[4] = 0; } void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) { - char *p = getFileNamePrefix(pTsdb, did, fid, pSmaF->commitID, fname); + char* p = getFileNamePrefix(pTsdb, did, fid, pSmaF->commitID, fname); memcpy(p, ".sma", 4); p[4] = 0; } @@ -280,272 +280,6 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet) { return n; } -static int32_t tDiskIdToJson(const SDiskID *pDiskId, cJSON *pJson) { - int32_t code = 0; - int32_t lino; - - if (pJson == NULL) 
return TSDB_CODE_OUT_OF_MEMORY; - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "level", pDiskId->level), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "id", pDiskId->id), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - -_exit: - return code; -} -static int32_t tJsonToDiskId(const cJSON *pJson, SDiskID *pDiskId) { - int32_t code = 0; - int32_t lino; - - const cJSON *pItem; - - // level - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "level")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pDiskId->level = (int32_t)pItem->valuedouble; - - // id - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "id")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pDiskId->id = (int32_t)pItem->valuedouble; - -_exit: - return code; -} - -static int32_t tHeadFileToJson(const SHeadFile *pHeadF, cJSON *pJson) { - int32_t code = 0; - int32_t lino; - - if (pJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "commit id", pHeadF->commitID), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "size", pHeadF->size), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "offset", pHeadF->offset), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - -_exit: - return code; -} - -static int32_t tJsonToHeadFile(const cJSON *pJson, SHeadFile *pHeadF) { - int32_t code = 0; - int32_t lino; - - const cJSON *pItem; - - // commit id - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "commit id")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - pHeadF->commitID = (int64_t)pItem->valuedouble; - - // size - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "size")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pHeadF->size = (int64_t)pItem->valuedouble; - - // offset - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "offset")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pHeadF->offset = (int64_t)pItem->valuedouble; - -_exit: - return code; -} - -static int32_t tDataFileToJson(const SDataFile *pDataF, cJSON *pJson) { - int32_t code = 0; - int32_t lino; - - if (pJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "commit id", pDataF->commitID), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "size", pDataF->size), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - -_exit: - return code; -} - -static int32_t tJsonToDataFile(const cJSON *pJson, SDataFile *pDataF) { - int32_t code = 0; - int32_t lino; - - const cJSON *pItem; - - // commit id - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "commit id")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - pDataF->commitID = (int64_t)pItem->valuedouble; - - // size - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "size")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pDataF->size = (int64_t)pItem->valuedouble; - -_exit: - return code; -} - -static int32_t tSmaFileToJson(const SSmaFile *pSmaF, cJSON *pJson) { - int32_t code = 0; - int32_t lino; - - if (pJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "commit id", pSmaF->commitID), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "size", pSmaF->size), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - -_exit: - return code; -} - -static int32_t tJsonToSmaFile(const cJSON *pJson, 
SSmaFile *pSmaF) { - int32_t code = 0; - int32_t lino; - - // commit id - const cJSON *pItem; - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "commit id")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - pSmaF->commitID = (int64_t)pItem->valuedouble; - - // size - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "size")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pSmaF->size = (int64_t)pItem->valuedouble; - -_exit: - return code; -} - -static int32_t tSttFileToJson(const SSttFile *pSttF, cJSON *pJson) { - int32_t code = 0; - int32_t lino; - - if (pJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "commit id", pSttF->commitID), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "size", pSttF->size), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "offset", pSttF->offset), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - -_exit: - return code; -} - -static int32_t tJsonToSttFile(const cJSON *pJson, SSttFile *pSttF) { - int32_t code = 0; - int32_t lino; - - const cJSON *pItem; - - // commit id - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "commit id")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - pSttF->commitID = (int64_t)pItem->valuedouble; - - // size - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "size")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pSttF->size = (int64_t)pItem->valuedouble; - - // offset - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "offset")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pSttF->offset = (int64_t)pItem->valuedouble; - -_exit: - return code; -} - -int32_t tsdbDFileSetToJson(const SDFileSet *pSet, cJSON *pJson) { - int32_t code = 0; - int32_t lino; - - if (pJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; - - code = tDiskIdToJson(&pSet->diskId, cJSON_AddObjectToObject(pJson, "disk id")); - TSDB_CHECK_CODE(code, lino, _exit); - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "fid", pSet->fid), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - // head - code = tHeadFileToJson(pSet->pHeadF, cJSON_AddObjectToObject(pJson, "head")); - TSDB_CHECK_CODE(code, lino, _exit); - - // data - code = tDataFileToJson(pSet->pDataF, cJSON_AddObjectToObject(pJson, "data")); - TSDB_CHECK_CODE(code, lino, _exit); - - // sma - code = tSmaFileToJson(pSet->pSmaF, cJSON_AddObjectToObject(pJson, "sma")); - TSDB_CHECK_CODE(code, lino, _exit); - - // stt array - cJSON *aSttJson = cJSON_AddArrayToObject(pJson, "stt"); - TSDB_CHECK_NULL(aSttJson, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { - cJSON *pSttJson = cJSON_CreateObject(); - TSDB_CHECK_NULL(pSttJson, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - - cJSON_AddItemToArray(aSttJson, pSttJson); - - code = tSttFileToJson(pSet->aSttF[iStt], pSttJson); - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - return code; -} - -int32_t tsdbJsonToDFileSet(const cJSON *pJson, SDFileSet *pSet) { - int32_t code = 0; - int32_t lino; - - const cJSON *pItem; - // disk id - TSDB_CHECK(cJSON_IsObject(pItem = cJSON_GetObjectItem(pJson, "disk id")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - code = tJsonToDiskId(pItem, &pSet->diskId); - TSDB_CHECK_CODE(code, lino, _exit); - - // fid - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "fid")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pSet->fid = (int32_t)pItem->valuedouble; - - // head - 
TSDB_CHECK(cJSON_IsObject(pItem = cJSON_GetObjectItem(pJson, "head")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - TSDB_CHECK_NULL(pSet->pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile)), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_CODE(code = tJsonToHeadFile(pItem, pSet->pHeadF), lino, _exit); - pSet->pHeadF->nRef = 1; - - // data - TSDB_CHECK(cJSON_IsObject(pItem = cJSON_GetObjectItem(pJson, "data")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - TSDB_CHECK_NULL(pSet->pDataF = (SDataFile *)taosMemoryMalloc(sizeof(SDataFile)), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_CODE(code = tJsonToDataFile(pItem, pSet->pDataF), lino, _exit); - pSet->pDataF->nRef = 1; - - // sma - TSDB_CHECK(cJSON_IsObject(pItem = cJSON_GetObjectItem(pJson, "sma")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - TSDB_CHECK_NULL(pSet->pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile)), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_CODE(code = tJsonToSmaFile(pItem, pSet->pSmaF), lino, _exit); - pSet->pSmaF->nRef = 1; - - // stt array - const cJSON *element; - pSet->nSttF = 0; - TSDB_CHECK(cJSON_IsArray(pItem = cJSON_GetObjectItem(pJson, "stt")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - cJSON_ArrayForEach(element, pItem) { - TSDB_CHECK(cJSON_IsObject(element), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - - pSet->aSttF[pSet->nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile)); - TSDB_CHECK_NULL(pSet->aSttF[pSet->nSttF], code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_CODE(code = tJsonToSttFile(element, pSet->aSttF[pSet->nSttF]), lino, _exit); - pSet->aSttF[pSet->nSttF]->nRef = 1; - pSet->nSttF++; - } - -_exit: - if (code) tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - return code; -} - // SDelFile =============================================== void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]) { snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%dver%" PRId64 "%s", tfsGetPrimaryPath(pTsdb->pVnode->pTfs), @@ -571,42 +305,3 @@ int32_t tGetDelFile(uint8_t *p, SDelFile *pDelFile) { return n; } - -int32_t tsdbDelFileToJson(const SDelFile *pDelFile, cJSON *pJson) { - if (pJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; - - int32_t code = 0; - int32_t lino; - - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "commit id", pDelFile->commitID), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "size", pDelFile->size), code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); - TSDB_CHECK_NULL(cJSON_AddNumberToObject(pJson, "offset", pDelFile->offset), code, lino, _exit, - TSDB_CODE_OUT_OF_MEMORY); - -_exit: - return code; -} - -int32_t tsdbJsonToDelFile(const cJSON *pJson, SDelFile *pDelFile) { - int32_t code = 0; - int32_t lino; - - const cJSON *pItem; - - // commit id - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "commit id")), code, lino, _exit, - TSDB_CODE_FILE_CORRUPTED); - pDelFile->commitID = cJSON_GetNumberValue(pItem); - - // size - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "size")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pDelFile->size = cJSON_GetNumberValue(pItem); - - // offset - TSDB_CHECK(cJSON_IsNumber(pItem = cJSON_GetObjectItem(pJson, "offset")), code, lino, _exit, TSDB_CODE_FILE_CORRUPTED); - pDelFile->offset = cJSON_GetNumberValue(pItem); - -_exit: - return code; -} \ No newline at end of file From 7c93e32fb5a9f63b175603aa6ea92d697a78fbb2 Mon Sep 17 00:00:00 2001 From: wade zhang 
<95411902+gccgdb1234@users.noreply.github.com> Date: Fri, 31 Mar 2023 17:21:41 +0800 Subject: [PATCH 56/71] Update 14-java.mdx --- docs/zh/08-connector/14-java.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 0a9bd56bfb..d1c1258365 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -17,7 +17,7 @@ import TabItem from '@theme/TabItem'; - JDBC 原生连接:Java 应用在物理节点 1(pnode1)上使用 TSDBDriver 直接调用客户端驱动(libtaos.so 或 taos.dll)的 API 将写入和查询请求发送到位于物理节点 2(pnode2)上的 taosd 实例。 - JDBC REST 连接:Java 应用通过 RestfulDriver 将 SQL 封装成一个 REST 请求,发送给物理节点 2 的 REST 服务器(taosAdapter),通过 REST 服务器请求 taosd 并返回结果。 -使用 REST 连接,不依赖 TDengine 客户端驱动,可以跨平台,更加方便灵活,但性能比原生连接器低约 30%。 +使用 REST 连接,不依赖 TDengine 客户端驱动,可以跨平台,更加方便灵活。 :::info TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但 TDengine 与关系对象型数据库的使用场景和技术特征存在差异,所以`taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: From e8b7c089eef0b215263d293e0c5f2926b694bff2 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 31 Mar 2023 18:09:50 +0800 Subject: [PATCH 57/71] change test case --- tests/system-test/0-others/telemetry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py index bc5d276faa..c62e3c2487 100644 --- a/tests/system-test/0-others/telemetry.py +++ b/tests/system-test/0-others/telemetry.py @@ -181,7 +181,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - vgroups = "30" + vgroups = "8" sql = "create database db3 vgroups " + vgroups tdSql.query(sql) From 41e29f418d0c773865e6103b4fdeab8d373c4fed Mon Sep 17 00:00:00 2001 From: robotspace Date: Sat, 1 Apr 2023 12:19:38 +0800 Subject: [PATCH 58/71] Infactor Lua connection pool implementation. (#20720) * Do not retrieve err msg when connection is established successfully to avoid exception. * Restore check script for lua installation. * Infactor connection pool implementation. 
--- examples/lua/OpenResty/rest/tdpool/init.lua | 47 ++++++++++++--------- examples/lua/OpenResty/rest/test.lua | 21 ++++++--- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/examples/lua/OpenResty/rest/tdpool/init.lua b/examples/lua/OpenResty/rest/tdpool/init.lua index ebf8e91756..c0c6d56590 100644 --- a/examples/lua/OpenResty/rest/tdpool/init.lua +++ b/examples/lua/OpenResty/rest/tdpool/init.lua @@ -1,16 +1,15 @@ local _M = {} local driver = require "luaconnector51" -local water_mark = 0 -local occupied = 0 -local connection_pool = {} +td_pool_watermark = 0 +td_pool_occupied = 0 +td_connection_pool = {} -function _M.new(o,config) +function _M.new(o, config) o = o or {} - o.connection_pool = connection_pool - o.water_mark = water_mark - o.occupied = occupied - if #connection_pool == 0 then - + o.connection_pool = td_connection_pool + o.watermark = td_pool_watermark + o.occupied = td_pool_occupied + if #td_connection_pool == 0 then for i = 1, config.connection_pool_size do local res = driver.connect(config) if res.code ~= 0 then @@ -18,8 +17,8 @@ function _M.new(o,config) return nil else local object = {obj = res.conn, state = 0} - table.insert(o.connection_pool,i, object) - ngx.log(ngx.INFO, "add connection, now pool size:"..#(o.connection_pool)) + table.insert(td_connection_pool, i, object) + ngx.log(ngx.INFO, "add connection, now pool size:"..#(td_connection_pool)) end end @@ -32,13 +31,13 @@ function _M:get_connection() local connection_obj - for i = 1, #connection_pool do - connection_obj = connection_pool[i] + for i = 1, #td_connection_pool do + connection_obj = td_connection_pool[i] if connection_obj.state == 0 then connection_obj.state = 1 - occupied = occupied +1 - if occupied > water_mark then - water_mark = occupied + td_pool_occupied = td_pool_occupied + 1 + if td_pool_occupied > td_pool_watermark then + td_pool_watermark = td_pool_occupied end return connection_obj["obj"] end @@ -49,21 +48,27 @@ function _M:get_connection() return nil end -function _M:get_water_mark() +function _M:get_watermark() - return water_mark + return td_pool_watermark +end + + +function _M:get_current_load() + + return td_pool_occupied end function _M:release_connection(conn) local connection_obj - for i = 1, #connection_pool do - connection_obj = connection_pool[i] + for i = 1, #td_connection_pool do + connection_obj = td_connection_pool[i] if connection_obj["obj"] == conn then connection_obj["state"] = 0 - occupied = occupied -1 + td_pool_occupied = td_pool_occupied -1 return end end diff --git a/examples/lua/OpenResty/rest/test.lua b/examples/lua/OpenResty/rest/test.lua index 48aeef3fb4..cb4a1479f3 100644 --- a/examples/lua/OpenResty/rest/test.lua +++ b/examples/lua/OpenResty/rest/test.lua @@ -4,8 +4,21 @@ local Pool = require "tdpool" local config = require "config" ngx.say("start time:"..os.time()) -local pool = Pool.new(Pool,config) -local conn = pool:get_connection() +local pool = Pool.new(Pool, config) +local another_pool = Pool.new(Pool, config) +local conn, conn1, conn2 +conn = pool:get_connection() +conn1 = pool:get_connection() +conn2 = pool:get_connection() +local temp_conn = another_pool:get_connection() +ngx.say("pool size:"..config.connection_pool_size) +ngx.say("pool watermark:"..pool:get_watermark()) +ngx.say("pool current load:"..pool:get_current_load()) +pool:release_connection(conn1) +pool:release_connection(conn2) +another_pool:release_connection(temp_conn) +ngx.say("pool watermark:"..pool:get_watermark()) +ngx.say("pool current load:"..pool:get_current_load()) 
local res = driver.query(conn,"drop database if exists nginx") if res.code ~=0 then @@ -31,7 +44,6 @@ end res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") if res.code ~=0 then ngx.say("create table---failed: "..res.error) - else ngx.say("create table--- pass.") end @@ -83,8 +95,5 @@ while not flag do -- ngx.say("i am here once...") ngx.sleep(0.001) -- time unit is second end - -ngx.say("pool water_mark:"..pool:get_water_mark()) - pool:release_connection(conn) ngx.say("end time:"..os.time()) From ebc997f769a1ef5ad89fbc68285a4da5e7e68df1 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 3 Apr 2023 11:01:42 +0800 Subject: [PATCH 59/71] test:decreas vgroups --- tests/system-test/0-others/show.py | 2 +- tests/system-test/0-others/taosdMonitor.py | 2 +- tests/system-test/0-others/telemetry.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/system-test/0-others/show.py b/tests/system-test/0-others/show.py index 3e176fe251..b284605a0e 100644 --- a/tests/system-test/0-others/show.py +++ b/tests/system-test/0-others/show.py @@ -28,7 +28,7 @@ class TDTestCase: self.perf_param = ['apps','connections','consumers','queries','transactions'] self.perf_param_list = ['apps','connections','consumers','queries','trans'] self.dbname = "db" - self.vgroups = 10 + self.vgroups = 4 self.stbname = f'`{tdCom.getLongName(5)}`' self.tbname = f'`{tdCom.getLongName(3)}`' self.db_param = { diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py index 944ff52d5b..8094c4e0f5 100644 --- a/tests/system-test/0-others/taosdMonitor.py +++ b/tests/system-test/0-others/taosdMonitor.py @@ -292,7 +292,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - vgroups = "30" + vgroups = "4" sql = "create database db3 vgroups " + vgroups tdSql.query(sql) sql = "create table db3.stb (ts timestamp, f int) tags (t int)" diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py index c62e3c2487..3b6cb10509 100644 --- a/tests/system-test/0-others/telemetry.py +++ b/tests/system-test/0-others/telemetry.py @@ -181,7 +181,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - vgroups = "8" + vgroups = "4" sql = "create database db3 vgroups " + vgroups tdSql.query(sql) From 1a2a04b030b37aead080419cbe19670bbceb14e0 Mon Sep 17 00:00:00 2001 From: sunpeng Date: Mon, 3 Apr 2023 16:20:08 +0800 Subject: [PATCH 60/71] docs: update docs for tdinsight and taoskeeper (#20738) --- docs/en/14-reference/14-taosKeeper.md | 17 ++++++ docs/en/20-third-party/01-grafana.mdx | 2 +- docs/zh/14-reference/07-tdinsight/index.mdx | 65 +++++++++++---------- docs/zh/14-reference/14-taosKeeper.md | 17 ++++++ docs/zh/20-third-party/01-grafana.mdx | 2 +- 5 files changed, 69 insertions(+), 34 deletions(-) diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md index 3c91cc15b3..895bd82e19 100644 --- a/docs/en/14-reference/14-taosKeeper.md +++ b/docs/en/14-reference/14-taosKeeper.md @@ -202,3 +202,20 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 # TYPE taos_cluster_info_first_ep gauge taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 ``` + +### check_health + +``` +$ curl -i http://127.0.0.1:6043/check_health +``` + +Response: + +``` +HTTP/1.1 200 OK +Content-Type: 
application/json; charset=utf-8 +Date: Mon, 03 Apr 2023 07:20:38 GMT +Content-Length: 19 + +{"version":"1.0.0"} +``` diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx index 5a2942b144..b0e9dea707 100644 --- a/docs/en/20-third-party/01-grafana.mdx +++ b/docs/en/20-third-party/01-grafana.mdx @@ -77,7 +77,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows: ```bash -GF_VERSION=3.2.7 +GF_VERSION=3.3.1 # from GitHub wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip # from Grafana diff --git a/docs/zh/14-reference/07-tdinsight/index.mdx b/docs/zh/14-reference/07-tdinsight/index.mdx index 8783d47f00..b4eda61ebd 100644 --- a/docs/zh/14-reference/07-tdinsight/index.mdx +++ b/docs/zh/14-reference/07-tdinsight/index.mdx @@ -43,8 +43,6 @@ sudo apt-get update sudo apt-get install grafana ``` -### 在 CentOS / RHEL 上安装 Grafana - @@ -79,7 +77,37 @@ sudo yum install \ - +### 安装 TDengine 数据源插件 + + + + +从 GitHub 安装 TDengine 最新版数据源插件。 + +```bash +get_latest_release() { + curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" | + grep '"tag_name":' | + sed -E 's/.*"v([^"]+)".*/\1/' +} +TDENGINE_PLUGIN_VERSION=$(get_latest_release) +sudo grafana-cli \ + --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \ + plugins install tdengine-datasource +``` + +:::note +3.1.6 和更早版本插件需要在配置文件 `/etc/grafana/grafana.ini` 中添加如下设置,以启用未签名插件。 + +```ini +[plugins] +allow_loading_unsigned_plugins = tdengine-datasource +``` + +::: + + + 我们提供了一个自动化安装脚本 [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) 脚本以便用户快速进行安装配置。 @@ -175,33 +203,7 @@ sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata - 特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。 - - -### 安装 TDengine 数据源插件 - -从 GitHub 安装 TDengine 最新版数据源插件。 - -```bash -get_latest_release() { - curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" | - grep '"tag_name":' | - sed -E 's/.*"v([^"]+)".*/\1/' -} -TDENGINE_PLUGIN_VERSION=$(get_latest_release) -sudo grafana-cli \ - --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \ - plugins install tdengine-datasource -``` - -:::note -3.1.6 和更早版本插件需要在配置文件 `/etc/grafana/grafana.ini` 中添加如下设置,以启用未签名插件。 - -```ini -[plugins] -allow_loading_unsigned_plugins = tdengine-datasource -``` - -::: + ### 启动 Grafana 服务 @@ -233,8 +235,7 @@ sudo systemctl enable grafana-server ![TDengine Database TDinsight 数据源测试](./assets/howto-add-datasource-test.webp) - - + ### 导入仪表盘 diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md index 2cdc24dfef..12b609584a 100644 --- a/docs/zh/14-reference/14-taosKeeper.md +++ b/docs/zh/14-reference/14-taosKeeper.md @@ -205,3 +205,20 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 # TYPE taos_cluster_info_first_ep gauge taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 ``` + +### check_health + +``` +$ curl -i 
http://127.0.0.1:6043/check_health +``` + +返回结果: + +``` +HTTP/1.1 200 OK +Content-Type: application/json; charset=utf-8 +Date: Mon, 03 Apr 2023 07:20:38 GMT +Content-Length: 19 + +{"version":"1.0.0"} +``` diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx index 5927dc4fca..7980736a1c 100644 --- a/docs/zh/20-third-party/01-grafana.mdx +++ b/docs/zh/20-third-party/01-grafana.mdx @@ -77,7 +77,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource 或者从 [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下: ```bash -GF_VERSION=3.2.9 +GF_VERSION=3.3.1 # from GitHub wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip # from Grafana From 6c64b760a0d0fec825e98daecfb9b89b0466b08d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 3 Apr 2023 17:58:43 +0800 Subject: [PATCH 61/71] fix: fix tsim crash on windows due to invalid input to strftime --- source/common/src/tdatablock.c | 5 ++++- source/common/src/ttime.c | 20 ++++++++++++++++---- source/libs/function/src/builtins.c | 7 +++++-- source/libs/scalar/src/sclfunc.c | 12 ++++++++++-- utils/test/c/tmqSim.c | 5 ++++- utils/tsim/src/simExe.c | 5 ++++- 6 files changed, 43 insertions(+), 11 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 3c8d394b43..eeb2d4ff2e 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1864,7 +1864,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } } struct tm ptm = {0}; - taosLocalTime(&tt, &ptm); + if (taosLocalTime(&tt, &ptm) == NULL) { + sprintf(buf, "NaN"); + return buf; + } size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); if (precision == TSDB_TIME_PRECISION_NANO) { diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 7996498d45..2f39cdeaa0 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -867,13 +867,19 @@ const char* fmtts(int64_t ts) { if (ts > -62135625943 && ts < 32503651200) { time_t t = (time_t)ts; - taosLocalTime(&t, &tm); + if (taosLocalTime(&t, &tm) == NULL) { + sprintf(buf, "NaN"); + return buf; + } pos += strftime(buf + pos, sizeof(buf), "s=%Y-%m-%d %H:%M:%S", &tm); } if (ts > -62135625943000 && ts < 32503651200000) { time_t t = (time_t)(ts / 1000); - taosLocalTime(&t, &tm); + if (taosLocalTime(&t, &tm) == NULL) { + sprintf(buf, "NaN"); + return buf; + } if (pos > 0) { buf[pos++] = ' '; buf[pos++] = '|'; @@ -885,7 +891,10 @@ const char* fmtts(int64_t ts) { { time_t t = (time_t)(ts / 1000000); - taosLocalTime(&t, &tm); + if (taosLocalTime(&t, &tm) == NULL) { + sprintf(buf, "NaN"); + return buf; + } if (pos > 0) { buf[pos++] = ' '; buf[pos++] = '|'; @@ -937,7 +946,10 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) ASSERT(false); } - taosLocalTime(", &ptm); + if (taosLocalTime(", &ptm) == NULL) { + sprintf(buf, "NaN"); + return; + } int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", &ptm); length += snprintf(ts + length, fractionLen, format, mod); length += (int32_t)strftime(ts + length, 40 - length, "%z", &ptm); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 0257b3d5e6..f78b8039a4 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -213,8 +213,11 @@ static 
int32_t addTimezoneParam(SNodeList* pList) { char buf[6] = {0}; time_t t = taosTime(NULL); struct tm tmInfo; - taosLocalTime(&t, &tmInfo); - strftime(buf, sizeof(buf), "%z", &tmInfo); + if (taosLocalTime(&t, &tmInfo) == NULL) { + sprintf(buf, "NaN"); + } else { + strftime(buf, sizeof(buf), "%z", &tmInfo); + } int32_t len = (int32_t)strlen(buf); SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 195a08525c..7f19cb7831 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1067,9 +1067,16 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * } struct tm tmInfo; - taosLocalTime((const time_t *)&timeVal, &tmInfo); + int32_t len = 0; + + if (taosLocalTime((const time_t *)&timeVal, &tmInfo) == NULL) { + sprintf(buf, "NaN"); + len = (int32_t)strlen(buf); + goto _end; + } + strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", &tmInfo); - int32_t len = (int32_t)strlen(buf); + len = (int32_t)strlen(buf); // add timezone string if (tzLen > 0) { @@ -1103,6 +1110,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * len += fracLen; } +_end: memmove(buf + VARSTR_HEADER_SIZE, buf, len); varDataSetLen(buf, len); diff --git a/utils/test/c/tmqSim.c b/utils/test/c/tmqSim.c index 69debe7ab5..d81c70c65d 100644 --- a/utils/test/c/tmqSim.c +++ b/utils/test/c/tmqSim.c @@ -472,7 +472,10 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { } struct tm ptm; - taosLocalTime(&tt, &ptm); + if (taosLocalTime(&tt, &ptm) == NULL) { + sprintf(tt, "NaN"); + return buf; + } size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); if (precision == TSDB_TIME_PRECISION_NANO) { diff --git a/utils/tsim/src/simExe.c b/utils/tsim/src/simExe.c index 9dd63d14a2..07b3648f3a 100644 --- a/utils/tsim/src/simExe.c +++ b/utils/tsim/src/simExe.c @@ -772,7 +772,10 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { tt = (*(int64_t *)row[i]) / 1000000000; } - taosLocalTime(&tt, &tp); + if (taosLocalTime(&tt, &tp) == NULL) { + sprintf(timeStr, "NaN"); + break; + } strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", &tp); if (precision == TSDB_TIME_PRECISION_MILLI) { sprintf(value, "%s.%03d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000)); From 8079532055525ffdc3509c9952cd9b7f6338954d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 3 Apr 2023 17:58:43 +0800 Subject: [PATCH 62/71] fix: fix tsim crash on windows due to invalid input to strftime --- utils/test/c/tmqSim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/test/c/tmqSim.c b/utils/test/c/tmqSim.c index d81c70c65d..5eeb0aaa12 100644 --- a/utils/test/c/tmqSim.c +++ b/utils/test/c/tmqSim.c @@ -473,7 +473,7 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { struct tm ptm; if (taosLocalTime(&tt, &ptm) == NULL) { - sprintf(tt, "NaN"); + sprintf(buf, "NaN"); return buf; } size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); From 30f8010d413f28af88c78fbf3ebcc5064b70c23a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 3 Apr 2023 19:56:57 +0800 Subject: [PATCH 63/71] chore: fix cus name (#20716) * chore: refactor cus prompt * fix: client name in install.sh * fix: -Wno-reserved-user-defined-literal * fix: update taos-tools commit * fix: include/os/osDir.h * fix: check cus name * fix: makepkg.sh * chore: update taos-tools d194dc9 * fix: cus name * fix: change adapter prefix * fix: 
tools/*.sh * fix: scripts * fix: ../../packaging/tools/install_client.sh * chore: cus domain in script * fix: cus name in packaging/tools/install.sh * fix: cus name in ../../packaging/tools/remove.sh * fix: don't edit origin file --------- Co-authored-by: chenhaoran --- cmake/taostools_CMakeLists.txt.in | 2 +- packaging/cfg/taosd.service | 2 +- packaging/tools/install.sh | 187 +++++++++++++++--------------- packaging/tools/install_client.sh | 15 +-- packaging/tools/makeclient.sh | 18 ++- packaging/tools/makepkg.sh | 22 ++-- packaging/tools/remove.sh | 41 ++++--- packaging/tools/remove_client.sh | 18 +-- 8 files changed, 163 insertions(+), 142 deletions(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index aef89a2d42..18d581efd3 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG d194dc9 + GIT_TAG bb10773 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/packaging/cfg/taosd.service b/packaging/cfg/taosd.service index fff4b74e62..52c4b1d1e2 100644 --- a/packaging/cfg/taosd.service +++ b/packaging/cfg/taosd.service @@ -1,5 +1,5 @@ [Unit] -Description=TDengine server service +Description=server service After=network-online.target Wants=network-online.target diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index a3f8b53d33..858a6ac668 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -4,7 +4,7 @@ # is required to use systemd to manage services at boot set -e -#set -x +# set -x verMode=edge pagMode=full @@ -34,21 +34,25 @@ benchmarkName="taosBenchmark" dumpName="taosdump" demoName="taosdemo" xname="taosx" -explorerName="${clientName}-explorer" clientName2="taos" -serverName2="taosd" +serverName2="${clientName2}d" +configFile2="${clientName2}.cfg" productName2="TDengine" emailName2="taosdata.com" +xname2="${clientName2}x" +adapterName2="${clientName2}adapter" +explorerName="${clientName2}-explorer" benchmarkName2="${clientName2}Benchmark" +demoName2="${clientName2}demo" dumpName2="${clientName2}dump" uninstallScript2="rm${clientName2}" historyFile="${clientName2}_history" logDir="/var/log/${clientName2}" configDir="/etc/${clientName2}" -installDir="/usr/local/${clientName}" +installDir="/usr/local/${clientName2}" data_dir=${dataDir} log_dir=${logDir} @@ -206,15 +210,15 @@ function install_main_path() { function install_bin() { # Remove links - ${csudo}rm -f ${bin_link_dir}/${clientName} || : - ${csudo}rm -f ${bin_link_dir}/${serverName} || : + ${csudo}rm -f ${bin_link_dir}/${clientName2} || : + ${csudo}rm -f ${bin_link_dir}/${serverName2} || : ${csudo}rm -f ${bin_link_dir}/${udfdName} || : ${csudo}rm -f ${bin_link_dir}/${adapterName} || : - ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : - ${csudo}rm -f ${bin_link_dir}/${demoName} || : - ${csudo}rm -f ${bin_link_dir}/${benchmarkName} || : - ${csudo}rm -f ${bin_link_dir}/${dumpName} || : - ${csudo}rm -f ${bin_link_dir}/${xname} || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : + ${csudo}rm -f ${bin_link_dir}/${demoName2} || : + ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : + ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : + ${csudo}rm -f ${bin_link_dir}/${xname2} || : ${csudo}rm -f ${bin_link_dir}/${explorerName} || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : @@ -222,24 +226,23 @@ 
function install_bin() { ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* #Make link - [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : - [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : + [ -x ${install_main_dir}/bin/${clientName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${clientName2} ${bin_link_dir}/${clientName2} || : + [ -x ${install_main_dir}/bin/${serverName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${serverName2} ${bin_link_dir}/${serverName2} || : [ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || : - [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : - [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : - [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : - [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : - [ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -sf ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || : + [ -x ${install_main_dir}/bin/${adapterName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${adapterName2} ${bin_link_dir}/${adapterName2} || : + [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${demoName2} || : + [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || : + [ -x ${install_main_dir}/bin/${dumpName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${dumpName2} ${bin_link_dir}/${dumpName2} || : + [ -x ${install_main_dir}/bin/${xname2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${xname2} ${bin_link_dir}/${xname2} || : [ -x ${install_main_dir}/bin/${explorerName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${explorerName} ${bin_link_dir}/${explorerName} || : [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + if [ "$clientName2" == "${clientName}" ]; then + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : + fi + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then - [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName2} || : - [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName2} || : - [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${dumpName} 
${bin_link_dir}/${dumpName2} || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript2} || : + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript2} || : fi } @@ -399,7 +402,7 @@ function set_hostname() { ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network || : fi - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/${configFile} + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/${configFile2} serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -433,7 +436,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile} + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile2} serverFqdn=$localFqdn echo return @@ -455,7 +458,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile} + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile2} serverFqdn=$localFqdn break fi @@ -519,15 +522,15 @@ function install_adapter_config() { function install_config() { - if [ ! -f "${cfg_install_dir}/${configFile}" ]; then + if [ ! -f "${cfg_install_dir}/${configFile2}" ]; then ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} + [ -f ${script_dir}/cfg/${configFile2} ] && ${csudo}cp ${script_dir}/cfg/${configFile2} ${cfg_install_dir} ${csudo}chmod 644 ${cfg_install_dir}/* else - ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new + ${csudo}cp -f ${script_dir}/cfg/${configFile2} ${cfg_install_dir}/${configFile2}.new fi - ${csudo}ln -sf ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg + ${csudo}ln -sf ${cfg_install_dir}/${configFile2} ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client @@ -548,7 +551,7 @@ function install_config() { read firstEp while true; do if [ ! 
-z "$firstEp" ]; then - ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/${configFile} + ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/${configFile2} break else break @@ -600,8 +603,8 @@ function install_web() { function clean_service_on_sysvinit() { - if ps aux | grep -v grep | grep ${serverName} &>/dev/null; then - ${csudo}service ${serverName} stop || : + if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then + ${csudo}service ${serverName2} stop || : fi if ps aux | grep -v grep | grep tarbitrator &>/dev/null; then @@ -609,30 +612,30 @@ function clean_service_on_sysvinit() { fi if ((${initd_mod} == 1)); then - if [ -e ${service_config_dir}/${serverName} ]; then - ${csudo}chkconfig --del ${serverName} || : + if [ -e ${service_config_dir}/${serverName2} ]; then + ${csudo}chkconfig --del ${serverName2} || : fi if [ -e ${service_config_dir}/tarbitratord ]; then ${csudo}chkconfig --del tarbitratord || : fi elif ((${initd_mod} == 2)); then - if [ -e ${service_config_dir}/${serverName} ]; then - ${csudo}insserv -r ${serverName} || : + if [ -e ${service_config_dir}/${serverName2} ]; then + ${csudo}insserv -r ${serverName2} || : fi if [ -e ${service_config_dir}/tarbitratord ]; then ${csudo}insserv -r tarbitratord || : fi elif ((${initd_mod} == 3)); then - if [ -e ${service_config_dir}/${serverName} ]; then - ${csudo}update-rc.d -f ${serverName} remove || : + if [ -e ${service_config_dir}/${serverName2} ]; then + ${csudo}update-rc.d -f ${serverName2} remove || : fi if [ -e ${service_config_dir}/tarbitratord ]; then ${csudo}update-rc.d -f tarbitratord remove || : fi fi - ${csudo}rm -f ${service_config_dir}/${serverName} || : + ${csudo}rm -f ${service_config_dir}/${serverName2} || : ${csudo}rm -f ${service_config_dir}/tarbitratord || : if $(which init &>/dev/null); then @@ -653,24 +656,24 @@ function install_service_on_sysvinit() { fi if ((${initd_mod} == 1)); then - ${csudo}chkconfig --add ${serverName} || : - ${csudo}chkconfig --level 2345 ${serverName} on || : + ${csudo}chkconfig --add ${serverName2} || : + ${csudo}chkconfig --level 2345 ${serverName2} on || : elif ((${initd_mod} == 2)); then - ${csudo}insserv ${serverName} || : - ${csudo}insserv -d ${serverName} || : + ${csudo}insserv ${serverName2} || : + ${csudo}insserv -d ${serverName2} || : elif ((${initd_mod} == 3)); then - ${csudo}update-rc.d ${serverName} defaults || : + ${csudo}update-rc.d ${serverName2} defaults || : fi } function clean_service_on_systemd() { - taosd_service_config="${service_config_dir}/${serverName}.service" - if systemctl is-active --quiet ${serverName}; then + service_config="${service_config_dir}/${serverName2}.service" + if systemctl is-active --quiet ${serverName2}; then echo "${productName} is running, stopping it..." 
- ${csudo}systemctl stop ${serverName} &>/dev/null || echo &>/dev/null + ${csudo}systemctl stop ${serverName2} &>/dev/null || echo &>/dev/null fi - ${csudo}systemctl disable ${serverName} &>/dev/null || echo &>/dev/null - ${csudo}rm -f ${taosd_service_config} + ${csudo}systemctl disable ${serverName2} &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${service_config} tarbitratord_service_config="${service_config_dir}/tarbitratord.service" if systemctl is-active --quiet tarbitratord; then @@ -687,19 +690,19 @@ function clean_service_on_systemd() { function install_service_on_systemd() { clean_service_on_systemd - [ -f ${script_dir}/cfg/${serverName}.service ] && - ${csudo}cp ${script_dir}/cfg/${serverName}.service \ + [ -f ${script_dir}/cfg/${serverName2}.service ] && + ${csudo}cp ${script_dir}/cfg/${serverName2}.service \ ${service_config_dir}/ || : # if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then - # [ -f ${script_dir}/cfg/${serverName}.service ] && - # ${csudo}cp ${script_dir}/cfg/${serverName}.service \ + # [ -f ${script_dir}/cfg/${serverName2}.service ] && + # ${csudo}cp ${script_dir}/cfg/${serverName2}.service \ # ${service_config_dir}/${serverName2}.service || : # fi ${csudo}systemctl daemon-reload - ${csudo}systemctl enable ${serverName} + ${csudo}systemctl enable ${serverName2} ${csudo}systemctl daemon-reload } @@ -719,7 +722,7 @@ function install_service() { elif ((${service_mod} == 1)); then install_service_on_sysvinit else - kill_process ${serverName} + kill_process ${serverName2} fi } @@ -756,10 +759,10 @@ function is_version_compatible() { if [ -f ${script_dir}/driver/vercomp.txt ]; then min_compatible_version=$(cat ${script_dir}/driver/vercomp.txt) else - min_compatible_version=$(${script_dir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 5) + min_compatible_version=$(${script_dir}/bin/${serverName2} -V | head -1 | cut -d ' ' -f 5) fi - exist_version=$(${installDir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 3) + exist_version=$(${installDir}/bin/${serverName2} -V | head -1 | cut -d ' ' -f 3) vercomp $exist_version "3.0.0.0" case $? 
in 2) @@ -829,13 +832,13 @@ function updateProduct() { echo -e "${GREEN}Start to update ${productName2}...${NC}" # Stop the service if running - if ps aux | grep -v grep | grep ${serverName} &>/dev/null; then + if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then if ((${service_mod} == 0)); then - ${csudo}systemctl stop ${serverName} || : + ${csudo}systemctl stop ${serverName2} || : elif ((${service_mod} == 1)); then - ${csudo}service ${serverName} stop || : + ${csudo}service ${serverName2} stop || : else - kill_process ${serverName} + kill_process ${serverName2} fi sleep 1 fi @@ -862,21 +865,21 @@ function updateProduct() { openresty_work=false echo - echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile}" - [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To configure ${clientName2} Adapter ${NC}: edit ${configDir}/taosadapter.toml" + echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile2}" + [ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To configure ${clientName2} Adapter ${NC}: edit ${configDir}/${clientName2}adapter.toml" if ((${service_mod} == 0)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName}${NC}" - [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName2}${NC}" + [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}systemctl start ${clientName2}adapter ${NC}" elif ((${service_mod} == 1)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName} start${NC}" - [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}service taosadapter start${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName2} start${NC}" + [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}service ${clientName2}adapter start${NC}" else - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ./${serverName}${NC}" - [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: taosadapter &${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ./${serverName2}${NC}" + [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${clientName2}adapter &${NC}" fi if [ ${openresty_work} = 'true' ]; then @@ -887,7 +890,7 @@ function updateProduct() { if ((${prompt_force} == 1)); then echo "" - echo -e "${RED}Please run '${serverName} --force-keep-file' at first time for the exist ${productName2} $exist_version!${NC}" + echo -e "${RED}Please run '${serverName2} --force-keep-file' at first time for the exist ${productName2} $exist_version!${NC}" fi echo echo -e "\033[44;32;1m${productName2} is updated successfully!${NC}" @@ -899,7 +902,7 @@ function updateProduct() { 
echo -e "\033[44;32;1m${productName2} client is updated successfully!${NC}" fi - rm -rf $(tar -tf ${tarName} | grep -v "^\./$") + rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/") } function installProduct() { @@ -944,21 +947,21 @@ function installProduct() { # Ask if to start the service echo - echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile}" - [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To configure ${clientName2} Adapter ${NC}: edit ${configDir}/taosadapter.toml" + echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile2}" + [ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To configure ${clientName2} Adapter ${NC}: edit ${configDir}/${clientName2}adapter.toml" if ((${service_mod} == 0)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName}${NC}" - [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName2}${NC}" + [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}systemctl start ${clientName2}adapter ${NC}" elif ((${service_mod} == 1)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName} start${NC}" - [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}service taosadapter start${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName2} start${NC}" + [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${csudo}service ${clientName2}adapter start${NC}" else - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${serverName}${NC}" - [ -f ${installDir}/bin/taosadapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: taosadapter &${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${serverName2}${NC}" + [ -f ${installDir}/bin/${clientName2}adapter ] && \ + echo -e "${GREEN_DARK}To start ${clientName2} Adapter ${NC}: ${clientName2}adapter &${NC}" fi if [ ! 
-z "$firstEp" ]; then @@ -991,7 +994,7 @@ function installProduct() { fi touch ~/.${historyFile} - rm -rf $(tar -tf ${tarName} | grep -v "^\./$") + rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/") } ## ==============================Main program starts from here============================ @@ -1002,7 +1005,7 @@ if [ "$verType" == "server" ]; then echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of ${productName2} 2.x, please clear it before installing!\033[0m" else # Install server and client - if [ -x ${bin_dir}/${serverName} ]; then + if [ -x ${bin_dir}/${serverName2} ]; then update_flag=1 updateProduct else @@ -1012,7 +1015,7 @@ if [ "$verType" == "server" ]; then elif [ "$verType" == "client" ]; then interactiveFqdn=no # Only install client - if [ -x ${bin_dir}/${clientName} ]; then + if [ -x ${bin_dir}/${clientName2} ]; then update_flag=1 updateProduct client else diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index d941fbc0cb..53b9c80f10 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -95,7 +95,7 @@ function install_main_path() { ${csudo}mkdir -p ${install_main_dir}/cfg ${csudo}mkdir -p ${install_main_dir}/bin ${csudo}mkdir -p ${install_main_dir}/driver - if [ $productName == "TDengine" ]; then + if [ "$productName2" == "TDengine" ]; then ${csudo}mkdir -p ${install_main_dir}/examples fi ${csudo}mkdir -p ${install_main_dir}/include @@ -118,18 +118,19 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/${demoName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName2} ${bin_link_dir}/${demoName2} || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then #Make link - [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName2} || : + [ -x ${install_main_dir}/bin/${clientName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName2} ${bin_link_dir}/${clientName2} || : if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/${demoName2} || : + [ -x ${install_main_dir}/bin/${demoName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName2} ${bin_link_dir}/${demoName2} || : + [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || : fi - [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript2} || : + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript2} || : fi } @@ -305,7 +306,7 @@ function update_TDengine() { echo echo -e "\033[44;32;1m${productName2} client is updated successfully!${NC}" - rm -rf $(tar -tf ${tarName}) + rm -rf $(tar -tf ${tarName} | grep -Ev 
"^\./$|^\/") } function install_TDengine() { @@ -332,7 +333,7 @@ function install_TDengine() { echo echo -e "\033[44;32;1m${productName2} client is installed successfully!${NC}" - rm -rf $(tar -tf ${tarName}) + rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/") } diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index f46de0f94b..b473f3b527 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -2,7 +2,7 @@ # # Generate tar.gz package for linux client in all os system set -e -# set -x +set -x curr_dir=$(pwd) compile_dir=$1 @@ -23,9 +23,12 @@ clientName2="${12}" productName="TDengine" clientName="taos" +benchmarkName="taosBenchmark" configFile="taos.cfg" tarName="package.tar.gz" +benchmarkName2="${clientName2}Benchmark" + if [ "$osType" != "Darwin" ]; then script_dir="$(dirname $(readlink -f $0))" top_dir="$(readlink -f ${script_dir}/../..)" @@ -53,11 +56,12 @@ fi # Directories and files. -if [ "$verMode" == "cluster" ]; then - sed -i 's/verMode=edge/verMode=cluster/g' ${script_dir}/remove_client.sh - sed -i "s/clientName2=\"taos\"/clientName2=\"${clientName2}\"/g" ${script_dir}/remove_client.sh - sed -i "s/productName2=\"TDengine\"/productName2=\"${productName2}\"/g" ${script_dir}/remove_client.sh -fi +#if [ "$verMode" == "cluster" ]; then +# sed -i 's/verMode=edge/verMode=cluster/g' ${script_dir}/remove_client.sh +# sed -i "s/clientName2=\"taos\"/clientName2=\"${clientName2}\"/g" ${script_dir}/remove_client.sh +# sed -i "s/configFile2=\"taos\"/configFile2=\"${clientName2}\"/g" ${script_dir}/remove_client.sh +# sed -i "s/productName2=\"TDengine\"/productName2=\"${productName2}\"/g" ${script_dir}/remove_client.sh +#fi if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then @@ -66,6 +70,7 @@ if [ "$osType" != "Darwin" ]; then ${script_dir}/remove_client.sh" else bin_files="${build_dir}/bin/${clientName} \ + ${build_dir}/bin/${benchmarkName} \ ${script_dir}/remove_client.sh \ ${script_dir}/set_core.sh \ ${script_dir}/get_client.sh" @@ -153,6 +158,7 @@ if [ "$verMode" == "cluster" ]; then sed -i 's/verMode=edge/verMode=cluster/g' install_client_temp.sh sed -i "s/serverName2=\"taosd\"/serverName2=\"${serverName2}\"/g" install_client_temp.sh sed -i "s/clientName2=\"taos\"/clientName2=\"${clientName2}\"/g" install_client_temp.sh + sed -i "s/configFile2=\"taos.cfg\"/configFile2=\"${clientName2}.cfg\"/g" install_client_temp.sh sed -i "s/productName2=\"TDengine\"/productName2=\"${productName2}\"/g" install_client_temp.sh sed -i "s/emailName2=\"taosdata.com\"/emailName2=\"${cusEmail2}\"/g" install_client_temp.sh diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index f30ec23b9f..10093bdf49 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -96,7 +96,7 @@ else ${taostools_bin_files} \ ${taosx_bin} \ ${explorer_bin_files} \ - ${build_dir}/bin/taosadapter \ + ${build_dir}/bin/${clientName}adapter \ ${build_dir}/bin/udfd \ ${script_dir}/remove.sh \ ${script_dir}/set_core.sh \ @@ -135,12 +135,12 @@ mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile} -if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then - cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || : +if [ -f "${compile_dir}/test/cfg/${clientName}adapter.toml" ]; then + cp ${compile_dir}/test/cfg/${clientName}adapter.toml ${install_dir}/cfg || : fi -if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then - 
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || : +if [ -f "${compile_dir}/test/cfg/${clientName}adapter.service" ]; then + cp ${compile_dir}/test/cfg/${clientName}adapter.service ${install_dir}/cfg || : fi if [ -f "${cfg_dir}/${serverName}.service" ]; then @@ -152,16 +152,16 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${se mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm if [ $adapterName != "taosadapter" ]; then - mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml + mv ${install_dir}/cfg/${clientName2}adapter.toml ${install_dir}/cfg/$adapterName.toml sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml - mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service + mv ${install_dir}/cfg/${clientName2}adapter.service ${install_dir}/cfg/$adapterName.service sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service - mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName} + mv ${install_dir}/bin/${clientName2}adapter ${install_dir}/bin/${adapterName} mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb fi @@ -233,9 +233,9 @@ if [ "$verMode" == "cluster" ]; then sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh sed -i "s/serverName2=\"taosd\"/serverName2=\"${serverName2}\"/g" remove_temp.sh sed -i "s/clientName2=\"taos\"/clientName2=\"${clientName2}\"/g" remove_temp.sh + sed -i "s/configFile2=\"taos.cfg\"/configFile2=\"${clientName2}.cfg\"/g" remove_temp.sh sed -i "s/productName2=\"TDengine\"/productName2=\"${productName2}\"/g" remove_temp.sh cusDomain=`echo "${cusEmail2}" | sed 's/^[^@]*@//'` - echo "domain is ${cusDomain}" sed -i "s/emailName2=\"taosdata.com\"/emailName2=\"${cusDomain}\"/g" remove_temp.sh mv remove_temp.sh ${install_dir}/bin/remove.sh fi @@ -264,8 +264,10 @@ if [ "$verMode" == "cluster" ]; then sed -i 's/verMode=edge/verMode=cluster/g' install_temp.sh sed -i "s/serverName2=\"taosd\"/serverName2=\"${serverName2}\"/g" install_temp.sh sed -i "s/clientName2=\"taos\"/clientName2=\"${clientName2}\"/g" install_temp.sh + sed -i "s/configFile2=\"taos.cfg\"/configFile2=\"${clientName2}.cfg\"/g" install_temp.sh sed -i "s/productName2=\"TDengine\"/productName2=\"${productName2}\"/g" install_temp.sh - sed -i "s/emailName2=\"taosdata.com\"/emailName2=\"${cusEmail2}\"/g" install_temp.sh + cusDomain=`echo "${cusEmail2}" | sed 's/^[^@]*@//'` + sed -i "s/emailName2=\"taosdata.com\"/emailName2=\"${cusDomain}\"/g" install_temp.sh mv install_temp.sh ${install_dir}/install.sh fi if [ "$verMode" == "cloud" ]; then diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 2479e48670..8ed3bd74b9 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -40,11 +40,16 @@ serverName2="taosd" clientName2="taos" productName2="TDengine" +adapterName2="${clientName2}adapter" +demoName2="${clientName2}demo" benchmarkName2="${clientName2}Benchmark" dumpName2="${clientName2}dump" +keeperName2="${clientName2}keeper" +xName2="${clientName2}x" +explorerName2="${clientName2}-explorer" 
uninstallScript2="rm${clientName2}" -installDir="/usr/local/${clientName}" +installDir="/usr/local/${clientName2}" #install main path install_main_dir=${installDir} @@ -55,8 +60,8 @@ local_bin_link_dir="/usr/local/bin" service_config_dir="/etc/systemd/system" -taos_service_name=${serverName} -taosadapter_service_name="taosadapter" +taos_service_name=${serverName2} +taosadapter_service_name="${clientName2}adapter" tarbitrator_service_name="tarbitratord" csudo="" if command -v sudo >/dev/null; then @@ -84,14 +89,14 @@ else fi function kill_taosadapter() { - pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') + pid=$(ps -ef | grep "${adapterName2}" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi } function kill_taosd() { - pid=$(ps -ef | grep ${serverName} | grep -v "grep" | awk '{print $2}') + pid=$(ps -ef | grep ${serverName2} | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi @@ -109,17 +114,17 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/udfd || : - ${csudo}rm -f ${bin_link_dir}/taosadapter || : - ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : - ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/${adapterName2} || : + ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : + ${csudo}rm -f ${bin_link_dir}/${demoName2} || : + ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : - ${csudo}rm -f ${bin_link_dir}/taoskeeper || : - ${csudo}rm -f ${bin_link_dir}/taosx || : - ${csudo}rm -f ${bin_link_dir}/taos-explorer || : + ${csudo}rm -f ${bin_link_dir}/${keeperName2} || : + ${csudo}rm -f ${bin_link_dir}/${xName2} || : + ${csudo}rm -f ${bin_link_dir}/${explorerName2} || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : @@ -130,8 +135,8 @@ function clean_bin() { } function clean_local_bin() { - ${csudo}rm -f ${local_bin_link_dir}/taosBenchmark || : - ${csudo}rm -f ${local_bin_link_dir}/taosdemo || : + ${csudo}rm -f ${local_bin_link_dir}/${benchmarkName2} || : + ${csudo}rm -f ${local_bin_link_dir}/${demoName2} || : } function clean_lib() { @@ -173,7 +178,7 @@ function clean_service_on_systemd() { ${csudo}systemctl disable ${taos_service_name} &>/dev/null || echo &>/dev/null ${csudo}rm -f ${taosd_service_config} - taosadapter_service_config="${service_config_dir}/taosadapter.service" + taosadapter_service_config="${service_config_dir}/${clientName2}adapter.service" if systemctl is-active --quiet ${taosadapter_service_name}; then echo "${productName2} ${clientName2}Adapter is running, stopping it..." 
${csudo}systemctl stop ${taosadapter_service_name} &>/dev/null || echo &>/dev/null @@ -235,8 +240,8 @@ function clean_service_on_sysvinit() { function clean_service_on_launchctl() { ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}rm /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : - ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : - ${csudo}rm /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.${clientName2}adapter.plist > /dev/null 2>&1 || : + ${csudo}rm /Library/LaunchDaemons/com.taosdata.${clientName2}adapter.plist > /dev/null 2>&1 || : } function clean_service() { diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 10a0fb5e02..2bdb56fac2 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -15,11 +15,12 @@ uninstallScript="rmtaos" clientName2="taos" productName2="TDengine" -benchmarkName2="${clientName}Benchmark" -dumpName2="${clientName}dump" -uninstallScript2="rm${clientName}" +benchmarkName2="${clientName2}Benchmark" +demoName2="${clientName2}demo" +dumpName2="${clientName2}dump" +uninstallScript2="rm${clientName2}" -installDir="/usr/local/${clientName}" +installDir="/usr/local/${clientName2}" #install main path install_main_dir=${installDir} @@ -44,14 +45,17 @@ function kill_client() { function clean_bin() { # Remove link - ${csudo}rm -f ${bin_link_dir}/${clientName} || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : + ${csudo}rm -f ${bin_link_dir}/${clientName2} || : + ${csudo}rm -f ${bin_link_dir}/${demoName2} || : + ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : + ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/set_core || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : + ${csudo}rm -f ${bin_link_dir}/${demoName2} || : + ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : fi From 33cb5b681624a82a296f2240827934a4157f7da6 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 3 Apr 2023 23:39:45 +0800 Subject: [PATCH 64/71] fix: taosbenchmark multithreadfs with limit specified (#20743) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 18d581efd3..2de3881dd2 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG bb10773 + GIT_TAG 273a3fe SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 23ec9287c925b55a25b0090b6bf4209878e94f78 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 3 Apr 2023 17:58:43 +0800 Subject: [PATCH 65/71] fix: fix tsim crash on windows due to invalid input to strftime --- include/common/ttime.h | 2 +- include/os/osTime.h | 2 +- source/common/src/tdatablock.c | 3 +-- source/common/src/ttime.c | 20 +++++++--------- source/libs/executor/src/scanoperator.c | 2 +- source/libs/executor/src/timewindowoperator.c | 2 +- 
source/libs/function/src/builtins.c | 3 +-- source/libs/scalar/src/sclfunc.c | 3 +-- source/os/src/osTime.c | 24 ++++++++++++++++--- source/os/src/osTimezone.c | 6 ++--- source/util/src/tlog.c | 4 ++-- tools/shell/src/shellEngine.c | 3 +-- utils/test/c/tmqDemo.c | 2 +- utils/test/c/tmqSim.c | 5 ++-- utils/tsim/src/simExe.c | 3 +-- 15 files changed, 46 insertions(+), 38 deletions(-) diff --git a/include/common/ttime.h b/include/common/ttime.h index 4a7c47d172..f189959f22 100644 --- a/include/common/ttime.h +++ b/include/common/ttime.h @@ -64,7 +64,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) { : 1000000000; time_t t = taosTime(NULL); struct tm tm; - taosLocalTime(&t, &tm); + taosLocalTime(&t, &tm, NULL); tm.tm_hour = 0; tm.tm_min = 0; tm.tm_sec = 0; diff --git a/include/os/osTime.h b/include/os/osTime.h index 0a0a54119b..51a285a139 100644 --- a/include/os/osTime.h +++ b/include/os/osTime.h @@ -91,7 +91,7 @@ static FORCE_INLINE int64_t taosGetMonoTimestampMs() { } char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm); -struct tm *taosLocalTime(const time_t *timep, struct tm *result); +struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf); struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst); time_t taosTime(time_t *t); time_t taosMktime(struct tm *timep); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index eeb2d4ff2e..03102b11a3 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1864,8 +1864,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } } struct tm ptm = {0}; - if (taosLocalTime(&tt, &ptm) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 2f39cdeaa0..dcd539bd91 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -727,7 +727,7 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) { struct tm tm; time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision)); - taosLocalTime(&tt, &tm); + taosLocalTime(&tt, &tm, NULL); int32_t mon = tm.tm_year * 12 + tm.tm_mon + (int32_t)numOfMonth; tm.tm_year = mon / 12; tm.tm_mon = mon % 12; @@ -750,11 +750,11 @@ int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char struct tm tm; time_t t = (time_t)skey; - taosLocalTime(&t, &tm); + taosLocalTime(&t, &tm, NULL); int32_t smon = tm.tm_year * 12 + tm.tm_mon; t = (time_t)ekey; - taosLocalTime(&t, &tm); + taosLocalTime(&t, &tm, NULL); int32_t emon = tm.tm_year * 12 + tm.tm_mon; if (unit == 'y') { @@ -774,7 +774,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio start /= (int64_t)(TSDB_TICK_PER_SECOND(precision)); struct tm tm; time_t tt = (time_t)start; - taosLocalTime(&tt, &tm); + taosLocalTime(&tt, &tm, NULL); tm.tm_sec = 0; tm.tm_min = 0; tm.tm_hour = 0; @@ -867,8 +867,7 @@ const char* fmtts(int64_t ts) { if (ts > -62135625943 && ts < 32503651200) { time_t t = (time_t)ts; - if (taosLocalTime(&t, &tm) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(&t, &tm, buf) == NULL) { return buf; } pos += strftime(buf + pos, sizeof(buf), "s=%Y-%m-%d %H:%M:%S", &tm); @@ -876,8 +875,7 @@ const char* fmtts(int64_t ts) { if (ts > -62135625943000 && ts < 32503651200000) { time_t t = (time_t)(ts / 1000); - if (taosLocalTime(&t, &tm) == NULL) { - sprintf(buf, 
"NaN"); + if (taosLocalTime(&t, &tm, buf) == NULL) { return buf; } if (pos > 0) { @@ -891,8 +889,7 @@ const char* fmtts(int64_t ts) { { time_t t = (time_t)(ts / 1000000); - if (taosLocalTime(&t, &tm) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(&t, &tm, buf) == NULL) { return buf; } if (pos > 0) { @@ -946,8 +943,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) ASSERT(false); } - if (taosLocalTime(", &ptm) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(", &ptm, buf) == NULL) { return; } int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", &ptm); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ce647014ae..be87dcd6ff 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -88,7 +88,7 @@ static void getNextTimeWindow(SInterval* pInterval, STimeWindow* tw, int32_t ord struct tm tm; time_t t = (time_t)key; - taosLocalTime(&t, &tm); + taosLocalTime(&t, &tm, NULL); int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor); tm.tm_year = mon / 12; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index fef588a503..0f2eb4e0d7 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -281,7 +281,7 @@ static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t o struct tm tm; time_t t = (time_t)key; - taosLocalTime(&t, &tm); + taosLocalTime(&t, &tm, NULL); int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor); tm.tm_year = mon / 12; diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index f78b8039a4..22af82bceb 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -213,8 +213,7 @@ static int32_t addTimezoneParam(SNodeList* pList) { char buf[6] = {0}; time_t t = taosTime(NULL); struct tm tmInfo; - if (taosLocalTime(&t, &tmInfo) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(&t, &tmInfo, buf) == NULL) { } else { strftime(buf, sizeof(buf), "%z", &tmInfo); } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 7f19cb7831..88362201b4 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1069,8 +1069,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * struct tm tmInfo; int32_t len = 0; - if (taosLocalTime((const time_t *)&timeVal, &tmInfo) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime((const time_t *)&timeVal, &tmInfo, buf) == NULL) { len = (int32_t)strlen(buf); goto _end; } diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 5d5bff8c48..f120b6650a 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -407,12 +407,21 @@ time_t taosMktime(struct tm *timep) { #endif } -struct tm *taosLocalTime(const time_t *timep, struct tm *result) { +struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf) { + struct tm *res = NULL; + if (result == NULL) { - return localtime(timep); + res = localtime(timep); + if (res == NULL && buf != NULL) { + sprintf(buf, "NaN"); + } + return res; } #ifdef WINDOWS if (*timep < 0) { + if (buf != NULL) { + sprintf(buf, "NaN"); + } return NULL; // TODO: bugs in following code SYSTEMTIME ss, s; @@ -421,6 +430,9 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result) { struct tm tm1; time_t tt = 0; if (localtime_s(&tm1, &tt) != 0 ) { + if 
(buf != NULL) { + sprintf(buf, "NaN"); + } return NULL; } ss.wYear = tm1.tm_year + 1900; @@ -449,11 +461,17 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result) { result->tm_isdst = 0; } else { if (localtime_s(result, timep) != 0) { + if (buf != NULL) { + sprintf(buf, "NaN"); + } return NULL; } } #else - localtime_r(timep, result); + res = localtime_r(timep, result); + if (res == NULL && buf != NULL) { + sprintf(buf, "NaN"); + } #endif return result; } diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index ad223bff27..cd6ad7cdb5 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -893,7 +893,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) { */ time_t tx1 = taosGetTimestampSec(); struct tm tm1; - taosLocalTime(&tx1, &tm1); + taosLocalTime(&tx1, &tm1, NULL); daylight = tm1.tm_isdst; /* @@ -921,7 +921,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) { */ time_t tx1 = taosGetTimestampSec(); struct tm tm1; - taosLocalTime(&tx1, &tm1); + taosLocalTime(&tx1, &tm1, NULL); /* load time zone string from /etc/timezone */ // FILE *f = fopen("/etc/timezone", "r"); errno = 0; @@ -1008,7 +1008,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) { */ time_t tx1 = taosGetTimestampSec(); struct tm tm1; - taosLocalTime(&tx1, &tm1); + taosLocalTime(&tx1, &tm1, NULL); /* * format example: diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index bd9ea058b4..a3d3c399ab 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -121,7 +121,7 @@ static FORCE_INLINE void taosUpdateDaylight() { struct timeval timeSecs; taosGetTimeOfDay(&timeSecs); time_t curTime = timeSecs.tv_sec; - ptm = taosLocalTime(&curTime, &Tm); + ptm = taosLocalTime(&curTime, &Tm, NULL); tsDaylightActive = ptm->tm_isdst; } static FORCE_INLINE int32_t taosGetDaylight() { return tsDaylightActive; } @@ -437,7 +437,7 @@ static inline int32_t taosBuildLogHead(char *buffer, const char *flags) { taosGetTimeOfDay(&timeSecs); time_t curTime = timeSecs.tv_sec; - ptm = taosLocalTime(&curTime, &Tm); + ptm = taosLocalTime(&curTime, &Tm, NULL); return sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " %s", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId(), flags); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index a87ba16267..616540a54a 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -291,8 +291,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { } struct tm ptm = {0}; - if (taosLocalTime(&tt, &ptm) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); diff --git a/utils/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c index d105b50579..ce069c2b05 100644 --- a/utils/test/c/tmqDemo.c +++ b/utils/test/c/tmqDemo.c @@ -597,7 +597,7 @@ void printParaIntoFile() { time_t tTime = taosGetTimestampSec(); struct tm tm; - taosLocalTime(&tTime, &tm); + taosLocalTime(&tTime, &tm, NULL); taosFprintfFile(pFile, "###################################################################\n"); taosFprintfFile(pFile, "# configDir: %s\n", configDir); diff --git a/utils/test/c/tmqSim.c b/utils/test/c/tmqSim.c index 5eeb0aaa12..f2de219f4e 100644 --- a/utils/test/c/tmqSim.c +++ b/utils/test/c/tmqSim.c @@ -166,7 +166,7 @@ static void printHelp() { char* 
getCurrentTimeString(char* timeString) { time_t tTime = taosGetTimestampSec(); struct tm tm; - taosLocalTime(&tTime, &tm); + taosLocalTime(&tTime, &tm, NULL); sprintf(timeString, "%d-%02d-%02d %02d:%02d:%02d", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); @@ -472,8 +472,7 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { } struct tm ptm; - if (taosLocalTime(&tt, &ptm) == NULL) { - sprintf(buf, "NaN"); + if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); diff --git a/utils/tsim/src/simExe.c b/utils/tsim/src/simExe.c index 07b3648f3a..1be28635ef 100644 --- a/utils/tsim/src/simExe.c +++ b/utils/tsim/src/simExe.c @@ -772,8 +772,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { tt = (*(int64_t *)row[i]) / 1000000000; } - if (taosLocalTime(&tt, &tp) == NULL) { - sprintf(timeStr, "NaN"); + if (taosLocalTime(&tt, &tp, timeStr) == NULL) { break; } strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", &tp); From 4819b22a21424d57bf730de9a61ec95a0cf29ae4 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 3 Apr 2023 17:58:43 +0800 Subject: [PATCH 66/71] fix: fix tsim crash on windows due to invalid input to strftime --- source/libs/function/src/builtins.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 22af82bceb..e2b68a8b05 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -213,8 +213,7 @@ static int32_t addTimezoneParam(SNodeList* pList) { char buf[6] = {0}; time_t t = taosTime(NULL); struct tm tmInfo; - if (taosLocalTime(&t, &tmInfo, buf) == NULL) { - } else { + if (taosLocalTime(&t, &tmInfo, buf) != NULL) { strftime(buf, sizeof(buf), "%z", &tmInfo); } int32_t len = (int32_t)strlen(buf); From 22a876bcdf188d734efba4194110052729c7abf8 Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Tue, 4 Apr 2023 18:45:24 +0800 Subject: [PATCH 67/71] docs: release 3.0.3.2 (#20765) --- docs/en/28-releases/01-tdengine.md | 4 ++++ docs/en/28-releases/02-tools.md | 4 ++++ docs/zh/28-releases/01-tdengine.md | 4 ++++ docs/zh/28-releases/02-tools.md | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 9910a3d89a..730aa7c4f1 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -14,6 +14,10 @@ import Release from "/components/ReleaseV3"; +## 3.0.3.1 + + + ## 3.0.3.0 diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md index a060d2dd77..17581b780a 100644 --- a/docs/en/28-releases/02-tools.md +++ b/docs/en/28-releases/02-tools.md @@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat import Release from "/components/ReleaseV3"; +## 2.4.11 + + + ## 2.4.10 diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 62d5c5f592..b4441ad078 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.3.2 + + + ## 3.0.3.1 diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md index 0e4eda6001..cce6834f12 100644 --- a/docs/zh/28-releases/02-tools.md +++ b/docs/zh/28-releases/02-tools.md @@ -10,6 
+10,10 @@ taosTools 各版本安装包下载链接如下: import Release from "/components/ReleaseV3"; +## 2.4.11 + + + ## 2.4.10 From debe63ca9c7811c5daf2f1e8762829c3c9e0b058 Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Tue, 4 Apr 2023 18:48:30 +0800 Subject: [PATCH 68/71] release: upgrade 3.0 default version to 3.0.3.2 (#20768) --- cmake/cmake.version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cmake.version b/cmake/cmake.version index de85025a8c..29f560148e 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.3.1") + SET(TD_VER_NUMBER "3.0.3.2") ENDIF () IF (DEFINED VERCOMPATIBLE) From 42973676cea687b6ad029643f0bbe9ced6cb8c68 Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Tue, 4 Apr 2023 19:04:33 +0800 Subject: [PATCH 69/71] docs/3.0.3.2 (#20776) * docs: release 3.0.3.2 * docs: release 3.0.3.2 --- docs/en/28-releases/01-tdengine.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 730aa7c4f1..b160058d02 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.3.2 + + + ## 3.0.3.1 From 33c5f1cf8862dbce6fb8305342dca5e843348f60 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 5 Apr 2023 10:21:43 +0800 Subject: [PATCH 70/71] chore: cus name support in shellArgument (#20754) * chore: refactor cus prompt * fix: client name in install.sh * fix: -Wno-reserved-user-defined-literal * fix: update taos-tools commit * fix: include/os/osDir.h * fix: check cus name * fix: makepkg.sh * chore: update taos-tools d194dc9 * fix: cus name * fix: change adapter prefix * fix: tools/*.sh * fix: scripts * fix: ../../packaging/tools/install_client.sh * chore: cus domain in script * fix: cus name in packaging/tools/install.sh * fix: cus name in ../../packaging/tools/remove.sh * fix: don't edit origin file * fix: remove brand name * fix: update taos-tools * fix: cus name in log * fix: compile error without cuc name --------- Co-authored-by: chenhaoran --- cmake/taostools_CMakeLists.txt.in | 2 +- source/dnode/mgmt/exe/dmMain.c | 17 ++++++++++++++++- tools/shell/src/shellArguments.c | 2 +- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 2de3881dd2..a4edc47c0b 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 273a3fe + GIT_TAG 53d14fa SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index d3cffaa185..892b4a9a40 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -19,6 +19,21 @@ #include "tconfig.h" #include "tglobal.h" +#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) +#include "cus_name.h" +#else +#ifndef CUS_NAME + #define CUS_NAME "TDengine" +#endif + +#ifndef CUS_PROMPT + #define CUS_PROMPT "taos" +#endif + +#ifndef CUS_EMAIL + #define CUS_EMAIL "" +#endif +#endif // clang-format off #define DM_APOLLO_URL "The apollo string to use when 
configuring the server, such as: -a 'jsonFile:./tests/cfg.json', cfg.json text can be '{\"fqdn\":\"td1\"}'." #define DM_CFG_DIR "Configuration directory." @@ -228,7 +243,7 @@ static void dmDumpCfg() { } static int32_t dmInitLog() { - return taosCreateLog("taosdlog", 1, configDir, global.envCmd, global.envFile, global.apolloUrl, global.pArgs, 0); + return taosCreateLog(CUS_PROMPT"dlog", 1, configDir, global.envCmd, global.envFile, global.apolloUrl, global.pArgs, 0); } static void taosCleanupArgs() { diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index f7008548f6..1eb4edb28a 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -46,7 +46,7 @@ #define SHELL_VERSION "Print program version." #ifdef WEBSOCKET -#define SHELL_DSN "Use dsn to connect to the TDengine cloud server or to a remote server which provides WebSocket connection." +#define SHELL_DSN "Use dsn to connect to the cloud server or to a remote server which provides WebSocket connection." #define SHELL_REST "Use RESTful mode when connecting." #define SHELL_TIMEOUT "Set the timeout for websocket query in seconds, default is 30." #endif From 79461d918333ed5b6cf1219c03c5efcbfe67c32f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 5 Apr 2023 12:18:09 +0800 Subject: [PATCH 71/71] fix: taosbenchmark disorder range dynamic (#20772) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index a4edc47c0b..3f27cb2517 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 53d14fa + GIT_TAG ddd654a SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE