From d041775e9a9b7b87e28bce068f6cd085a13fad84 Mon Sep 17 00:00:00 2001
From: "St.Huang"
Date: Mon, 2 Jul 2018 18:33:03 +0800
Subject: [PATCH] fix update function

---
 2.0.x.zip                            |  Bin 118131 -> 151038 bytes
 README.md                            |    1 +
 src/fastwq/__init__.py               |    7 +-
 src/fastwq/libs/AnkiHub/__init__.py  |    0
 src/fastwq/libs/AnkiHub/markdown2.py | 2584 ++++++++++++++++++++++++++
 src/fastwq/libs/AnkiHub/updates.py   |   68 +
 src/fastwq/libs/__init__.py          |    1 +
 src/fastwq/libs/ankihub.py           |  279 +++
 src/fastwq/ui.py                     |   48 +-
 9 files changed, 2947 insertions(+), 41 deletions(-)
 create mode 100644 src/fastwq/libs/AnkiHub/__init__.py
 create mode 100644 src/fastwq/libs/AnkiHub/markdown2.py
 create mode 100644 src/fastwq/libs/AnkiHub/updates.py
 create mode 100644 src/fastwq/libs/ankihub.py

diff --git a/2.0.x.zip b/2.0.x.zip
index cc422b7b1875a460cdc5720741d97160c1b15f19..34b31c98362ccaf60686c4203302c380e89172f3 100644
Binary files a/2.0.x.zip and b/2.0.x.zip differ
zgxD#p@rr>!GNK2(C#UQMCrAPA*Ob z#j0`pD%V|!?se?K?};ap)l=)xUr*YCZzX|L$2CEsyUQj*0VmhxHsTBf)jeh2Jno=( z-F?{+e%?<`k?YIbW2fk12=#i4=^=%=UXJF)a+`>UyPs=PG$}uigqv0K8pm0Po13erxI9b-WKk@D$zmEBJIy$o$jC z@n~;@1|Irt*+Ejiz^tyVo58?!7VS#;wmN!gU^cT}(bR*pR%N1czIAE?TC}(2Wp$t?jg*t z>X&?P?%KL$64sCXpMFUYj#wzaMn_VWj%)*#8{LJ$(qfS!M&)A1l8K0+BR#Bw5}cHnB+#>Q`aKLwc#ZH> zf97te3)2>uJyub?AxTq7o@$M=E=8Hfk?V(DUvR8mX~-bXFEIkHjW6q#;X11>k`i3n z3=E8CT&7Rwc0%yNP(POvD1kHtJ@lfa!o49aPeOuf1YD4jcJbnZ!$7GS92MmbJ$$%0 z4v1BhU|_vT$UGe`F<*-o?iwD_M|?S5wo+mrZ}h4@?75^Q86oU17;g`-Ih-(#2*tFl zQA^;~QTw7~u~TcN+fucVA`|J}t;tk(H+`+CCIdQK43i;-a`tu!6f#)sVU9Do@6wxMg%}}D3H9fvQXmbcx z;FwN7s7Qga(}exBG^mT1GiyACx)-~gl~}c1wl{Nap-z7>>1X<|Zb=6m3*OFo%u8gj5`Wf)kB#$v|()T(DkVh^7A zFbqntDcmXzQ%169Q>teH+rqIs6b$I)mp(B+GiU9U^{~g2(-fp_nEp`6{A#S=0~MUe z5#2hRvghhcTmeZOqUSu`XT0Kb8HUT~_Uli$_iz@Y?ea8R&V|;+%${dsLRg;?Xjv5H z5Y%`Ju%=@Opbd;?(8IBf;y%Bkrc)QOamk+;6vMnQiV}yoRsC9pEy(3sSE7%^MOy9_ zx{q4r#8(7wH#1It#HJmuPW(v94YX93?ZXFKHy8H|Jazy1rKiK^*!=c<_|^0n!0;^& z(}52=w2-wwZJ#AW)~D2-7jjPfIm%pdApKZEl+m6QRLEqz^s!}xW$_8HCRkuIChhd4 z_Zaa9zL?i3&b-CZ=xrh*vB$4=v}p`ta(W9gM|A6+0YFjIR3=}WXp8mN!Yo|@O6=uUF{U~j6D{oN?!%!7>%ZI{tpjU2L5`-ST0t#HQ~ z+xqXT`}N+|Ml347lf7S5Mbi;v)MuG5O8tj-`_)M%@Mg}_YSqI+W#U2#)T8-Dx69y! zcyg|rMDF2=UE6+7UDe(}$Brl#TQfczi9v)c_@+9(V@}Xek@j*`C7Vos{JLBhWbzU- zLFi+j&GaeAa*ZnZ^wW@R-+)7vSge9>M6fL8O$NEw0z67q;+rB~xi=7s`{rl&zrF_j z`c>JnoH0L=rOiN_NY%+&hN*%5SCfTpCjpQFT2X7&7;7M9AczX^_r~UT<%bQPP885fClH#2!6N`se{xV&ju!H&|{NVvkaet5=dw^H8A7N545Y-=PF)PU>P;s=e|0LYi!o|bJ%;G;ef3aHnn54f) z67_#O_$v-l*p%OuU!x#b{eT7G61IXM`^UpWAXHQ_(&O0dvn^mmA4jOo zbJ#pR5&_WM=O9*uZEjj-Gw8$v7El0yI!l7^p%juJB7{OS0!uTfpd{>sZRz6)4zx!M zX2?SL5^=}tq^w4-25Fu1a8iGm_aEnC@-XNA zfKPO`8Zd7c6>p`G#|bhsVIKa%fHfvC`9c@RDt;K|^l zQM~qZsG2macFc=8+Qv`9EZ4r|*KU|)@lV|^``@hQae|Pr(ox71x;NOkzai9O+jV>GN z=JO5K*w%CSU~K|h2}Jbp-T<=_h@53GA6EWUj52FL^GjF&&Rv8aO)+om|Borytqpq14;C{H*Gmxq diff --git a/README.md b/README.md index 4b3d056..99f91b9 100644 --- a/README.md +++ b/README.md @@ -98,3 +98,4 @@ It forks from WordQuery, **multi-thread feature**, and some other features. - [mdict-query](https://github.com/mmjang/mdict-query) - [pystardict](https://github.com/lig/pystardict) - [WordQuery](https://github.com/finalion/WordQuery) + - [AnkiHub](https://github.com/dayjaby/AnkiHub) diff --git a/src/fastwq/__init__.py b/src/fastwq/__init__.py index 001e82f..2f8b36a 100644 --- a/src/fastwq/__init__.py +++ b/src/fastwq/__init__.py @@ -98,4 +98,9 @@ def window_shortcut(key_sequence): """ global my_shortcut my_shortcut = key_sequence - + try: + from .ui import check_updates + check_updates() + except: + pass + \ No newline at end of file diff --git a/src/fastwq/libs/AnkiHub/__init__.py b/src/fastwq/libs/AnkiHub/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/fastwq/libs/AnkiHub/markdown2.py b/src/fastwq/libs/AnkiHub/markdown2.py new file mode 100644 index 0000000..1926792 --- /dev/null +++ b/src/fastwq/libs/AnkiHub/markdown2.py @@ -0,0 +1,2584 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Trent Mick. +# Copyright (c) 2007-2008 ActiveState Corp. +# License: MIT (http://www.opensource.org/licenses/mit-license.php) + +from __future__ import generators + +r"""A fast and complete Python implementation of Markdown. + +[from http://daringfireball.net/projects/markdown/] +> Markdown is a text-to-HTML filter; it translates an easy-to-read / +> easy-to-write structured text format into HTML. 
diff --git a/src/fastwq/libs/AnkiHub/__init__.py b/src/fastwq/libs/AnkiHub/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/fastwq/libs/AnkiHub/markdown2.py b/src/fastwq/libs/AnkiHub/markdown2.py
new file mode 100644
index 0000000..1926792
--- /dev/null
+++ b/src/fastwq/libs/AnkiHub/markdown2.py
@@ -0,0 +1,2584 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Trent Mick.
+# Copyright (c) 2007-2008 ActiveState Corp.
+# License: MIT (http://www.opensource.org/licenses/mit-license.php)
+
+from __future__ import generators
+
+r"""A fast and complete Python implementation of Markdown.
+
+[from http://daringfireball.net/projects/markdown/]
+> Markdown is a text-to-HTML filter; it translates an easy-to-read /
+> easy-to-write structured text format into HTML.  Markdown's text
+> format is most similar to that of plain text email, and supports
+> features such as headers, *emphasis*, code blocks, blockquotes, and
+> links.
+>
+> Markdown's syntax is designed not as a generic markup language, but
+> specifically to serve as a front-end to (X)HTML.  You can use span-level
+> HTML tags anywhere in a Markdown document, and you can use block level
+> HTML tags (like <div> and <table> as well).
+
+Module usage:
+
+    >>> import markdown2
+    >>> markdown2.markdown("*boo!*")  # or use `html = markdown_path(PATH)`
+    u'<p><em>boo!</em></p>\n'
+
+    >>> markdowner = Markdown()
+    >>> markdowner.convert("*boo!*")
+    u'<p><em>boo!</em></p>\n'
+    >>> markdowner.convert("**boom!**")
+    u'<p><strong>boom!</strong></p>
\n' + +This implementation of Markdown implements the full "core" syntax plus a +number of extras (e.g., code syntax coloring, footnotes) as described on +. +""" + +cmdln_desc = """A fast and complete Python implementation of Markdown, a +text-to-HTML conversion tool for web writers. + +Supported extra syntax options (see -x|--extras option below and +see for details): + +* code-friendly: Disable _ and __ for em and strong. +* cuddled-lists: Allow lists to be cuddled to the preceding paragraph. +* fenced-code-blocks: Allows a code block to not have to be indented + by fencing it with '```' on a line before and after. Based on + with support for + syntax highlighting. +* footnotes: Support footnotes as in use on daringfireball.net and + implemented in other Markdown processors (tho not in Markdown.pl v1.0.1). +* numbering: Support of generic counters. Non standard extension to + allow sequential numbering of figures, tables, equations, exhibits etc. +* header-ids: Adds "id" attributes to headers. The id value is a slug of + the header text. +* html-classes: Takes a dict mapping html tag names (lowercase) to a + string to use for a "class" tag attribute. Currently only supports "img", + "table", "pre" and "code" tags. Add an issue if you require this for other + tags. +* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to + have markdown processing be done on its contents. Similar to + but with + some limitations. +* metadata: Extract metadata from a leading '---'-fenced block. + See for details. +* nofollow: Add `rel="nofollow"` to add `` tags with an href. See + . +* pyshell: Treats unindented Python interactive shell sessions as + blocks. +* link-patterns: Auto-link given regex patterns in text (e.g. bug number + references, revision number references). +* smarty-pants: Replaces ' and " with curly quotation marks or curly + apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes, + and ellipses. +* spoiler: A special kind of blockquote commonly hidden behind a + click on SO. Syntax per . +* toc: The returned HTML string gets a new "toc_html" attribute which is + a Table of Contents for the document. (experimental) +* xml: Passes one-liner processing instructions and namespaced XML tags. +* tables: Tables using the same format as GFM + and + PHP-Markdown Extra . +* wiki-tables: Google Code Wiki-style tables. See + . +""" + +# Dev Notes: +# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm +# not yet sure if there implications with this. Compare 'pydoc sre' +# and 'perldoc perlre'. + +__version_info__ = (2, 3, 2) +__version__ = '.'.join(map(str, __version_info__)) +__author__ = "Trent Mick" + +import sys +import re +import logging +try: + from hashlib import md5 +except ImportError: + from md5 import md5 +import optparse +from random import random, randint +import codecs +from itertools import chain + + +# ---- Python version compat + +if sys.version_info[:2] < (2, 4): + def reversed(sequence): + for i in sequence[::-1]: + yield i + +# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3). 
+if sys.version_info[0] <= 2: + py3 = False + try: + bytes + except NameError: + bytes = str + base_string_type = basestring +elif sys.version_info[0] >= 3: + py3 = True + unicode = str + base_string_type = str + +# ---- globals + +DEBUG = False +log = logging.getLogger("markdown") + +DEFAULT_TAB_WIDTH = 4 + + +SECRET_SALT = bytes(randint(0, 1000000)) +def _hash_text(s): + return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest() + +# Table of hash values for escaped characters: +g_escape_table = dict([(ch, _hash_text(ch)) + for ch in '\\`*_{}[]()>#+-.!']) + + +# ---- exceptions +class MarkdownError(Exception): + pass + + +# ---- public api + +def markdown_path(path, encoding="utf-8", + html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + fp = codecs.open(path, 'r', encoding) + text = fp.read() + fp.close() + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + + +def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + + +class Markdown(object): + # The dict of "extras" to enable in processing -- a mapping of + # extra name to argument for the extra. Most extras do not have an + # argument, in which case the value is None. + # + # This can be set via (a) subclassing and (b) the constructor + # "extras" argument. + extras = None + + urls = None + titles = None + html_blocks = None + html_spans = None + html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py + + # Used to track when we're inside an ordered or unordered list + # (see _ProcessListItems() for details): + list_level = 0 + + _ws_only_line_re = re.compile(r"^[ \t]+$", re.M) + + def __init__(self, html4tags=False, tab_width=4, safe_mode=None, + extras=None, link_patterns=None, use_file_vars=False): + if html4tags: + self.empty_element_suffix = ">" + else: + self.empty_element_suffix = " />" + self.tab_width = tab_width + + # For compatibility with earlier markdown2.py and with + # markdown.py's safe_mode being a boolean, + # safe_mode == True -> "replace" + if safe_mode is True: + self.safe_mode = "replace" + else: + self.safe_mode = safe_mode + + # Massaging and building the "extras" info. 
+ if self.extras is None: + self.extras = {} + elif not isinstance(self.extras, dict): + self.extras = dict([(e, None) for e in self.extras]) + if extras: + if not isinstance(extras, dict): + extras = dict([(e, None) for e in extras]) + self.extras.update(extras) + assert isinstance(self.extras, dict) + if "toc" in self.extras and "header-ids" not in self.extras: + self.extras["header-ids"] = None # "toc" implies "header-ids" + self._instance_extras = self.extras.copy() + + self.link_patterns = link_patterns + self.use_file_vars = use_file_vars + self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M) + + self._escape_table = g_escape_table.copy() + if "smarty-pants" in self.extras: + self._escape_table['"'] = _hash_text('"') + self._escape_table["'"] = _hash_text("'") + + def reset(self): + self.urls = {} + self.titles = {} + self.html_blocks = {} + self.html_spans = {} + self.list_level = 0 + self.extras = self._instance_extras.copy() + if "footnotes" in self.extras: + self.footnotes = {} + self.footnote_ids = [] + if "header-ids" in self.extras: + self._count_from_header_id = {} # no `defaultdict` in Python 2.4 + if "metadata" in self.extras: + self.metadata = {} + + # Per "rel" + # should only be used in tags with an "href" attribute. + _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE) + + # Opens the linked document in a new window or tab + # should only used in tags with an "target" attribute. + # same with _a_nofollow + _a_blank = _a_nofollow + + def convert(self, text): + """Convert the given text.""" + # Main function. The order in which other subs are called here is + # essential. Link and image substitutions need to happen before + # _EscapeSpecialChars(), so that any *'s or _'s in the + # and tags get encoded. + + # Clear the global hashes. If we don't clear these, you get conflicts + # from other articles when generating a page which contains more than + # one article (e.g. an index page that shows the N most recent + # articles): + self.reset() + + if not isinstance(text, unicode): + # TODO: perhaps shouldn't presume UTF-8 for string input? + text = unicode(text, 'utf-8') + + if self.use_file_vars: + # Look for emacs-style file variable hints. + emacs_vars = self._get_emacs_vars(text) + if "markdown-extras" in emacs_vars: + splitter = re.compile("[ ,]+") + for e in splitter.split(emacs_vars["markdown-extras"]): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + self.extras[ename] = earg + + # Standardize line endings: + text = text.replace("\r\n", "\n") + text = text.replace("\r", "\n") + + # Make sure $text ends with a couple of newlines: + text += "\n\n" + + # Convert all tabs to spaces. + text = self._detab(text) + + # Strip any lines consisting only of spaces and tabs. + # This makes subsequent regexen easier to write, because we can + # match consecutive blank lines with /\n+/ instead of something + # contorted like /[ \t]*\n+/ . 
+ text = self._ws_only_line_re.sub("", text) + + # strip metadata from head and extract + if "metadata" in self.extras: + text = self._extract_metadata(text) + + text = self.preprocess(text) + + if "fenced-code-blocks" in self.extras and not self.safe_mode: + text = self._do_fenced_code_blocks(text) + + if self.safe_mode: + text = self._hash_html_spans(text) + + # Turn block-level HTML blocks into hash entries + text = self._hash_html_blocks(text, raw=True) + + if "fenced-code-blocks" in self.extras and self.safe_mode: + text = self._do_fenced_code_blocks(text) + + # Because numbering references aren't links (yet?) then we can do everything associated with counters + # before we get started + if "numbering" in self.extras: + text = self._do_numbering(text) + + # Strip link definitions, store in hashes. + if "footnotes" in self.extras: + # Must do footnotes first because an unlucky footnote defn + # looks like a link defn: + # [^4]: this "looks like a link defn" + text = self._strip_footnote_definitions(text) + text = self._strip_link_definitions(text) + + text = self._run_block_gamut(text) + + if "footnotes" in self.extras: + text = self._add_footnotes(text) + + text = self.postprocess(text) + + text = self._unescape_special_chars(text) + + if self.safe_mode: + text = self._unhash_html_spans(text) + + if "nofollow" in self.extras: + text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text) + + if "target-blank-links" in self.extras: + text = self._a_blank.sub(r'<\1 target="_blank"\2', text) + + text += "\n" + + rv = UnicodeWithAttrs(text) + if "toc" in self.extras: + rv._toc = self._toc + if "metadata" in self.extras: + rv.metadata = self.metadata + return rv + + def postprocess(self, text): + """A hook for subclasses to do some postprocessing of the html, if + desired. This is called before unescaping of special chars and + unhashing of raw HTML spans. + """ + return text + + def preprocess(self, text): + """A hook for subclasses to do some preprocessing of the Markdown, if + desired. This is called after basic formatting of the text, but prior + to any extras, safe mode, etc. processing. + """ + return text + + # Is metadata if the content starts with optional '---'-fenced `key: value` + # pairs. E.g. (indented for presentation): + # --- + # foo: bar + # another-var: blah blah + # --- + # # header + # or: + # foo: bar + # another-var: blah blah + # + # # header + _meta_data_pattern = re.compile(r'^(?:---[\ \t]*\n)?(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)|([\S\w]+\s*:(?! >)[ \t]*.*\n?)(?:---[\ \t]*\n)?', re.MULTILINE) + _key_val_pat = re.compile("[\S\w]+\s*:(?! 
>)[ \t]*.*\n?", re.MULTILINE) + # this allows key: > + # value + # conutiues over multiple lines + _key_val_block_pat = re.compile( + "(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)", re.MULTILINE) + + def _extract_metadata(self, text): + match = re.findall(self._meta_data_pattern, text) + + if not match: + return text + + last_item = list(filter(None, match[-1]))[0] + end_of_metadata = text.index(last_item)+len(last_item) + if text.startswith("---"): + # add 8 charachters for opening and closing + # and since indexing starts at 0 we add a step + tail = text[end_of_metadata+4:] + else: + tail = text[end_of_metadata:] + + kv = re.findall(self._key_val_pat, text) + kvm = re.findall(self._key_val_block_pat, text) + kvm = [item.replace(": >\n", ":", 1) for item in kvm] + + for item in kv + kvm: + k, v = item.split(":", 1) + self.metadata[k.strip()] = v.strip() + + return tail + + _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE) + # This regular expression is intended to match blocks like this: + # PREFIX Local Variables: SUFFIX + # PREFIX mode: Tcl SUFFIX + # PREFIX End: SUFFIX + # Some notes: + # - "[ \t]" is used instead of "\s" to specifically exclude newlines + # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does + # not like anything other than Unix-style line terminators. + _emacs_local_vars_pat = re.compile(r"""^ + (?P(?:[^\r\n|\n|\r])*?) + [\ \t]*Local\ Variables:[\ \t]* + (?P.*?)(?:\r\n|\n|\r) + (?P.*?\1End:) + """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE) + + def _get_emacs_vars(self, text): + """Return a dictionary of emacs-style local variables. + + Parsing is done loosely according to this spec (and according to + some in-practice deviations from this): + http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables + """ + emacs_vars = {} + SIZE = pow(2, 13) # 8kB + + # Search near the start for a '-*-'-style one-liner of variables. + head = text[:SIZE] + if "-*-" in head: + match = self._emacs_oneliner_vars_pat.search(head) + if match: + emacs_vars_str = match.group(1) + assert '\n' not in emacs_vars_str + emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') + if s.strip()] + if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: + # While not in the spec, this form is allowed by emacs: + # -*- Tcl -*- + # where the implied "variable" is "mode". This form + # is only allowed if there are no other variables. + emacs_vars["mode"] = emacs_var_strs[0].strip() + else: + for emacs_var_str in emacs_var_strs: + try: + variable, value = emacs_var_str.strip().split(':', 1) + except ValueError: + log.debug("emacs variables error: malformed -*- " + "line: %r", emacs_var_str) + continue + # Lowercase the variable name because Emacs allows "Mode" + # or "mode" or "MoDe", etc. + emacs_vars[variable.lower()] = value.strip() + + tail = text[-SIZE:] + if "Local Variables" in tail: + match = self._emacs_local_vars_pat.search(tail) + if match: + prefix = match.group("prefix") + suffix = match.group("suffix") + lines = match.group("content").splitlines(0) + # print "prefix=%r, suffix=%r, content=%r, lines: %s"\ + # % (prefix, suffix, match.group("content"), lines) + + # Validate the Local Variables block: proper prefix and suffix + # usage. + for i, line in enumerate(lines): + if not line.startswith(prefix): + log.debug("emacs variables error: line '%s' " + "does not use proper prefix '%s'" + % (line, prefix)) + return {} + # Don't validate suffix on last line. 
Emacs doesn't care, + # neither should we. + if i != len(lines)-1 and not line.endswith(suffix): + log.debug("emacs variables error: line '%s' " + "does not use proper suffix '%s'" + % (line, suffix)) + return {} + + # Parse out one emacs var per line. + continued_for = None + for line in lines[:-1]: # no var on the last line ("PREFIX End:") + if prefix: line = line[len(prefix):] # strip prefix + if suffix: line = line[:-len(suffix)] # strip suffix + line = line.strip() + if continued_for: + variable = continued_for + if line.endswith('\\'): + line = line[:-1].rstrip() + else: + continued_for = None + emacs_vars[variable] += ' ' + line + else: + try: + variable, value = line.split(':', 1) + except ValueError: + log.debug("local variables error: missing colon " + "in local variables entry: '%s'" % line) + continue + # Do NOT lowercase the variable name, because Emacs only + # allows "mode" (and not "Mode", "MoDe", etc.) in this block. + value = value.strip() + if value.endswith('\\'): + value = value[:-1].rstrip() + continued_for = variable + else: + continued_for = None + emacs_vars[variable] = value + + # Unquote values. + for var, val in list(emacs_vars.items()): + if len(val) > 1 and (val.startswith('"') and val.endswith('"') + or val.startswith('"') and val.endswith('"')): + emacs_vars[var] = val[1:-1] + + return emacs_vars + + def _detab_line(self, line): + r"""Recusively convert tabs to spaces in a single line. + + Called from _detab().""" + if '\t' not in line: + return line + chunk1, chunk2 = line.split('\t', 1) + chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width)) + output = chunk1 + chunk2 + return self._detab_line(output) + + def _detab(self, text): + r"""Iterate text line by line and convert tabs to spaces. + + >>> m = Markdown() + >>> m._detab("\tfoo") + ' foo' + >>> m._detab(" \tfoo") + ' foo' + >>> m._detab("\t foo") + ' foo' + >>> m._detab(" foo") + ' foo' + >>> m._detab(" foo\n\tbar\tblam") + ' foo\n bar blam' + """ + if '\t' not in text: + return text + output = [] + for line in text.splitlines(): + output.append(self._detab_line(line)) + return '\n'.join(output) + + # I broke out the html5 tags here and add them to _block_tags_a and + # _block_tags_b. This way html5 tags are easy to keep track of. + _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption' + + _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del' + _block_tags_a += _html5tags + + _strict_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_a, + re.X | re.M) + + _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math' + _block_tags_b += _html5tags + + _liberal_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? 
# any number of lines, minimally matching + .* # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_b, + re.X | re.M) + + _html_markdown_attr_re = re.compile( + r'''\s+markdown=("1"|'1')''') + def _hash_html_block_sub(self, match, raw=False): + html = match.group(1) + if raw and self.safe_mode: + html = self._sanitize_html(html) + elif 'markdown-in-html' in self.extras and 'markdown=' in html: + first_line = html.split('\n', 1)[0] + m = self._html_markdown_attr_re.search(first_line) + if m: + lines = html.split('\n') + middle = '\n'.join(lines[1:-1]) + last_line = lines[-1] + first_line = first_line[:m.start()] + first_line[m.end():] + f_key = _hash_text(first_line) + self.html_blocks[f_key] = first_line + l_key = _hash_text(last_line) + self.html_blocks[l_key] = last_line + return ''.join(["\n\n", f_key, + "\n\n", middle, "\n\n", + l_key, "\n\n"]) + key = _hash_text(html) + self.html_blocks[key] = html + return "\n\n" + key + "\n\n" + + def _hash_html_blocks(self, text, raw=False): + """Hashify HTML blocks + + We only want to do this for block-level HTML tags, such as headers, + lists, and tables. That's because we still want to wrap
<p>s around
+        "paragraphs" that are wrapped in non-block-level tags, such as anchors,
+        phrase emphasis, and spans. The list of tags we're looking for is
+        hard-coded.
+
+        @param raw {boolean} indicates if these are raw HTML blocks in
+        the original source. It makes a difference in "safe" mode.
+        """
+        if '<' not in text:
+            return text
+
+        # Pass `raw` value into our calls to self._hash_html_block_sub.
+        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
+
+        # First, look for nested blocks, e.g.:
+        #   <div>
+        #       <div>
+        #       tags for inner block must be indented.
+        #       </div>
+        #   </div>
+        #
+        # The outermost tags must start at the left margin for this to match, and
+        # the inner nested divs must be indented.
+        # We need to do this before the next, more liberal match, because the next
+        # match will start at the first `<div>` and stop at the first `</div>`.
+        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
+
+        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
+        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
+
+        # Special case just for <hr />
. It was easier to make a special + # case than to make the other regex more complicated. + if "", start_idx) + 3 + except ValueError: + break + + # Start position for next comment block search. + start = end_idx + + # Validate whitespace before comment. + if start_idx: + # - Up to `tab_width - 1` spaces before start_idx. + for i in range(self.tab_width - 1): + if text[start_idx - 1] != ' ': + break + start_idx -= 1 + if start_idx == 0: + break + # - Must be preceded by 2 newlines or hit the start of + # the document. + if start_idx == 0: + pass + elif start_idx == 1 and text[0] == '\n': + start_idx = 0 # to match minute detail of Markdown.pl regex + elif text[start_idx-2:start_idx] == '\n\n': + pass + else: + break + + # Validate whitespace after comment. + # - Any number of spaces and tabs. + while end_idx < len(text): + if text[end_idx] not in ' \t': + break + end_idx += 1 + # - Must be following by 2 newlines or hit end of text. + if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'): + continue + + # Escape and hash (must match `_hash_html_block_sub`). + html = text[start_idx:end_idx] + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:] + + if "xml" in self.extras: + # Treat XML processing instructions and namespaced one-liner + # tags as if they were block HTML tags. E.g., if standalone + # (i.e. are their own paragraph), the following do not get + # wrapped in a
<p>
tag: + # + # + # + _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) + text = _xml_oneliner_re.sub(hash_html_block_sub, text) + + return text + + def _strip_link_definitions(self, text): + # Strips link definitions from text, stores the URLs and titles in + # hash references. + less_than_tab = self.tab_width - 1 + + # Link defs are in the form: + # [id]: url "optional title" + _link_def_re = re.compile(r""" + ^[ ]{0,%d}\[(.+)\]: # id = \1 + [ \t]* + \n? # maybe *one* newline + [ \t]* + ? # url = \2 + [ \t]* + (?: + \n? # maybe one newline + [ \t]* + (?<=\s) # lookbehind for whitespace + ['"(] + ([^\n]*) # title = \3 + ['")] + [ \t]* + )? # title is optional + (?:\n+|\Z) + """ % less_than_tab, re.X | re.M | re.U) + return _link_def_re.sub(self._extract_link_def_sub, text) + + def _extract_link_def_sub(self, match): + id, url, title = match.groups() + key = id.lower() # Link IDs are case-insensitive + self.urls[key] = self._encode_amps_and_angles(url) + if title: + self.titles[key] = title + return "" + + def _do_numbering(self, text): + ''' We handle the special extension for generic numbering for + tables, figures etc. + ''' + # First pass to define all the references + self.regex_defns = re.compile(r''' + \[\#(\w+)\s* # the counter. Open square plus hash plus a word \1 + ([^@]*)\s* # Some optional characters, that aren't an @. \2 + @(\w+) # the id. Should this be normed? \3 + ([^\]]*)\] # The rest of the text up to the terminating ] \4 + ''', re.VERBOSE) + self.regex_subs = re.compile(r"\[@(\w+)\s*\]") # [@ref_id] + counters = {} + references = {} + replacements = [] + definition_html = '
<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>'
+        reference_html = '<a class="{}" href="#counter-ref-{}">
{}' + for match in self.regex_defns.finditer(text): + # We must have four match groups otherwise this isn't a numbering reference + if len(match.groups()) != 4: + continue + counter = match.group(1) + text_before = match.group(2) + ref_id = match.group(3) + text_after = match.group(4) + number = counters.get(counter, 1) + references[ref_id] = (number, counter) + replacements.append((match.start(0), + definition_html.format(counter, + ref_id, + text_before, + number, + text_after), + match.end(0))) + counters[counter] = number + 1 + for repl in reversed(replacements): + text = text[:repl[0]] + repl[1] + text[repl[2]:] + + # Second pass to replace the references with the right + # value of the counter + # Fwiw, it's vaguely annoying to have to turn the iterator into + # a list and then reverse it but I can't think of a better thing to do. + for match in reversed(list(self.regex_subs.finditer(text))): + number, counter = references.get(match.group(1), (None, None)) + if number is not None: + repl = reference_html.format(counter, + match.group(1), + number) + else: + repl = reference_html.format(match.group(1), + 'countererror', + '?' + match.group(1) + '?') + if "smarty-pants" in self.extras: + repl = repl.replace('"', self._escape_table['"']) + + text = text[:match.start()] + repl + text[match.end():] + return text + + def _extract_footnote_def_sub(self, match): + id, text = match.groups() + text = _dedent(text, skip_first_line=not text.startswith('\n')).strip() + normed_id = re.sub(r'\W', '-', id) + # Ensure footnote text ends with a couple newlines (for some + # block gamut matches). + self.footnotes[normed_id] = text + "\n\n" + return "" + + def _strip_footnote_definitions(self, text): + """A footnote definition looks like this: + + [^note-id]: Text of the note. + + May include one or more indented paragraphs. + + Where, + - The 'note-id' can be pretty much anything, though typically it + is the number of the footnote. + - The first paragraph may start on the next line, like so: + + [^note-id]: + Text of the note. + """ + less_than_tab = self.tab_width - 1 + footnote_def_re = re.compile(r''' + ^[ ]{0,%d}\[\^(.+)\]: # id = \1 + [ \t]* + ( # footnote text = \2 + # First line need not start with the spaces. + (?:\s*.*\n+) + (?: + (?:[ ]{%d} | \t) # Subsequent lines must be indented. + .*\n+ + )* + ) + # Lookahead for non-space at line-start, or end of doc. + (?:(?=^[ ]{0,%d}\S)|\Z) + ''' % (less_than_tab, self.tab_width, self.tab_width), + re.X | re.M) + return footnote_def_re.sub(self._extract_footnote_def_sub, text) + + _hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M) + + def _run_block_gamut(self, text): + # These are all the transformations that form block-level + # tags like paragraphs, headers, and list items. + + if "fenced-code-blocks" in self.extras: + text = self._do_fenced_code_blocks(text) + + text = self._do_headers(text) + + # Do Horizontal Rules: + # On the number of spaces in horizontal rules: The spec is fuzzy: "If + # you wish, you may use spaces between the hyphens or asterisks." + # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the + # hr chars to one or two. We'll reproduce that limit here. + hr = "\n tags around block-level tags. 
+ text = self._hash_html_blocks(text) + + text = self._form_paragraphs(text) + + return text + + def _pyshell_block_sub(self, match): + lines = match.group(0).splitlines(0) + _dedentlines(lines) + indent = ' ' * self.tab_width + s = ('\n' # separate from possible cuddled paragraph + + indent + ('\n'+indent).join(lines) + + '\n\n') + return s + + def _prepare_pyshell_blocks(self, text): + """Ensure that Python interactive shell sessions are put in + code blocks -- even if not properly indented. + """ + if ">>>" not in text: + return text + + less_than_tab = self.tab_width - 1 + _pyshell_block_re = re.compile(r""" + ^([ ]{0,%d})>>>[ ].*\n # first line + ^(\1.*\S+.*\n)* # any number of subsequent lines + ^\n # ends with a blank line + """ % less_than_tab, re.M | re.X) + + return _pyshell_block_re.sub(self._pyshell_block_sub, text) + + def _table_sub(self, match): + trim_space_re = '^[ \t\n]+|[ \t\n]+$' + trim_bar_re = '^\||\|$' + + head, underline, body = match.groups() + + # Determine aligns for columns. + cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)).split('|')] + align_from_col_idx = {} + for col_idx, col in enumerate(cols): + if col[0] == ':' and col[-1] == ':': + align_from_col_idx[col_idx] = ' align="center"' + elif col[0] == ':': + align_from_col_idx[col_idx] = ' align="left"' + elif col[-1] == ':': + align_from_col_idx[col_idx] = ' align="right"' + + # thead + hlines = ['' % self._html_class_str_from_tag('table'), '
<thead>', '<tr>']
+        cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)).split('|')]
+        for col_idx, col in enumerate(cols):
+            hlines.append('  <th%s>%s</th>' % (
+                align_from_col_idx.get(col_idx, ''),
+                self._run_span_gamut(col)
+            ))
+        hlines.append('</tr>')
+        hlines.append('</thead>')
+
+        # tbody
+        hlines.append('<tbody>')
+        for line in body.strip('\n').split('\n'):
+            hlines.append('<tr>')
+            cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)).split('|')]
+            for col_idx, col in enumerate(cols):
+                hlines.append('  <td%s>%s</td>' % (
+                    align_from_col_idx.get(col_idx, ''),
+                    self._run_span_gamut(col)
+                ))
+            hlines.append('</tr>')
+        hlines.append('</tbody>')
+        hlines.append('</table>
') + + return '\n'.join(hlines) + '\n' + + def _do_tables(self, text): + """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from + https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538 + """ + less_than_tab = self.tab_width - 1 + table_re = re.compile(r''' + (?:(?<=\n\n)|\A\n?) # leading blank line + + ^[ ]{0,%d} # allowed whitespace + (.*[|].*) \n # $1: header row (at least one pipe) + + ^[ ]{0,%d} # allowed whitespace + ( # $2: underline row + # underline row with leading bar + (?: \|\ *:?-+:?\ * )+ \|? \n + | + # or, underline row without leading bar + (?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \n + ) + + ( # $3: data rows + (?: + ^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces + .*\|.* \n + )+ + ) + ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X) + return table_re.sub(self._table_sub, text) + + def _wiki_table_sub(self, match): + ttext = match.group(0).strip() + # print 'wiki table: %r' % match.group(0) + rows = [] + for line in ttext.splitlines(0): + line = line.strip()[2:-2].strip() + row = [c.strip() for c in re.split(r'(?' % self._html_class_str_from_tag('table'), ''] + for row in rows: + hrow = [''] + for cell in row: + hrow.append('') + hrow.append(self._run_span_gamut(cell)) + hrow.append('') + hrow.append('') + hlines.append(''.join(hrow)) + hlines += ['', ''] + return '\n'.join(hlines) + '\n' + + def _do_wiki_tables(self, text): + # Optimization. + if "||" not in text: + return text + + less_than_tab = self.tab_width - 1 + wiki_table_re = re.compile(r''' + (?:(?<=\n\n)|\A\n?) # leading blank line + ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line + (^\1\|\|.+?\|\|\n)* # any number of subsequent lines + ''' % less_than_tab, re.M | re.X) + return wiki_table_re.sub(self._wiki_table_sub, text) + + def _run_span_gamut(self, text): + # These are all the transformations that occur *within* block-level + # tags like paragraphs, headers, and list items. + + text = self._do_code_spans(text) + + text = self._escape_special_chars(text) + + # Process anchor and image tags. + text = self._do_links(text) + + # Make links out of things like `` + # Must come after _do_links(), because you can use < and > + # delimiters in inline links like [this](). + text = self._do_auto_links(text) + + if "link-patterns" in self.extras: + text = self._do_link_patterns(text) + + text = self._encode_amps_and_angles(text) + + if "strike" in self.extras: + text = self._do_strike(text) + + text = self._do_italics_and_bold(text) + + if "smarty-pants" in self.extras: + text = self._do_smart_punctuation(text) + + # Do hard breaks: + if "break-on-newline" in self.extras: + text = re.sub(r" *\n", " + | + # auto-link (e.g., ) + <\w+[^>]*> + | + # comment + | + <\?.*?\?> # processing instruction + ) + """, re.X) + + def _escape_special_chars(self, text): + # Python markdown note: the HTML tokenization here differs from + # that in Markdown.pl, hence the behaviour for subtle cases can + # differ (I believe the tokenizer here does a better job because + # it isn't susceptible to unmatched '<' and '>' in HTML tags). + # Note, however, that '>' is not allowed in an auto-link URL + # here. + escaped = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup: + # Within tags/HTML-comments/auto-links, encode * and _ + # so they don't conflict with their use in Markdown for + # italics and strong. 
We're replacing each such + # character with its corresponding MD5 checksum value; + # this is likely overkill, but it should prevent us from + # colliding with the escape values by accident. + escaped.append(token.replace('*', self._escape_table['*']) + .replace('_', self._escape_table['_'])) + else: + escaped.append(self._encode_backslash_escapes(token)) + is_html_markup = not is_html_markup + return ''.join(escaped) + + def _hash_html_spans(self, text): + # Used for safe_mode. + + def _is_auto_link(s): + if ':' in s and self._auto_link_re.match(s): + return True + elif '@' in s and self._auto_email_link_re.match(s): + return True + return False + + tokens = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup and not _is_auto_link(token): + sanitized = self._sanitize_html(token) + key = _hash_text(sanitized) + self.html_spans[key] = sanitized + tokens.append(key) + else: + tokens.append(token) + is_html_markup = not is_html_markup + return ''.join(tokens) + + def _unhash_html_spans(self, text): + for key, sanitized in list(self.html_spans.items()): + text = text.replace(key, sanitized) + return text + + def _sanitize_html(self, s): + if self.safe_mode == "replace": + return self.html_removed_text + elif self.safe_mode == "escape": + replacements = [ + ('&', '&'), + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + s = s.replace(before, after) + return s + else: + raise MarkdownError("invalid value for 'safe_mode': %r (must be " + "'escape' or 'replace')" % self.safe_mode) + + _inline_link_title = re.compile(r''' + ( # \1 + [ \t]+ + (['"]) # quote char = \2 + (?P.*?) + \2 + )? # title is optional + \)$ + ''', re.X | re.S) + _tail_of_reference_link_re = re.compile(r''' + # Match tail of: [text][id] + [ ]? # one optional space + (?:\n[ ]*)? # one optional newline followed by spaces + \[ + (?P<id>.*?) + \] + ''', re.X | re.S) + + _whitespace = re.compile(r'\s*') + + _strip_anglebrackets = re.compile(r'<(.*)>.*') + + def _find_non_whitespace(self, text, start): + """Returns the index of the first non-whitespace character in text + after (and including) start + """ + match = self._whitespace.match(text, start) + return match.end() + + def _find_balanced(self, text, start, open_c, close_c): + """Returns the index where the open_c and close_c characters balance + out - the same number of open_c and close_c are encountered - or the + end of string if it's reached before the balance point is found. + """ + i = start + l = len(text) + count = 1 + while count > 0 and i < l: + if text[i] == open_c: + count += 1 + elif text[i] == close_c: + count -= 1 + i += 1 + return i + + def _extract_url_and_title(self, text, start): + """Extracts the url and (optional) title from the tail of a link""" + # text[start] equals the opening parenthesis + idx = self._find_non_whitespace(text, start+1) + if idx == len(text): + return None, None, None + end_idx = idx + has_anglebrackets = text[idx] == "<" + if has_anglebrackets: + end_idx = self._find_balanced(text, end_idx+1, "<", ">") + end_idx = self._find_balanced(text, end_idx, "(", ")") + match = self._inline_link_title.search(text, idx, end_idx) + if not match: + return None, None, None + url, title = text[idx:match.start()], match.group("title") + if has_anglebrackets: + url = self._strip_anglebrackets.sub(r'\1', url) + return url, title, end_idx + + def _do_links(self, text): + """Turn Markdown link shortcuts into XHTML <a> and <img> tags. 
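+        For example (illustrative): `[text](http://example.com "Title")`
+        becomes `<a href="http://example.com" title="Title">text</a>`, and
+        `![alt](/img.png)` becomes the corresponding <img> tag.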
+ + This is a combination of Markdown.pl's _DoAnchors() and + _DoImages(). They are done together because that simplified the + approach. It was necessary to use a different approach than + Markdown.pl because of the lack of atomic matching support in + Python's regex engine used in $g_nested_brackets. + """ + MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 + + # `anchor_allowed_pos` is used to support img links inside + # anchors, but not anchors inside anchors. An anchor's start + # pos must be `>= anchor_allowed_pos`. + anchor_allowed_pos = 0 + + curr_pos = 0 + while True: # Handle the next link. + # The next '[' is the start of: + # - an inline anchor: [text](url "title") + # - a reference anchor: [text][id] + # - an inline img: ![text](url "title") + # - a reference img: ![text][id] + # - a footnote ref: [^id] + # (Only if 'footnotes' extra enabled) + # - a footnote defn: [^id]: ... + # (Only if 'footnotes' extra enabled) These have already + # been stripped in _strip_footnote_definitions() so no + # need to watch for them. + # - a link definition: [id]: url "title" + # These have already been stripped in + # _strip_link_definitions() so no need to watch for them. + # - not markup: [...anything else... + try: + start_idx = text.index('[', curr_pos) + except ValueError: + break + text_length = len(text) + + # Find the matching closing ']'. + # Markdown.pl allows *matching* brackets in link text so we + # will here too. Markdown.pl *doesn't* currently allow + # matching brackets in img alt text -- we'll differ in that + # regard. + bracket_depth = 0 + for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, + text_length)): + ch = text[p] + if ch == ']': + bracket_depth -= 1 + if bracket_depth < 0: + break + elif ch == '[': + bracket_depth += 1 + else: + # Closing bracket not found within sentinel length. + # This isn't markup. + curr_pos = start_idx + 1 + continue + link_text = text[start_idx+1:p] + + # Possibly a footnote ref? + if "footnotes" in self.extras and link_text.startswith("^"): + normed_id = re.sub(r'\W', '-', link_text[1:]) + if normed_id in self.footnotes: + self.footnote_ids.append(normed_id) + result = '<sup class="footnote-ref" id="fnref-%s">' \ + '<a href="#fn-%s">%s</a></sup>' \ + % (normed_id, normed_id, len(self.footnote_ids)) + text = text[:start_idx] + result + text[p+1:] + else: + # This id isn't defined, leave the markup alone. + curr_pos = p+1 + continue + + # Now determine what this is by the remainder. + p += 1 + if p == text_length: + return text + + # Inline anchor or img? + if text[p] == '(': # attempt at perf improvement + url, title, url_end_idx = self._extract_url_and_title(text, p) + if url is not None: + # Handle an inline anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + + # We've got to encode these to avoid conflicting + # with italics/bold. 
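+                    # Illustrative note: a URL such as http://example.com/some_page_name
+                    # contains "_" and "*" characters that must not later be read as
+                    # emphasis markers, hence the temporary escape-table substitution.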
+ url = url.replace('*', self._escape_table['*']) \ + .replace('_', self._escape_table['_']) + if title: + title_str = ' title="%s"' % ( + _xml_escape_attr(title) + .replace('*', self._escape_table['*']) + .replace('_', self._escape_table['_'])) + else: + title_str = '' + if is_img: + img_class_str = self._html_class_str_from_tag("img") + result = '<img src="%s" alt="%s"%s%s%s' \ + % (url.replace('"', '"'), + _xml_escape_attr(link_text), + title_str, img_class_str, self.empty_element_suffix) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[url_end_idx:] + elif start_idx >= anchor_allowed_pos: + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[url_end_idx:] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + continue + + # Reference anchor or img? + else: + match = self._tail_of_reference_link_re.match(text, p) + if match: + # Handle a reference-style anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + link_id = match.group("id").lower() + if not link_id: + link_id = link_text.lower() # for links like [this][] + if link_id in self.urls: + url = self.urls[link_id] + # We've got to encode these to avoid conflicting + # with italics/bold. + url = url.replace('*', self._escape_table['*']) \ + .replace('_', self._escape_table['_']) + title = self.titles.get(link_id) + if title: + title = _xml_escape_attr(title) \ + .replace('*', self._escape_table['*']) \ + .replace('_', self._escape_table['_']) + title_str = ' title="%s"' % title + else: + title_str = '' + if is_img: + img_class_str = self._html_class_str_from_tag("img") + result = '<img src="%s" alt="%s"%s%s%s' \ + % (url.replace('"', '"'), + link_text.replace('"', '"'), + title_str, img_class_str, self.empty_element_suffix) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result = '<a href="%s"%s>%s</a>' \ + % (url, title_str, link_text) + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + else: + # This id isn't defined, leave the markup alone. + curr_pos = match.end() + continue + + # Otherwise, it isn't markup. + curr_pos = start_idx + 1 + + return text + + def header_id_from_text(self, text, prefix, n): + """Generate a header id attribute value from the given header + HTML content. + + This is only called if the "header-ids" extra is enabled. + Subclasses may override this for different header ids. + + @param text {str} The text of the header tag + @param prefix {str} The requested prefix for header ids. 
This is the + value of the "header-ids" extra key, if any. Otherwise, None. + @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag. + @returns {str} The value for the header tag's "id" attribute. Return + None to not have an id attribute and to exclude this header from + the TOC (if the "toc" extra is specified). + """ + header_id = _slugify(text) + if prefix and isinstance(prefix, base_string_type): + header_id = prefix + '-' + header_id + if header_id in self._count_from_header_id: + self._count_from_header_id[header_id] += 1 + header_id += '-%s' % self._count_from_header_id[header_id] + else: + self._count_from_header_id[header_id] = 1 + return header_id + + _toc = None + def _toc_add_entry(self, level, id, name): + if self._toc is None: + self._toc = [] + self._toc.append((level, id, self._unescape_special_chars(name))) + + _h_re_base = r''' + (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+) + | + (^(\#{1,6}) # \1 = string of #'s + [ \t]%s + (.+?) # \2 = Header text + [ \t]* + (?<!\\) # ensure not an escaped trailing '#' + \#* # optional closing #'s (not counted) + \n+ + ) + ''' + + _h_re = re.compile(_h_re_base % '*', re.X | re.M) + _h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M) + + def _h_sub(self, match): + if match.group(1) is not None: + # Setext header + n = {"=": 1, "-": 2}[match.group(3)[0]] + header_group = match.group(2) + else: + # atx header + n = len(match.group(5)) + header_group = match.group(6) + + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + header_id_attr = "" + if "header-ids" in self.extras: + header_id = self.header_id_from_text(header_group, + self.extras["header-ids"], n) + if header_id: + header_id_attr = ' id="%s"' % header_id + html = self._run_span_gamut(header_group) + if "toc" in self.extras and header_id: + self._toc_add_entry(n, header_id, html) + return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n) + + def _do_headers(self, text): + # Setext-style headers: + # Header 1 + # ======== + # + # Header 2 + # -------- + + # atx-style headers: + # # Header 1 + # ## Header 2 + # ## Header 2 with closing hashes ## + # ... + # ###### Header 6 + + if 'tag-friendly' in self.extras: + return self._h_re_tag_friendly.sub(self._h_sub, text) + return self._h_re.sub(self._h_sub, text) + + _marker_ul_chars = '*+-' + _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars + _marker_ul = '(?:[%s])' % _marker_ul_chars + _marker_ol = r'(?:\d+\.)' + + def _list_sub(self, match): + lst = match.group(1) + lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol" + result = self._process_list_items(lst) + if self.list_level: + return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type) + else: + return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type) + + def _do_lists(self, text): + # Form HTML ordered (numbered) and unordered (bulleted) lists. + + # Iterate over each *non-overlapping* list match. + pos = 0 + while True: + # Find the *first* hit for either list style (ul or ol). We + # match ul and ol separately to avoid adjacent lists of different + # types running into each other (see issue #16). + hits = [] + for marker_pat in (self._marker_ul, self._marker_ol): + less_than_tab = self.tab_width - 1 + whole_list = r''' + ( # \1 = whole list + ( # \2 + [ ]{0,%d} + (%s) # \3 = first list item marker + [ \t]+ + (?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case. + ) + (?:.+?) + ( # \4 + \Z + | + \n{2,} + (?=\S) + (?! 
# Negative lookahead for another list item marker + [ \t]* + %s[ \t]+ + ) + ) + ) + ''' % (less_than_tab, marker_pat, marker_pat) + if self.list_level: # sub-list + list_re = re.compile("^"+whole_list, re.X | re.M | re.S) + else: + list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, + re.X | re.M | re.S) + match = list_re.search(text, pos) + if match: + hits.append((match.start(), match)) + if not hits: + break + hits.sort() + match = hits[0][1] + start, end = match.span() + middle = self._list_sub(match) + text = text[:start] + middle + text[end:] + pos = start + len(middle) # start pos for next attempted match + + return text + + _list_item_re = re.compile(r''' + (\n)? # leading line = \1 + (^[ \t]*) # leading whitespace = \2 + (?P<marker>%s) [ \t]+ # list marker = \3 + ((?:.+?) # list item text = \4 + (\n{1,2})) # eols = \5 + (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+)) + ''' % (_marker_any, _marker_any), + re.M | re.X | re.S) + + _task_list_item_re = re.compile(r''' + (\[[\ x]\])[ \t]+ # tasklist marker = \1 + (.*) # list item text = \2 + ''', re.M | re.X | re.S) + + _task_list_warpper_str = r'<p><input type="checkbox" class="task-list-item-checkbox" %sdisabled>%s</p>' + + def _task_list_item_sub(self, match): + marker = match.group(1) + item_text = match.group(2) + if marker == '[x]': + return self._task_list_warpper_str % ('checked ', item_text) + elif marker == '[ ]': + return self._task_list_warpper_str % ('', item_text) + + _last_li_endswith_two_eols = False + def _list_item_sub(self, match): + item = match.group(4) + leading_line = match.group(1) + if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: + item = self._run_block_gamut(self._outdent(item)) + else: + # Recursion for sub-lists: + item = self._do_lists(self._outdent(item)) + if item.endswith('\n'): + item = item[:-1] + item = self._run_span_gamut(item) + self._last_li_endswith_two_eols = (len(match.group(5)) == 2) + + if "task_list" in self.extras: + item = self._task_list_item_re.sub(self._task_list_item_sub, item) + + return "<li>%s</li>\n" % item + + def _process_list_items(self, list_str): + # Process the contents of a single ordered or unordered list, + # splitting it into individual list items. + + # The $g_list_level global keeps track of when we're inside a list. + # Each time we enter a list, we increment it; when we leave a list, + # we decrement. If it's zero, we're not in a list anymore. + # + # We do this because when we're not inside a list, we want to treat + # something like this: + # + # I recommend upgrading to version + # 8. Oops, now this line is treated + # as a sub-list. + # + # As a single paragraph, despite the fact that the second line starts + # with a digit-period-space sequence. + # + # Whereas when we're inside a list (or sub-list), that line will be + # treated as the start of a sub-list. What a kludge, huh? This is + # an aspect of Markdown's syntax that's hard to parse perfectly + # without resorting to mind-reading. Perhaps the solution is to + # change the syntax rules such that sub-lists must start with a + # starting cardinal number; e.g. "1." or "a.". 
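+        # Illustrative restatement of the case above: with list_level == 0 the
+        # text
+        #     I recommend upgrading to version
+        #     8. Oops, now this line is treated
+        # stays a single <p>; with list_level > 0 the same "8." line opens a
+        # nested <ol> sub-list.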
+ self.list_level += 1 + self._last_li_endswith_two_eols = False + list_str = list_str.rstrip('\n') + '\n' + list_str = self._list_item_re.sub(self._list_item_sub, list_str) + self.list_level -= 1 + return list_str + + def _get_pygments_lexer(self, lexer_name): + try: + from pygments import lexers, util + except ImportError: + return None + try: + return lexers.get_lexer_by_name(lexer_name) + except util.ClassNotFound: + return None + + def _color_with_pygments(self, codeblock, lexer, **formatter_opts): + import pygments + import pygments.formatters + + class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): + def _wrap_code(self, inner): + """A function for use in a Pygments Formatter which + wraps in <code> tags. + """ + yield 0, "<code>" + for tup in inner: + yield tup + yield 0, "</code>" + + def wrap(self, source, outfile): + """Return the source with a code, pre, and div.""" + return self._wrap_div(self._wrap_pre(self._wrap_code(source))) + + formatter_opts.setdefault("cssclass", "codehilite") + formatter = HtmlCodeFormatter(**formatter_opts) + return pygments.highlight(codeblock, lexer, formatter) + + def _code_block_sub(self, match, is_fenced_code_block=False): + lexer_name = None + if is_fenced_code_block: + lexer_name = match.group(1) + if lexer_name: + formatter_opts = self.extras['fenced-code-blocks'] or {} + codeblock = match.group(2) + codeblock = codeblock[:-1] # drop one trailing newline + else: + codeblock = match.group(1) + codeblock = self._outdent(codeblock) + codeblock = self._detab(codeblock) + codeblock = codeblock.lstrip('\n') # trim leading newlines + codeblock = codeblock.rstrip() # trim trailing whitespace + + # Note: "code-color" extra is DEPRECATED. + if "code-color" in self.extras and codeblock.startswith(":::"): + lexer_name, rest = codeblock.split('\n', 1) + lexer_name = lexer_name[3:].strip() + codeblock = rest.lstrip("\n") # Remove lexer declaration line. + formatter_opts = self.extras['code-color'] or {} + + if lexer_name: + def unhash_code(codeblock): + for key, sanitized in list(self.html_spans.items()): + codeblock = codeblock.replace(key, sanitized) + replacements = [ + ("&", "&"), + ("<", "<"), + (">", ">") + ] + for old, new in replacements: + codeblock = codeblock.replace(old, new) + return codeblock + lexer = self._get_pygments_lexer(lexer_name) + if lexer: + codeblock = unhash_code( codeblock ) + colored = self._color_with_pygments(codeblock, lexer, + **formatter_opts) + return "\n\n%s\n\n" % colored + + codeblock = self._encode_code(codeblock) + pre_class_str = self._html_class_str_from_tag("pre") + code_class_str = self._html_class_str_from_tag("code") + return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % ( + pre_class_str, code_class_str, codeblock) + + def _html_class_str_from_tag(self, tag): + """Get the appropriate ' class="..."' string (note the leading + space), if any, for the given tag. + """ + if "html-classes" not in self.extras: + return "" + try: + html_classes_from_tag = self.extras["html-classes"] + except TypeError: + return "" + else: + if tag in html_classes_from_tag: + return ' class="%s"' % html_classes_from_tag[tag] + return "" + + def _do_code_blocks(self, text): + """Process Markdown `<pre><code>` blocks.""" + code_block_re = re.compile(r''' + (?:\n\n|\A\n?) 
+ ( # $1 = the code block -- one or more lines, starting with a space/tab + (?: + (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces + .*\n+ + )+ + ) + ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc + # Lookahead to make sure this block isn't already in a code block. + # Needed when syntax highlighting is being used. + (?![^<]*\</code\>) + ''' % (self.tab_width, self.tab_width), + re.M | re.X) + return code_block_re.sub(self._code_block_sub, text) + + _fenced_code_block_re = re.compile(r''' + (?:\n+|\A\n?) + ^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang + (.*?) # $2 = code block content + ^```[ \t]*\n # closing fence + ''', re.M | re.X | re.S) + + def _fenced_code_block_sub(self, match): + return self._code_block_sub(match, is_fenced_code_block=True) + + def _do_fenced_code_blocks(self, text): + """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra).""" + return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text) + + # Rules for a code span: + # - backslash escapes are not interpreted in a code span + # - to include one or or a run of more backticks the delimiters must + # be a longer run of backticks + # - cannot start or end a code span with a backtick; pad with a + # space and that space will be removed in the emitted HTML + # See `test/tm-cases/escapes.text` for a number of edge-case + # examples. + _code_span_re = re.compile(r''' + (?<!\\) + (`+) # \1 = Opening run of ` + (?!`) # See Note A test/tm-cases/escapes.text + (.+?) # \2 = The code block + (?<!`) + \1 # Matching closer + (?!`) + ''', re.X | re.S) + + def _code_span_sub(self, match): + c = match.group(2).strip(" \t") + c = self._encode_code(c) + return "<code>%s</code>" % c + + def _do_code_spans(self, text): + # * Backtick quotes are used for <code></code> spans. + # + # * You can use multiple backticks as the delimiters if you want to + # include literal backticks in the code span. So, this input: + # + # Just type ``foo `bar` baz`` at the prompt. + # + # Will translate to: + # + # <p>Just type <code>foo `bar` baz</code> at the prompt.</p> + # + # There's no arbitrary limit to the number of backticks you + # can use as delimters. If you need three consecutive backticks + # in your code, use four for delimiters, etc. + # + # * You can use spaces to get literal backticks at the edges: + # + # ... type `` `bar` `` ... + # + # Turns to: + # + # ... type <code>`bar`</code> ... + return self._code_span_re.sub(self._code_span_sub, text) + + def _encode_code(self, text): + """Encode/escape certain characters inside Markdown code runs. + The point is that in code, these characters are literals, + and lose their special Markdown meanings. + """ + replacements = [ + # Encode all ampersands; HTML entities are not + # entities within a Markdown code span. 
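+            # Illustrative effect: a code span holding `AT&T <tag>` should end
+            # up as <code>AT&amp;T &lt;tag&gt;</code> in the generated HTML.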
+ ('&', '&'), + # Do the angle bracket song and dance: + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + text = text.replace(before, after) + hashed = _hash_text(text) + self._escape_table[text] = hashed + return hashed + + _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S) + def _do_strike(self, text): + text = self._strike_re.sub(r"<strike>\1</strike>", text) + return text + + _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S) + _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S) + _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S) + _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S) + def _do_italics_and_bold(self, text): + # <strong> must go first: + if "code-friendly" in self.extras: + text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text) + text = self._code_friendly_em_re.sub(r"<em>\1</em>", text) + else: + text = self._strong_re.sub(r"<strong>\2</strong>", text) + text = self._em_re.sub(r"<em>\2</em>", text) + return text + + # "smarty-pants" extra: Very liberal in interpreting a single prime as an + # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and + # "twixt" can be written without an initial apostrophe. This is fine because + # using scare quotes (single quotation marks) is rare. + _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))") + _contractions = ["tis", "twas", "twer", "neath", "o", "n", + "round", "bout", "twixt", "nuff", "fraid", "sup"] + def _do_smart_contractions(self, text): + text = self._apostrophe_year_re.sub(r"’\1", text) + for c in self._contractions: + text = text.replace("'%s" % c, "’%s" % c) + text = text.replace("'%s" % c.capitalize(), + "’%s" % c.capitalize()) + return text + + # Substitute double-quotes before single-quotes. + _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)") + _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)') + _closing_single_quote_re = re.compile(r"(?<=\S)'") + _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))') + def _do_smart_punctuation(self, text): + """Fancifies 'single quotes', "double quotes", and apostrophes. + Converts --, ---, and ... into en dashes, em dashes, and ellipses. + + Inspiration is: <http://daringfireball.net/projects/smartypants/> + See "test/tm-cases/smarty_pants.text" for a full discussion of the + support here and + <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a + discussion of some diversion from the original SmartyPants. + """ + if "'" in text: # guard for perf + text = self._do_smart_contractions(text) + text = self._opening_single_quote_re.sub("‘", text) + text = self._closing_single_quote_re.sub("’", text) + + if '"' in text: # guard for perf + text = self._opening_double_quote_re.sub("“", text) + text = self._closing_double_quote_re.sub("”", text) + + text = text.replace("---", "—") + text = text.replace("--", "–") + text = text.replace("...", "…") + text = text.replace(" . . . ", "…") + text = text.replace(". . .", "…") + return text + + _block_quote_base = r''' + ( # Wrap whole match in \1 + ( + ^[ \t]*>%s[ \t]? 
# '>' at the start of a line + .+\n # rest of the first line + (.+\n)* # subsequent consecutive lines + \n* # blanks + )+ + ) + ''' + _block_quote_re = re.compile(_block_quote_base % '', re.M | re.X) + _block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X) + _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M) + _bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M) + _bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M) + _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) + def _dedent_two_spaces_sub(self, match): + return re.sub(r'(?m)^ ', '', match.group(1)) + + def _block_quote_sub(self, match): + bq = match.group(1) + is_spoiler = 'spoiler' in self.extras and self._bq_all_lines_spoilers.match(bq) + # trim one level of quoting + if is_spoiler: + bq = self._bq_one_level_re_spoiler.sub('', bq) + else: + bq = self._bq_one_level_re.sub('', bq) + # trim whitespace-only lines + bq = self._ws_only_line_re.sub('', bq) + bq = self._run_block_gamut(bq) # recurse + + bq = re.sub('(?m)^', ' ', bq) + # These leading spaces screw with <pre> content, so we need to fix that: + bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) + + if is_spoiler: + return '<blockquote class="spoiler">\n%s\n</blockquote>\n\n' % bq + else: + return '<blockquote>\n%s\n</blockquote>\n\n' % bq + + def _do_block_quotes(self, text): + if '>' not in text: + return text + if 'spoiler' in self.extras: + return self._block_quote_re_spoiler.sub(self._block_quote_sub, text) + else: + return self._block_quote_re.sub(self._block_quote_sub, text) + + def _form_paragraphs(self, text): + # Strip leading and trailing lines: + text = text.strip('\n') + + # Wrap <p> tags. + grafs = [] + for i, graf in enumerate(re.split(r"\n{2,}", text)): + if graf in self.html_blocks: + # Unhashify HTML blocks + grafs.append(self.html_blocks[graf]) + else: + cuddled_list = None + if "cuddled-lists" in self.extras: + # Need to put back trailing '\n' for `_list_item_re` + # match at the end of the paragraph. + li = self._list_item_re.search(graf + '\n') + # Two of the same list marker in this paragraph: a likely + # candidate for a list cuddled to preceding paragraph + # text (issue 33). Note the `[-1]` is a quick way to + # consider numeric bullets (e.g. "1." and "2.") to be + # equal. + if (li and len(li.group(2)) <= 3 and li.group("next_marker") + and li.group("marker")[-1] == li.group("next_marker")[-1]): + start = li.start() + cuddled_list = self._do_lists(graf[start:]).rstrip("\n") + assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>") + graf = graf[:start] + + # Wrap <p> tags. 
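+                # Illustrative note: at this point `graf` holds one plain-text
+                # paragraph; _run_span_gamut() renders its span-level markup
+                # (links, emphasis, code spans) before it is wrapped in <p>.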
+ graf = self._run_span_gamut(graf) + grafs.append("<p>" + graf.lstrip(" \t") + "</p>") + + if cuddled_list: + grafs.append(cuddled_list) + + return "\n\n".join(grafs) + + def _add_footnotes(self, text): + if self.footnotes: + footer = [ + '<div class="footnotes">', + '<hr' + self.empty_element_suffix, + '<ol>', + ] + for i, id in enumerate(self.footnote_ids): + if i != 0: + footer.append('') + footer.append('<li id="fn-%s">' % id) + footer.append(self._run_block_gamut(self.footnotes[id])) + backlink = ('<a href="#fnref-%s" ' + 'class="footnoteBackLink" ' + 'title="Jump back to footnote %d in the text.">' + '↩</a>' % (id, i+1)) + if footer[-1].endswith("</p>"): + footer[-1] = footer[-1][:-len("</p>")] \ + + ' ' + backlink + "</p>" + else: + footer.append("\n<p>%s</p>" % backlink) + footer.append('</li>') + footer.append('</ol>') + footer.append('</div>') + return text + '\n\n' + '\n'.join(footer) + else: + return text + + # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: + # http://bumppo.net/projects/amputator/ + _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') + _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) + _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I) + + def _encode_amps_and_angles(self, text): + # Smart processing for ampersands and angle brackets that need + # to be encoded. + text = self._ampersand_re.sub('&', text) + + # Encode naked <'s + text = self._naked_lt_re.sub('<', text) + + # Encode naked >'s + # Note: Other markdown implementations (e.g. Markdown.pl, PHP + # Markdown) don't do this. + text = self._naked_gt_re.sub('>', text) + return text + + def _encode_backslash_escapes(self, text): + for ch, escape in list(self._escape_table.items()): + text = text.replace("\\"+ch, escape) + return text + + _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) + def _auto_link_sub(self, match): + g1 = match.group(1) + return '<a href="%s">%s</a>' % (g1, g1) + + _auto_email_link_re = re.compile(r""" + < + (?:mailto:)? + ( + [-.\w]+ + \@ + [-\w]+(\.[-\w]+)*\.[a-z]+ + ) + > + """, re.I | re.X | re.U) + def _auto_email_link_sub(self, match): + return self._encode_email_address( + self._unescape_special_chars(match.group(1))) + + def _do_auto_links(self, text): + text = self._auto_link_re.sub(self._auto_link_sub, text) + text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) + return text + + def _encode_email_address(self, addr): + # Input: an email address, e.g. "foo@example.com" + # + # Output: the email address as a mailto link, with each character + # of the address encoded as either a decimal or hex entity, in + # the hopes of foiling most address harvesting spam bots. E.g.: + # + # <a href="mailto:foo@e + # xample.com">foo + # @example.com</a> + # + # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk + # mailing list: <http://tinyurl.com/yu7ue> + chars = [_xml_encode_email_char_at_random(ch) + for ch in "mailto:" + addr] + # Strip the mailto: from the visible part. + addr = '<a href="%s">%s</a>' \ + % (''.join(chars), ''.join(chars[7:])) + return addr + + def _do_link_patterns(self, text): + """Caveat emptor: there isn't much guarding against link + patterns being formed inside other standard Markdown links, e.g. + inside a [link def][like this]. + + Dev Notes: *Could* consider prefixing regexes with a negative + lookbehind assertion to attempt to guard against this. 
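+        Illustrative example (hypothetical pattern, not shipped with this
+        module): passing
+            link_patterns=[(re.compile(r'\bRF-(\d+)\b'),
+                            r'http://bugs.example.com/RF-\1')]
+        to markdown() would turn the text "RF-1234" into a link to that URL.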
+ """ + link_from_hash = {} + for regex, repl in self.link_patterns: + replacements = [] + for match in regex.finditer(text): + if hasattr(repl, "__call__"): + href = repl(match) + else: + href = match.expand(repl) + replacements.append((match.span(), href)) + for (start, end), href in reversed(replacements): + escaped_href = ( + href.replace('"', '"') # b/c of attr quote + # To avoid markdown <em> and <strong>: + .replace('*', self._escape_table['*']) + .replace('_', self._escape_table['_'])) + link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) + hash = _hash_text(link) + link_from_hash[hash] = link + text = text[:start] + hash + text[end:] + for hash, link in list(link_from_hash.items()): + text = text.replace(hash, link) + return text + + def _unescape_special_chars(self, text): + # Swap back in all the special characters we've hidden. + for ch, hash in list(self._escape_table.items()): + text = text.replace(hash, ch) + return text + + def _outdent(self, text): + # Remove one level of line-leading tabs or spaces + return self._outdent_re.sub('', text) + + +class MarkdownWithExtras(Markdown): + """A markdowner class that enables most extras: + + - footnotes + - code-color (only has effect if 'pygments' Python module on path) + + These are not included: + - pyshell (specific to Python-related documenting) + - code-friendly (because it *disables* part of the syntax) + - link-patterns (because you need to specify some actual + link-patterns anyway) + """ + extras = ["footnotes", "code-color"] + + +# ---- internal support functions + +class UnicodeWithAttrs(unicode): + """A subclass of unicode used for the return value of conversion to + possibly attach some attributes. E.g. the "toc_html" attribute when + the "toc" extra is used. + """ + metadata = None + _toc = None + def toc_html(self): + """Return the HTML for the current TOC. + + This expects the `_toc` attribute to have been set on this instance. + """ + if self._toc is None: + return None + + def indent(): + return ' ' * (len(h_stack) - 1) + lines = [] + h_stack = [0] # stack of header-level numbers + for level, id, name in self._toc: + if level > h_stack[-1]: + lines.append("%s<ul>" % indent()) + h_stack.append(level) + elif level == h_stack[-1]: + lines[-1] += "</li>" + else: + while level < h_stack[-1]: + h_stack.pop() + if not lines[-1].endswith("</li>"): + lines[-1] += "</li>" + lines.append("%s</ul></li>" % indent()) + lines.append('%s<li><a href="#%s">%s</a>' % ( + indent(), id, name)) + while len(h_stack) > 1: + h_stack.pop() + if not lines[-1].endswith("</li>"): + lines[-1] += "</li>" + lines.append("%s</ul>" % indent()) + return '\n'.join(lines) + '\n' + toc_html = property(toc_html) + +## {{{ http://code.activestate.com/recipes/577257/ (r1) +_slugify_strip_re = re.compile(r'[^\w\s-]') +_slugify_hyphenate_re = re.compile(r'[-\s]+') +def _slugify(value): + """ + Normalizes string, converts to lowercase, removes non-alpha characters, + and converts spaces to hyphens. + + From Django's "django/template/defaultfilters.py". 
+ """ + import unicodedata + value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode() + value = _slugify_strip_re.sub('', value).strip().lower() + return _slugify_hyphenate_re.sub('-', value) +## end of http://code.activestate.com/recipes/577257/ }}} + + +# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 +def _curry(*args, **kwargs): + function, args = args[0], args[1:] + def result(*rest, **kwrest): + combined = kwargs.copy() + combined.update(kwrest) + return function(*args + rest, **combined) + return result + + +# Recipe: regex_from_encoded_pattern (1.0) +def _regex_from_encoded_pattern(s): + """'foo' -> re.compile(re.escape('foo')) + '/foo/' -> re.compile('foo') + '/foo/i' -> re.compile('foo', re.I) + """ + if s.startswith('/') and s.rfind('/') != 0: + # Parse it: /PATTERN/FLAGS + idx = s.rfind('/') + pattern, flags_str = s[1:idx], s[idx+1:] + flag_from_char = { + "i": re.IGNORECASE, + "l": re.LOCALE, + "s": re.DOTALL, + "m": re.MULTILINE, + "u": re.UNICODE, + } + flags = 0 + for char in flags_str: + try: + flags |= flag_from_char[char] + except KeyError: + raise ValueError("unsupported regex flag: '%s' in '%s' " + "(must be one of '%s')" + % (char, s, ''.join(list(flag_from_char.keys())))) + return re.compile(s[1:idx], flags) + else: # not an encoded regex + return re.compile(re.escape(s)) + + +# Recipe: dedent (0.1.2) +def _dedentlines(lines, tabsize=8, skip_first_line=False): + """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines + + "lines" is a list of lines to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + Same as dedent() except operates on a sequence of lines. Note: the + lines list is modified **in-place**. 
+    """
+    DEBUG = False
+    if DEBUG:
+        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+              % (tabsize, skip_first_line))
+    margin = None
+    for i, line in enumerate(lines):
+        if i == 0 and skip_first_line: continue
+        indent = 0
+        for ch in line:
+            if ch == ' ':
+                indent += 1
+            elif ch == '\t':
+                indent += tabsize - (indent % tabsize)
+            elif ch in '\r\n':
+                continue # skip all-whitespace lines
+            else:
+                break
+        else:
+            continue # skip all-whitespace lines
+        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
+        if margin is None:
+            margin = indent
+        else:
+            margin = min(margin, indent)
+    if DEBUG: print("dedent: margin=%r" % margin)
+
+    if margin is not None and margin > 0:
+        for i, line in enumerate(lines):
+            if i == 0 and skip_first_line: continue
+            removed = 0
+            for j, ch in enumerate(line):
+                if ch == ' ':
+                    removed += 1
+                elif ch == '\t':
+                    removed += tabsize - (removed % tabsize)
+                elif ch in '\r\n':
+                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
+                    lines[i] = lines[i][j:]
+                    break
+                else:
+                    raise ValueError("unexpected non-whitespace char %r in "
+                                     "line %r while removing %d-space margin"
+                                     % (ch, line, margin))
+                if DEBUG:
+                    print("dedent: %r: %r -> removed %d/%d"\
+                          % (line, ch, removed, margin))
+                if removed == margin:
+                    lines[i] = lines[i][j+1:]
+                    break
+                elif removed > margin:
+                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+                    break
+            else:
+                if removed:
+                    lines[i] = lines[i][removed:]
+    return lines
+
+
+def _dedent(text, tabsize=8, skip_first_line=False):
+    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
+
+        "text" is the text to dedent.
+        "tabsize" is the tab width to use for indent width calculations.
+        "skip_first_line" is a boolean indicating if the first line should
+            be skipped for calculating the indent width and for dedenting.
+            This is sometimes useful for docstrings and similar.
+
+    textwrap.dedent(s), but don't expand tabs to spaces
+    """
+    lines = text.splitlines(1)
+    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
+    return ''.join(lines)
+
+
+class _memoized(object):
+    """Decorator that caches a function's return value each time it is called.
+    If called later with the same arguments, the cached value is returned, and
+    not re-evaluated.
+
+    http://wiki.python.org/moin/PythonDecoratorLibrary
+    """
+    def __init__(self, func):
+        self.func = func
+        self.cache = {}
+
+    def __call__(self, *args):
+        try:
+            return self.cache[args]
+        except KeyError:
+            self.cache[args] = value = self.func(*args)
+            return value
+        except TypeError:
+            # uncachable -- for instance, passing a list as an argument.
+            # Better to not cache than to blow up entirely.
+            return self.func(*args)
+
+    def __repr__(self):
+        """Return the function's docstring."""
+        return self.func.__doc__
+
+
+def _xml_oneliner_re_from_tab_width(tab_width):
+    """Standalone XML processing instruction regex."""
+    return re.compile(r"""
+        (?:
+            (?<=\n\n)       # Starting after a blank line
+            |               # or
+            \A\n?           # the beginning of the doc
+        )
+        (                           # save in $1
+            [ ]{0,%d}
+            (?:
+                <\?\w+\b\s+.*?\?>   # XML processing instruction
+                |
+                <\w+:\w+\b\s+.*?/>  # namespaced single tag
+            )
+            [ \t]*
+            (?=\n{2,}|\Z)           # followed by a blank line or end of document
+        )
+        """ % (tab_width - 1), re.X)
+_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
+
+
+def _hr_tag_re_from_tab_width(tab_width):
+    return re.compile(r"""
+        (?:
+            (?<=\n\n)       # Starting after a blank line
+            |               # or
+            \A\n?           # the beginning of the doc
+        )
+        (                       # save in \1
+            [ ]{0,%d}
+            <(hr)               # start tag = \2
+            \b                  # word break
+            ([^<>])*?           #
+            /?>                 # the matching end tag
+            [ \t]*
+            (?=\n{2,}|\Z)       # followed by a blank line or end of document
+        )
+        """ % (tab_width - 1), re.X)
+_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
+
+
+def _xml_escape_attr(attr, skip_single_quote=True):
+    """Escape the given string for use in an HTML/XML tag attribute.
+
+    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
+    the tag attribute is surrounded by double quotes.
+    """
+    escaped = (attr
+        .replace('&', '&amp;')
+        .replace('"', '&quot;')
+        .replace('<', '&lt;')
+        .replace('>', '&gt;'))
+    if not skip_single_quote:
+        escaped = escaped.replace("'", "&#39;")
+    return escaped
+
+
+def _xml_encode_email_char_at_random(ch):
+    r = random()
+    # Roughly 10% raw, 45% hex, 45% dec.
+    # '@' *must* be encoded. I [John Gruber] insist.
+    # Issue 26: '_' must be encoded.
+    if r > 0.9 and ch not in "@_":
+        return ch
+    elif r < 0.45:
+        # The [1:] is to drop leading '0': 0x63 -> x63
+        return '&#%s;' % hex(ord(ch))[1:]
+    else:
+        return '&#%s;' % ord(ch)
+
+
+# ---- mainline
+
+class _NoReflowFormatter(optparse.IndentedHelpFormatter):
+    """An optparse formatter that does NOT reflow the description."""
+    def format_description(self, description):
+        return description or ""
+
+
+def _test():
+    import doctest
+    doctest.testmod()
+
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+    if not logging.root.handlers:
+        logging.basicConfig()
+
+    usage = "usage: %prog [PATHS...]"
+    version = "%prog "+__version__
+    parser = optparse.OptionParser(prog="markdown2", usage=usage,
+        version=version, description=cmdln_desc,
+        formatter=_NoReflowFormatter())
+    parser.add_option("-v", "--verbose", dest="log_level",
+                      action="store_const", const=logging.DEBUG,
+                      help="more verbose output")
+    parser.add_option("--encoding",
+                      help="specify encoding of text content")
+    parser.add_option("--html4tags", action="store_true", default=False,
+                      help="use HTML 4 style for empty element tags")
+    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
+                      help="sanitize literal HTML: 'escape' escapes "
+                           "HTML meta chars, 'replace' replaces with an "
+                           "[HTML_REMOVED] note")
+    parser.add_option("-x", "--extras", action="append",
+                      help="Turn on specific extra features (not part of "
+                           "the core Markdown spec). See above.")
+    parser.add_option("--use-file-vars",
+                      help="Look for and use Emacs-style 'markdown-extras' "
+                           "file var to turn on extras. See "
+                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
+    parser.add_option("--link-patterns-file",
+                      help="path to a link pattern file")
+    parser.add_option("--self-test", action="store_true",
+                      help="run internal self-tests (some doctests)")
+    parser.add_option("--compare", action="store_true",
+                      help="run against Markdown.pl as well (for testing)")
+    parser.set_defaults(log_level=logging.INFO, compare=False,
+                        encoding="utf-8", safe_mode=None, use_file_vars=False)
+    opts, paths = parser.parse_args()
+    log.setLevel(opts.log_level)
+
+    if opts.self_test:
+        return _test()
+
+    if opts.extras:
+        extras = {}
+        for s in opts.extras:
+            splitter = re.compile("[,;: ]+")
+            for e in splitter.split(s):
+                if '=' in e:
+                    ename, earg = e.split('=', 1)
+                    try:
+                        earg = int(earg)
+                    except ValueError:
+                        pass
+                else:
+                    ename, earg = e, None
+                extras[ename] = earg
+    else:
+        extras = None
+
+    if opts.link_patterns_file:
+        link_patterns = []
+        f = open(opts.link_patterns_file)
+        try:
+            for i, line in enumerate(f.readlines()):
+                if not line.strip(): continue
+                if line.lstrip().startswith("#"): continue
+                try:
+                    pat, href = line.rstrip().rsplit(None, 1)
+                except ValueError:
+                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
+                                        % (opts.link_patterns_file, i+1, line))
+                link_patterns.append(
+                    (_regex_from_encoded_pattern(pat), href))
+        finally:
+            f.close()
+    else:
+        link_patterns = None
+
+    from os.path import join, dirname, abspath, exists
+    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
+                       "Markdown.pl")
+    if not paths:
+        paths = ['-']
+    for path in paths:
+        if path == '-':
+            text = sys.stdin.read()
+        else:
+            fp = codecs.open(path, 'r', opts.encoding)
+            text = fp.read()
+            fp.close()
+        if opts.compare:
+            from subprocess import Popen, PIPE
+            print("==== Markdown.pl ====")
+            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
+            p.stdin.write(text.encode('utf-8'))
+            p.stdin.close()
+            perl_html = p.stdout.read().decode('utf-8')
+            if py3:
+                sys.stdout.write(perl_html)
+            else:
+                sys.stdout.write(perl_html.encode(
+                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+            print("==== markdown2.py ====")
+        html = markdown(text,
+            html4tags=opts.html4tags,
+            safe_mode=opts.safe_mode,
+            extras=extras, link_patterns=link_patterns,
+            use_file_vars=opts.use_file_vars)
+        if py3:
+            sys.stdout.write(html)
+        else:
+            sys.stdout.write(html.encode(
+                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+        if extras and "toc" in extras:
+            log.debug("toc_html: " +
+                str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')))
+        if opts.compare:
+            test_dir = join(dirname(dirname(abspath(__file__))), "test")
+            if exists(join(test_dir, "test_markdown2.py")):
+                sys.path.insert(0, test_dir)
+                from test_markdown2 import norm_html_from_html
+                norm_html = norm_html_from_html(html)
+                norm_perl_html = norm_html_from_html(perl_html)
+            else:
+                norm_html = html
+                norm_perl_html = perl_html
+            print("==== match? %r ====" % (norm_perl_html == norm_html))
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
diff --git a/src/fastwq/libs/AnkiHub/updates.py b/src/fastwq/libs/AnkiHub/updates.py
new file mode 100644
index 0000000..86249c6
--- /dev/null
+++ b/src/fastwq/libs/AnkiHub/updates.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'updates.ui'
+#
+# Created: Sat Sep 10 10:16:01 2016
+#      by: PyQt4 UI code generator 4.11.2
+#
+# WARNING! All changes made in this file will be lost!
+
+from PyQt4 import QtCore, QtGui
+
+try:
+    _fromUtf8 = QtCore.QString.fromUtf8
+except AttributeError:
+    def _fromUtf8(s):
+        return s
+
+try:
+    _encoding = QtGui.QApplication.UnicodeUTF8
+    def _translate(context, text, disambig):
+        return QtGui.QApplication.translate(context, text, disambig, _encoding)
+except AttributeError:
+    def _translate(context, text, disambig):
+        return QtGui.QApplication.translate(context, text, disambig)
+
+class Ui_DialogUpdates(object):
+    def setupUi(self, DialogUpdates):
+        DialogUpdates.setObjectName(_fromUtf8("DialogUpdates"))
+        DialogUpdates.resize(500, 400)
+        self.verticalLayout = QtGui.QVBoxLayout(DialogUpdates)
+        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
+        self.labelUpdates = QtGui.QLabel(DialogUpdates)
+        self.labelUpdates.setWordWrap(True)
+        self.labelUpdates.setOpenExternalLinks(True)
+        self.labelUpdates.setObjectName(_fromUtf8("labelUpdates"))
+        self.verticalLayout.addWidget(self.labelUpdates)
+        self.textBrowser = QtGui.QTextBrowser(DialogUpdates)
+        self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
+        self.verticalLayout.addWidget(self.textBrowser)
+        self.horizontalLayout = QtGui.QHBoxLayout()
+        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
+        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.horizontalLayout.addItem(spacerItem)
+        self.always = QtGui.QPushButton(DialogUpdates)
+        self.always.setObjectName(_fromUtf8("always"))
+        self.horizontalLayout.addWidget(self.always)
+        self.update = QtGui.QPushButton(DialogUpdates)
+        self.update.setObjectName(_fromUtf8("update"))
+        self.horizontalLayout.addWidget(self.update)
+        self.dont = QtGui.QPushButton(DialogUpdates)
+        self.dont.setObjectName(_fromUtf8("dont"))
+        self.horizontalLayout.addWidget(self.dont)
+        self.never = QtGui.QPushButton(DialogUpdates)
+        self.never.setObjectName(_fromUtf8("never"))
+        self.horizontalLayout.addWidget(self.never)
+        self.verticalLayout.addLayout(self.horizontalLayout)
+
+        self.retranslateUi(DialogUpdates)
+        QtCore.QMetaObject.connectSlotsByName(DialogUpdates)
+
+    def retranslateUi(self, DialogUpdates):
+        DialogUpdates.setWindowTitle(_translate("DialogUpdates", "Update Checker", None))
+        self.labelUpdates.setText(_translate("DialogUpdates", "<html><head/><body><p>A new version of {0} is available for download! </p><p>Do you want to update {1}to version {2}?</p><p>Changes from your version are listed below:</p></body></html>", None))
+        self.always.setText(_translate("DialogUpdates", "Always update", None))
+        self.update.setText(_translate("DialogUpdates", "Update", None))
+        self.dont.setText(_translate("DialogUpdates", "Don\'t update", None))
+        self.never.setText(_translate("DialogUpdates", "Never", None))
+
diff --git a/src/fastwq/libs/__init__.py b/src/fastwq/libs/__init__.py
index 3fcf778..31c7ea7 100644
--- a/src/fastwq/libs/__init__.py
+++ b/src/fastwq/libs/__init__.py
@@ -19,3 +19,4 @@
 from .mdict import IndexBuilder as MdxBuilder
 from .pystardict import Dictionary as StardictBuilder
+import ankihub
diff --git a/src/fastwq/libs/ankihub.py b/src/fastwq/libs/ankihub.py
new file mode 100644
index 0000000..9d2f641
--- /dev/null
+++ b/src/fastwq/libs/ankihub.py
@@ -0,0 +1,279 @@
+from PyQt4 import QtCore,QtGui
+import httplib
+import urllib2
+import json
+import os
+import sys
+import zipfile
+import traceback
+import io
+from AnkiHub.updates import Ui_DialogUpdates
+from AnkiHub.markdown2 import markdown
+import aqt
+from anki.hooks import addHook
+from anki.utils import isMac, isWin
+
+# taken from Anki's aqt/profiles.py
+def defaultBase():
+    if isWin:
+        loc = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DocumentsLocation)
+        return os.path.join(loc, "Anki")
+    elif isMac:
+        return os.path.expanduser("~/Documents/Anki")
+    else:
+        p = os.path.expanduser("~/Anki")
+        if os.path.exists(p):
+            return p
+        else:
+            loc = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DocumentsLocation)
+            if loc[:-1] == QtGui.QDesktopServices.storageLocation(
+                QtGui.QDesktopServices.HomeLocation):
+                return os.path.expanduser("~/Documents/Anki")
+            else:
+                return os.path.join(loc, "Anki")
+
+headers = {"User-Agent": "AnkiHub"}
+dataPath = os.path.join(defaultBase(),'.ankihub.json')
+
+
+class DialogUpdates(QtGui.QDialog, Ui_DialogUpdates):
+    def __init__(self, parent, data, oldData, callback, automaticAnswer=None,install=False):
+        QtGui.QDialog.__init__(self,parent)
+        self.setupUi(self)
+        totalSize = sum(map(lambda x:x['size'],data['assets']))
+
+        def answer(doUpdate,answ):
+            callback(doUpdate,answ,self.appendHtml,self.close,install)
+
+        self.html = u''
+        self.appendHtml(markdown(data['body']))
+
+        if not automaticAnswer:
+            self.connect(self.update,QtCore.SIGNAL('clicked()'),
+                lambda:answer(True,'ask'))
+            self.connect(self.dont,QtCore.SIGNAL('clicked()'),
+                lambda:answer(False,'ask'))
+            self.connect(self.always,QtCore.SIGNAL('clicked()'),
+                lambda:answer(True,'always'))
+            self.connect(self.never,QtCore.SIGNAL('clicked()'),
+                lambda:answer(False,'never'))
+        else:
+            self.update.setEnabled(False)
+            self.dont.setEnabled(False)
+            self.always.setEnabled(False)
+            self.never.setEnabled(False)
+            answer(True,automaticAnswer)
+
+        fromVersion = ''
+        if 'tag_name' in oldData:
+            fromVersion = u'from {0} '.format(oldData['tag_name'])
+        self.labelUpdates.setText(
+            unicode(self.labelUpdates.text()).format(
+                data['name'],
+                fromVersion,
+                data['tag_name']))
+
+
+    def appendHtml(self,html='',temp=''):
+        self.html += html
+        self.textBrowser.setHtml(u'<html><body>{0}{1}</body></html>'.format(self.html,temp))
+
+
+
+def installZipFile(data, fname):
+    base = os.path.join(defaultBase(),'addons')
+    if fname.endswith(".py"):
+        path = os.path.join(base, fname)
+        open(path, "wb").write(data)
+        return True
+    # .zip file
+    try:
+        z = zipfile.ZipFile(io.BytesIO(data))
+    except zipfile.BadZipfile:
+        return False
+    for n in z.namelist():
+        if n.endswith("/"):
+            # folder; ignore
+            continue
+        # write
+        z.extract(n, base)
+    return True
+
+def asset(a):
+    return {
+        'url': a['browser_download_url'],
+        'size': a['size']
+    }
+
+profileLoaded = True
+def _profileLoaded():
+    profileLoaded = True
+
+addHook("profileLoaded",_profileLoaded)
+
+def updateSingle(repositories,path,data):
+    def callback(doUpdate,answer,appendHtml,onReady,install):
+        if doUpdate:
+            for asset in data['assets']:
+                code = asset['url']
+                p, fname = os.path.split(code)
+                response = urllib2.urlopen(code)
+                meta = response.info()
+                file_size = int(meta.getheaders("Content-Length")[0])
+                d = buffer('')
+                dl = 0
+                i = 0
+                lastPercent = None
+                while True:
+                    dkb = response.read(1024)
+                    if not dkb:
+                        break
+                    dl += len(dkb)
+                    d += dkb
+                    if dl*100/file_size>i:
+                        lastPercent = int(dl*100/file_size)
+                        i = lastPercent+1
+                        appendHtml(temp='<br />Downloading {1}: {0}%<br/>'.format(lastPercent,fname))
+                        QtGui.QApplication.instance().processEvents()
+                appendHtml('<br />Downloading {1}: 100%<br/>'.format(int(dl*100/file_size),fname))
+                def installData():
+                    if install:
+                        filesBefore = aqt.mw.addonManager.files()
+                        #directoriesBefore = aqt.mw.addonManager.directories()
+                    if not installZipFile(d,fname):
+                        appendHtml('Corrupt file<br/>')
+                    else:
+                        repositories[path] = data
+                        repositories[path]['update'] = answer
+                        with open(dataPath,'w') as file:
+                            json.dump(repositories,file,indent=2)
+                        if install:
+                            appendHtml('Executing new scripts...<br/>')
+                            newFiles = set(aqt.mw.addonManager.files()) - set(filesBefore)
+                            #newDirectories = set(aqt.mw.addonManager.directories()) - set(directoriesBefore)
+                            onReady() # close the AnkiHub update window
+                            for file in newFiles:
+                                try:
+                                    __import__(file.replace(".py", ""))
+                                except:
+                                    traceback.print_exc()
+                            #for directory in newDirectories:
+                            #    try:
+                            #        __import__(directory)
+                            #    except:
+                            #        traceback.print_exc()
+                            aqt.mw.addonManager.rebuildAddonsMenu()
+                        else:
+                            onReady() # close the AnkiHub update window
+
+                installData()
+        else:
+            repositories[path]['update'] = answer
+            with open(dataPath,'w') as file:
+                json.dump(repositories,file,indent=2)
+            onReady()
+    return callback
+
+datas = []
+
+def update(add=[],install=False):
+    conn = httplib.HTTPSConnection("api.github.com")
+    try:
+        with open(dataPath,'r') as file:
+            repositories = json.load(file)
+    except:
+        repositories = {}
+    # 'dayjaby/AnkiHub': {
+    #     'id': 4089471,
+    #     'update': 'ask'
+    #   }
+    #}
+
+    for a in add:
+        if a not in repositories:
+            repositories[a] = {
+                'id': 0,
+                'update': 'ask'
+            }
+
+    for path,repository in repositories.items():
+        username,repositoryName = path.split('/')
+        if repository['update'] != 'never':
+            try:
+                url = "https://api.github.com/repos/{0}/releases/latest".format(path)
+                responseData = urllib2.urlopen(url, timeout=10).read()
+                release = json.loads(responseData)
+                datas.append(responseData)
+            except Exception as e:
+                datas.append(e)
+                release = {}
+
+            if 'id' in release:
+                if release['id'] != repository['id']:
+                    data = {
+                        'id': release['id'],
+                        'name': repositoryName,
+                        'tag_name': release['tag_name'],
+                        'body': '### {0}\n'.format(release['name']) + release['body'],
+                        'assets': map(asset,release['assets']),
+                        'update': 'ask'
+                    }
+                    if 'tag_name' in repository:
+                        oldVersion = map(int,repository['tag_name'][1:].split('.'))
+                        while len(oldVersion)<3:
+                            oldVersion.append(0)
+                    else:
+                        oldVersion = [0,0,0]
+                    newVersion = map(int,data['tag_name'][1:].split('.'))
+                    isMinor = len(newVersion)>2 and newVersion[2]>0
+                    while len(newVersion)<3:
+                        newVersion.append(0)
+                    i = oldVersion[2]+1
+                    if oldVersion[0]<newVersion[0] or oldVersion[1]<newVersion[1]:
+                        if isMinor:
+                            i = 1
+                    while i<newVersion[2]:
+                        minorTagName = 'v{0}.{1}.{2}'.format(newVersion[0],oldVersion[1],i)
+                        response = urllib2.urlopen("https://api.github.com/repos/{0}/releases/tags/{1}".format(path,minorTagName))
+                        responseData = response.read()
+                        minor = json.loads(responseData)
+                        data['body'] += '\n\n### {0}\n'.format(minor['name']) + minor['body']
+                        data['assets'] += map(asset,minor['assets'])
+
+                        i += 1
+                    if oldVersion[0]<newVersion[0] or oldVersion[1]<newVersion[1]:
+                        # new major release necessary!
+                        if isMinor: # if the newest version is minor, fetch the additional assets from the major
+                            majorTagName = 'v{0}.{1}'.format(newVersion[0],newVersion[1])
+                            try:
+                                response = urllib2.urlopen("https://api.github.com/repos/{0}/releases/tags/{1}".format(path,majorTagName))
+                            except:
+                                response = urllib2.urlopen("https://api.github.com/repos/{0}/releases/tags/{1}.0".format(path,majorTagName))
+                            responseData = response.read()
+                            major = json.loads(responseData)
+                            data['body'] += '\n\n### {0}\n'.format(major['name']) + major['body']
+                            data['assets'] += map(asset,major['assets'])
+
+                    if repository['update'] == 'always':
+                        dialog = DialogUpdates(None,data,repository,updateSingle(repositories,path,data),'always')
+                    elif install:
+                        dialog = DialogUpdates(None,data,repository,updateSingle(repositories,path,data),'ask',install=True)
+                    else:
+                        dialog = DialogUpdates(None,data,repository,updateSingle(repositories,path,data))
+                    dialog.exec_()
+    with open(dataPath,'w') as file:
+        json.dump(repositories,file,indent=2)
+
+#update()
+
+#def addRepository():
+#    repo, ok = QtGui.QInputDialog.getText(aqt.mw,'Add GitHub repository',
+#        'Path:',text='<name>/<repository>')
+#    if repo and ok:
+#        update([repo],install=True)
+#
+#firstAction = aqt.mw.form.menuPlugins.actions()[0]
+#action = QtGui.QAction('From GitHub', aqt.mw)
+#action.setIconVisibleInMenu(True)
+#action.triggered.connect(addRepository)
+#aqt.mw.form.menuPlugins.insertAction(firstAction,action)
diff --git a/src/fastwq/ui.py b/src/fastwq/ui.py
index 5f5ce55..c0d8796 100644
--- a/src/fastwq/ui.py
+++ b/src/fastwq/ui.py
@@ -19,6 +19,7 @@
 import os
 import sys
+import json
 from collections import namedtuple
 import anki
@@ -206,7 +207,7 @@ class OptionsDialog(QDialog):
         about_btn.clicked.connect(self.show_about)
         # about_btn.clicked.connect(self.show_paras)
         chk_update_btn = QPushButton(_('UPDATE'))
-        chk_update_btn.clicked.connect(self.check_updates)
+        chk_update_btn.clicked.connect(check_updates)
         home_label = QLabel(
             '<a href="{url}">User Guide</a>'.format(url=Endpoint.user_guide))
         home_label.setOpenExternalLinks(True)
@@ -458,45 +459,13 @@ class OptionsDialog(QDialog):
             data['last_model'] = self.current_model['id']
         config.update(data)
 
-    def check_updates(self):
-        self.updater = Updater()
-        self.updater.chk_finish_signal.connect(self._show_update_result)
-        self.updater.start()
-
-    @pyqtSlot(dict)
-    def _show_update_result(self, data):
-        if data['result'] == 'ok':
-            version = data['version']
-            if version > VERSION:
-                showInfo(Template.new_version.format(version=version))
-            elif version == VERSION:
-                showInfo(Template.latest_version)
-            else:
-                showInfo(Template.abnormal_version)
-        else:
-            showInfo(Template.check_failure.format(msg=data['msg']))
-
-
-class Updater(QThread):
-    chk_finish_signal = pyqtSignal(dict)
-
-    def __init__(self):
-        super(QThread, self).__init__()
-
-    def run(self):
-        import urllib2
-        try:
-            req = urllib2.Request(Endpoint.check_version)
-            req.add_header('Pragma', 'no-cache')
-            resp = urllib2.urlopen(req, timeout=10)
-            version = resp.read().strip()
-            data = {'result': 'ok', 'version': version}
-        except:
-            info = _('CHECK_FAILURE')
-            data = {'result': 'error', 'msg': info}
-
-        self.chk_finish_signal.emit(data)
+def check_updates():
+    try:
+        from .libs import ankihub
+        ankihub.update(['sth2018/FastWordQuery'])
+    except:
+        pass
 
 
 def show_options():
@@ -505,4 +474,3 @@ def show_options():
     opt_dialog.exec_()
     opt_dialog.activateWindow()
     opt_dialog.raise_()
-    # service_manager.fetch_headers()