From 7568b4faddbdfe2d210b712af067d5c119a88d31 Mon Sep 17 00:00:00 2001
From: Frederik Baerentsen
Date: Mon, 11 Jul 2022 10:15:14 +0200
Subject: [PATCH] Added CVIssueCount

---
 CVIssueCount.crplugin        |  Bin 0 -> 172005 bytes
 CVIssueCount/CVIssueCount.py |  158 ++++
 CVIssueCount/Package.ini     |    4 +
 CVIssueCount/UserDict.py     |  213 +++++
 CVIssueCount/__future__.py   |  128 +++
 CVIssueCount/_abcoll.py      |  695 +++++++++++++++
 CVIssueCount/_weakrefset.py  |  204 +++++
 CVIssueCount/abc.py          |  185 ++++
 CVIssueCount/base64.py       |  364 ++++++++
 CVIssueCount/bisect.py       |   92 ++
 CVIssueCount/collections.py  |  730 +++++++++++++++
 CVIssueCount/contextlib.py   |  154 ++++
 CVIssueCount/count.py        |   19 +
 CVIssueCount/functools.py    |  100 +++
 CVIssueCount/genericpath.py  |  113 +++
 CVIssueCount/hashlib.py      |  221 +++++
 CVIssueCount/heapq.py        |  485 ++++++++++
 CVIssueCount/httplib.py      | 1445 ++++++++++++++++++++++++++++++
 CVIssueCount/io.py           |   90 ++
 CVIssueCount/ipypulldom.py   |   66 ++
 CVIssueCount/keyword.py      |   93 ++
 CVIssueCount/linecache.py    |  139 +++
 CVIssueCount/mimetools.py    |  250 ++++++
 CVIssueCount/ntpath.py       |  550 ++++++++++++
 CVIssueCount/nturl2path.py   |   68 ++
 CVIssueCount/os.py           |  742 +++++++++++++++
 CVIssueCount/posixpath.py    |  439 +++++++++
 CVIssueCount/random.py       |  910 +++++++++++++++++++++
 CVIssueCount/rfc822.py       | 1016 +++++++++++++++++++++
 CVIssueCount/socket.py       |  577 ++++++++++++
 CVIssueCount/ssl.py          |  464 ++++++++++
 CVIssueCount/stat.py         |   96 ++
 CVIssueCount/string.py       |  656 ++++++++++++++
 CVIssueCount/struct.py       |    3 +
 CVIssueCount/tempfile.py     |  639 +++++++++++++
 CVIssueCount/textwrap.py     |  429 +++++++++
 CVIssueCount/types.py        |   86 ++
 CVIssueCount/urllib.py       | 1637 ++++++++++++++++++++++++++++++++++
 CVIssueCount/urllib2.py      | 1488 ++++++++++++++++++++++++++++++
 CVIssueCount/urlparse.py     |  428 +++++++++
 CVIssueCount/warnings.py     |  422 +++++++++
 CVIssueCount/weakref.py      |  458 ++++++++++
 CVIssueCount/xml2py.py       |  121 +++
 43 files changed, 17177 insertions(+)
 create mode 100644 CVIssueCount.crplugin
 create mode 100644 CVIssueCount/CVIssueCount.py
 create mode 100644 CVIssueCount/Package.ini
 create mode 100644 CVIssueCount/UserDict.py
 create mode 100644 CVIssueCount/__future__.py
 create mode 100644 CVIssueCount/_abcoll.py
 create mode 100644 CVIssueCount/_weakrefset.py
 create mode 100644 CVIssueCount/abc.py
 create mode 100644 CVIssueCount/base64.py
 create mode 100644 CVIssueCount/bisect.py
 create mode 100644 CVIssueCount/collections.py
 create mode 100644 CVIssueCount/contextlib.py
 create mode 100644 CVIssueCount/count.py
 create mode 100644 CVIssueCount/functools.py
 create mode 100644 CVIssueCount/genericpath.py
 create mode 100644 CVIssueCount/hashlib.py
 create mode 100644 CVIssueCount/heapq.py
 create mode 100644 CVIssueCount/httplib.py
 create mode 100644 CVIssueCount/io.py
 create mode 100644 CVIssueCount/ipypulldom.py
 create mode 100644 CVIssueCount/keyword.py
 create mode 100644 CVIssueCount/linecache.py
 create mode 100644 CVIssueCount/mimetools.py
 create mode 100644 CVIssueCount/ntpath.py
 create mode 100644 CVIssueCount/nturl2path.py
 create mode 100644 CVIssueCount/os.py
 create mode 100644 CVIssueCount/posixpath.py
 create mode 100644 CVIssueCount/random.py
 create mode 100644 CVIssueCount/rfc822.py
 create mode 100644 CVIssueCount/socket.py
 create mode 100644 CVIssueCount/ssl.py
 create mode 100644 CVIssueCount/stat.py
 create mode 100644 CVIssueCount/string.py
 create mode 100644 CVIssueCount/struct.py
 create mode 100644 CVIssueCount/tempfile.py
 create mode 100644 CVIssueCount/textwrap.py
 create mode 100644 CVIssueCount/types.py
 create mode 100644 CVIssueCount/urllib.py
 create mode 100644 CVIssueCount/urllib2.py
 create mode 100644 CVIssueCount/urlparse.py
 create mode 100644 CVIssueCount/warnings.py
 create mode 100644 CVIssueCount/weakref.py
 create mode 100644 CVIssueCount/xml2py.py

diff --git a/CVIssueCount.crplugin b/CVIssueCount.crplugin
new file mode 100644
index 0000000000000000000000000000000000000000..578fce974b11afd16f683edbc2fbb2cddde2ede8
GIT binary patch
literal 172005
[base85-encoded binary payload of CVIssueCount.crplugin omitted]
z;y35};m+nnsh{JTN!%RU92YEb@C!3N?`7eQiQm8?6ya5W*>+SE62;i}lP0q{PdPlk zz$slMsma|2gkxndV$zgv7D(I?XKLL#nYM3}%gY)?ejB^nUhwDJFM%ImHUZw8 zc{{3luvB^U`qqMc?9MpDh^5P4(d?1UUM zA$uZR5n~;PoMJW{$bF-H93*RwD(8Q6p5cPGOeimIi!kPVVddTP&J9x7cPEmZQZ_i7 zFF7ALqhc6rnT5-?m}_vO!IQ73*0j79{_Zl#F)P&;f*XXeJjQ7x$#G4sP4)BN`s0`=-_0DZ8z)>vNWP4%_T^Yj|Uln|~7O zj|61amtZZ4`?n^ACLNcJqD$A7Z$K9l8GP)|_8K~Dt+}>~hE})-(a4=#@s{kvM%RZ` zeV!w}y)R7K9dP_;ypNVf)IA_O8FT#@E~)KX+6Vu+sdMDp_#8=*5^pGcXu9Vv@u3^) zxQK8lA^rSbyN@ltZNv_ZZNxbDi0hhyu%m3H_r&Qv^w9mAJx+anlLD|2N`4m7DU(r} zHkvbMy!hvu2`1Oj&?4>EPm`keG~E-6=zf*8zpGFg|K3<A6 zs@0Hu+b7adY6Wf$n5u=Jm-9iejTifxTy$j6jlHtg_1E%!Da)a0&t%b9~OOG^T}4*&+Y}a zy!Fpq_9VIMXoWWTRG4AK)H*iF&kX3r30ft7_mb(u?<>X|S!14iuh|o!=yiT5R%4zh zOOcZ?GJ2a5X>CEy6BzoGEH`ZYB^D8hW68<*QmEqJAXAb@4pOqEDE8!YGm%C=RNMSt zv|YRqBAa~d!kL|>Vu_xlH5mIV4e(m0i=b9Ljd|8&z(z}_2dRf>d13VCBol?jz4)G2 zvBxX(T8n65wGs^kG@b2#;6>O6lF36=hG|ZW;o71M>M)%D2ndJ;Z*+|{?y$M@h>3{4 z5hE(1w&O0`?wUBu)dwU-y58@B`be+rWxJ`6coxIWvl&F(8JXYUYqiXSSYA-JmdS?U z=vw$W#O9Ygj$vIth4c!=)(*Y7p;XvP3rW<3x~|TjDc&iG{JaNeYM`-epABSNtG`2P;Gk=iv|f*_FHk9r(0BP4DyydmVHfbSy92bzc>uOoR<;O z5j&=kzLLS8j6Duo3?(U!$qlhgsgSGs{1WejbbPvu=ti@UHy7J*wqTML54Fs!L%Um#FQv1pceQF^ znmbaKQxiR31XqsH8WeiMlU5f4|1I*Q?jBO8(H)bvx`$>^`&w;u7*lar&}>{N(J4sS zRKPO)&-qlwaO+-K^OKis?K2Fj1>=n5)T6+3ZL*q`z?V83EGU26xG|~gD9pGx+#T?r zzp($eyNIQ3lmJc)0Pq|7-*7ThBX&l{e?rKWe>mBdFwAcs(>GixOj2-KaOxN)`Pxx0 z_~gbKh3z$SX)LaH^Ni8^?O)=Xla`#)xPMnp!&`iFqH5I&9xprl`{|I_KvZ4NyJ3Cg z6k~UEkV-nq6(?*J@m%Sb*O!;xsR}o=Y^nt2{vNj!n*fIP*a#{4n9@q!a zL=>kCuzxnfu#0-K!-HtN@v}OK6F<%CTcrt(@`p+`iCp4!;+2R3;I#v$Fvm~Y32I`h zgTP530z$>4or|u*N1^P|;vZ=?fZ+gtX0e2OQt3)|s7n_VRghrlf@$V>*Xl%Her#|9G(ckcHS)aVBx-HIWBLj(!2=mx<*EGe1X*K}6^Qx18MTKW z-EIK(<^sn3V3cMXOx>{o5gfC9CzQCLD6A8IVszb5mt^ICV%7HTpIuH`0Jw3$UxnNh z2^92vTOYgPpth$ape43!{+e!2_=z#r!Wjr!`!m+KI8TU*=GsQWg@O!PE5lqe60 z-y)eT@TGdL0}c^HfZIS<_Fp=f$~y+=z(xRM8x zbTGDqQ08mKT5*r~832gZQA+ai#~PS!BlMu@xN_JLIG%M0P7ROv5>XyLgOUibMN|H8`iB%U91nL4slOA26 zF7b_lQF56^3?FnX;m?POz^^ndndOS;BP!JkN?E0SjWBeHOyw+w1Q>oc2{fJK19CLu_@iH%~Tt4{4gHSORhDs?dWenL8`_%_2#au>fk0i{T+$jT^9 zap5h$3MR`XeuV*8(o%Z70VE_)|5Q4VIrXL_3ql6T8p^hm%;6>itMU1JA3u2Zd7Mhz zf>L!_8$<>2GumGAN7T2<5ujFzMH*wwM&9^F;3>N0>U+W%Z~=Ag)hhO?@xkgYDgT;2*lG59lQC@u{a-6s_ zm~ar%L^9?X-792Bej}T{A}%ukU}BVzB9916bgY9YbVFP<3&ORCIjCM~_s;iV7i(-Q zr*WVcK}1uBwuF?+Ba$*ZhxEH~#c4o}23WwMhG#e-Tou@BQln(pm~OhHuL1xJcdn`v z81(~a;z7H&BdAPGWKs;;QDM5>ykk)>1OQ~y$NL)sga`&#Sn5%Ph9{$!nzMjXS1=R< z+8&#r9>_4EvjzYzRl&qPR&S7&q{!fUYcXAcZJ&s9X_z#1RAQ-qd7=G=Bam4IWDvrB z{vn0Nk$n&}1d&=j;Kn=35MClcJf^4t>wW7tYA4FT3W!YaQtR#vC%OzmLkWGh&RvbB ziLzT422k|Io~3ehNzaY3cf_4b5(&HnIB8BTCyuN$lFbPtc@T<%hylTzqVI|(N+n?K z@8nii>l@%rQLzv)yFkwn_)wz9uF(r1s!T*f)vbZSOw1Cx%Y_MNMi zcsIb_ATPgcaqX4G5LTQ#N1(hx;;b@8wK_h2RUh6)9O*`Cd`sYvNTOI$LJr@bimByw0?EGYaJT!az6HwSDlI7=mVo9(F4y-d?KvXQoIe^e`&Q6nMYG<1U*t4_vuM0rf3GIV@9fifY0u^_ofh5uPxw94I?ndVKkLUuO zVguKa97Ke$ps|r7Yk)NPUwqKny`}**D`n~2U{Os8NK92@*mHcuazb-b*nA7QZqlag z27D1B1uSgz3>R9N!RkfY{^XL;lz;QbPfm()PfrGzWl;<`N>=v*Hw)qi3hlfJ0`wk- zmFhrq@!2jVPX@;r$%*$G2or+=aQ9H1CIjGY4h(XVOo=KSpVz1_u$P65qiMb+f3j4u zq6B1NQ&7nSpyB|D@L8(#saMi(@Hcr;t7E8HP{4-VsMej`-57*}71Q*+$WSJ`A zkK3y9T(v|uOB&$D{$z+EuKfg{ZRDDok#4Vb=f1!?ibQQlEi=)R%4X4%I-EZjBN5b3!o_Xr#)90P>So+Ddcyw#T25PP;|O zH0x~O+>WOy&n-HF3fVbY4}VY#-7UHqpyU|<0m9MSJB0-^wLPdAWD6BnuRG4&+5a9+ zu)*z0rID9@4qpI(F@&lQZId96@6@t!N1(t+cJ}Q9AHS6K;)4vqUxEMOIZqj_>{Qr&*0^B46hR$h_8j>_RKO*I$|C9ielwNzZk-WbES@Lk3wc4$oMc zoMMy>hdw&A_t_cp?i(=3axL?0ImIezN%d_AEuu9I14|BmE}SK3-+@KGnWf_%a%4a# zCbq}ULc5CIWt{>uiW*BX0?wzC1b;6*R7q+MbJOFV9U4yAs|<$_BbHO&SrvR+OUF!2 zccWH(pc!?fE%?^PJ3f%RSI3m2YiFrUAiQIM)w=ok0p@3}K?dvAUGpOvst#bDvf?@2 
z6Vu31=%CTMGs(28RWXUkLU10fWn&N%HOvjnYdU4n%5Yw(LNYF0#6k(=(&q7eVBz|@ zqS(%${2qI&W>&Q~YvR1_vYsP7kzEoFk^n=ZxsW+?$CV)^oDV)-WOhk`iNJd(u6}T-p)Ukuuniq z*_^j1-M!Z|M_GKUbwg6VpmRvvFT)GW6Fjo3-yM%EtiReQXdExQOhD~OE)w>`BYZik zNlZx2>}r{OPbSV7tHqYB*ss}Ej{16>^AnoeVGj=^h>c(*+)Uz|s351$d@Em&Ksbsm z#DlLCGwhHBynAS$abVLQg5V)0=**prbG$Sae^|(b2nJmF@ zi7{vG425jcR|XvtW>23LqeGXpaSw_#*Qu=ikEva1whV`8#Y{F$f9O<777W8%QYKv; zdyxs!lJAP7TDB z;!KffJ_d~|yP;#@+Pt&Gj4-(AbVsdN%_5SQC&BzWfR<&)7RKeCiz3x$cchy3@EhMd zxT88Ntw=+w@C7k2Of&FCP(-GwZXj)vH5pX~N@R*j zVeIBrPrN$@@(N~QIf<%xiJ>1-vSL49fHc@<_%&COFxs`=M-kxVh_8lX?ZCD5@3D|B z5>IlevjL&fd~z_UZDJA{YM4JxX;zQj+@*Nn2jt`oikmI9W4{Dm$h}keyyop~8xMeT z--#$onSoo`%i~q7TGgJoQ5LizeqRBX6UircrUG)h7aZvuyVE?*-S3$yM+g=sY!x}aPmSJTiNTj_9 zCk6d3$pll)+S|I;uUmzN*PoT6@jz1}%6vf>Cq+I4lG&DpiXD`VF1cP3PdJxU_RD=2 zFaO@^CGBb|JBZ);Izz7(f%h@cfEq-W#ic_P9wh49@+5(=#0G7BC*x2>p&VIN)xFPB zC2g}1IrOo}pJeGeQzq=xyk|&k;fWc3I#Xm|5rymsJ-g9y?8P;ygjhM6*+VeJDaBKW zApArA$5!El&XyqBl@?;lNZbM}-nOlc0<{ar+jAX{Dbs|RMb|WIHRw!o+B=sDKJYBC zKyuVsN9_0(y^~&_#y=-eZDR05smtG_6EhWsvVTu-*n*DJva6(KS*se0=W!?c9~ye0aPTh@?8k@WtxO^p<0SRw zu%RjN;Yv$>o&8IiVS1r|eAOFcH^yeP3G#x+`2?PRuG%ppqCFNZdt+lmt5jh==TEF5 z@0eqOTBhSFuIe$}+QIQx{n(8mW3pWghnx3{Ry;vpn(nTaE%luQn_Xp8ivVrIgX<{T z@?HaUcuJCW$gz2#ebHohms!}s0!LV!?fP=vVq1c@=-_I>yY#Y2W0iB!>Uc1#Gz3}e z$C=b>qfON$;X<}~d&4w8adI6f7NrYtA@nI#r-7TU)NMT`;6#Ez7Fn}@eUpIF5;2}W zL9pt0)J4#s4Zty*#zRP_z(#ejLa~b3;`7K-&aCgyiUo`jMOll>P1&ms2(|+DX4#0M zSul$XdfXNjmBi-y(Yngei(*@}DmjCLWLp!+b?2+eeF%E5tG%0Y;o21k$$c-g+$P>H zqRg-%4vx&G$j8uhr%vd^D7I<>Zoso88;BG|(bln7J-HzRz2*t&_Ax(M z<$+8;O#X8Los7=g?eV;i5RzUSI_O7X%A>m{&EukTT_}<)+^S~V5_wHC9I=+w3ZkY% z0%ZxiMXcG3GjXK~I@F*|wcxCyOIGU`xKxJ%pXEUyiQY#W@a$DV!p=C4tB5D6eBoU7 z+4W;32QQoDf}HCdqp4^=>8XZZ@UcJ47WyEUfB7ojnKD++#Mtz9MIUfG+#Roy$N(*Q zGtBM`mLW&E(&8nNLct3AS=`mNaPq-D8iaVuzImGYTKAJaWD|Qmo#$EP)1qW|!Lob@ zzPq`LUp(Im$(-`FfsY*xE;A6POrC*qQ;?wu7o}91YkByj>;%go_Rp&pjygk2n?A<^ zcHqoN)}z?d?c&gz(Pp2Tw z+qP}nwr%rP-Lh@lwr$(CZT5YC_WJiIBhEZC`e)o#pX`oF!_nKE~Si%R^G*rDu(u&zolFY}@_yq($2*+2)?JnMAXQ-%U}wrk+Z$ zo?KU2Ej+z7^JRq9Avzf^62QFtzwn5nfToAIP5ugYvmEd{DZFyQYm338z6w-Q!^S2X z6W73>N&B4`rdu9XSt_4$MLJ``(#B)o^b%tlEZ_rFC~vHU+6KGc=3kw<8m;b&YR7=~`6%Z)ZU09HFA_H-a+B~Sp< zz)8I*sT1Rf?rRAGmp@&ag1Dh0!Enm?=+&$ilkv_85wS6lum_?UqA1jlCWJAU5Q{s7 znvVcd#@tu^{+U54X;hgj#^&;|X2>v-YgIW5aV#ED{257yzr^g%0O25J&l}!< z3#L7XLtMXuE8)VN_=f2JDLXBu4Gpep%9JDs=sns;02=dWs?EihX1L%SVa4rxo@YE> zMxO5b&(r-3KorAb5`%Jo?`V}cRnCriKEN=2W|n6A)#_346}Vm`JPZu^<^b8;IL%nB`qr;n=kM7t@bM zgRwpAD)W7P=5h?44LQkRU-w%$&B+zHTxuFZ3TrG{x5qd2rq9e-&%@61w0UVZC(C1y z#+o=hrJdYB*E);LB3~QIl%H=@Ud3AG-{L3tc%n0; zmc}wl$L{}fPqOV4q`MfCXpz`{*`}~?mF@C|b+pFZo=MMw-3G^RksFMJQsvNl4!U(< z4l)gZ5OQa>aP4ns&s>(+azG@tUN-`7qOVx3%L^sDyYp$uXY?@NVehjNP``&ZNe7=Cv4X$7QjazrIkc;BM+usv?2g4;s3bt3HAK;P@ChZ*TFbtAb58jz0kDoQR;uFp#)nvK`P*iwA* zRs*{G?&ODeL%w<1RJ8nZx$Z3C!;Yjp&1;UHd4VJ9xzdxp$!~UVZ^e*Ud$srVs*<~7 ziTYRd#@gOu9`@Um?$F36@?VQuSL$~Yx2b9r&kNt%l37=MmK|622utz6-IYfu?P^f= zS-4u(JJ_=o@E8kxe&zF>^BlekZn}gran6y+vL)uRJ-Q+2U{JDtTH7@V%jfb5iZK0j zp55BaZlhgR_itl$1&VIR9A%dZELbdlmw+~U7d%c$E4OibfN?kdTU3POtKKm|YUZjS zHIn9U77uSDmM!yG+=i1rWW6%UyQy919!O^kHa@RsW=(3x4{fa+?1#gEuY-3+;4v4e zHX13^y(iIhUg6)A1l73bQDOlFfdsj#V%{vp4kLfolG)_-9(WYW>t8X$Cy2_~33D@s z#%qGf-9(moNP#6eLBmMr8sA8a=r`wmoEvkA>ec~IWv;Ex&v_Vq8LSrVhQE% z55Ga|8%l}1ONami<4^;dy0;Dpr;hv*?bnAfgVNmuD4>i z$GIus@b6xw*KhOi5IbVZ--p98h|Vm^V6jG%Y{D)2QX35r_ZV3{$(s`~{3oURMq5~Q zQb?_}qBv&T;aNmanV^&g7$HKvHM2svhuk=V3Jc$#>^dXbtmhmx2{l($m~nPeeKla9 zZ7dgs)#PIM`UBOw#?U#t(n-sB({Hf1CI}P3%{BGfng+7DvwQaK32f5&*Ur3kXlE8% zj%j<=Ep6%AgwVe{AC6_Zy%x7O_TH_QZvJ+*LbNkU?$n;)Ojr3e(gM9ADhO?-CW?#i 
zg!D)gf7boE_Lf>z9_0I^d9s7avb&Qs$jKHvD~61xz$#G5NcEeW6^qS>Ao3y>`U{6G ztRCi;++5cEFs@s2qBoghJ#kh9mo@A`{A$|_UCFfI!Uc(u!}lLPob8aErc`uZ=_g_h zsrah^Kjq_eFPBl~GDziDOqav%kcB!jr_-SeY8A;QHTKGW6|5*{;t7$T2ECA|wEZyW zLLjE>ES11S+@4PC_#*E`4NJ8`D=7l7tK3x~-NUW4J1TJqKpS2`jgMQXUlA#O^2hjtL9=ABs?jxoJ&3^$F0j{vLek>}dMmxd~}E2mHk z?_4qr$hLs5dBoDrvNeyc@jTdbe^a;y9jrRZK?36cy#mFCsg!OnU8{ zxy?^WZ7UulYxoUy5q0>*qt(f}qp5%x#G;(ny0S-4*s#TXx!C&-OXW71 zIFer;nCmP9odsT5pO=Tr>wcdjZd{*}9w|rpA?rpHG~fi3N+ZJBa-STX^EHO2jnFUC zjpN=OTme>`?tKHZ1PURD!9X=kL80s4{Bh-i;2Oqm_q^=MPU_Tqj2LHF-}HQE#y)C# z!i5Ahb-t8w&oAHGjR^xc4)~id2|T`UzS5r&dFj1C_g+yq2G8@jh^|k*UY;T7TgP%G zoFag~!bHJq9~4sq6gbU!J_HJSW)%2c_5u&?T0&eH+07R`30$=f+2Ts4rmClizC$!J z+O9ouT(wTw^gHYTYkw5)*km@sdQzln9gAysR}tbwaWmm{KU{z6LSZ(@ORJx~k`~(& zq_Dx#lkQKsHIMKs>SM0%(T$_EN4z-$gV0Q0k+bsVf>BOhbA(AXEAIpgiJGH~vj z01b&Q92Nnqu`gs^=TgoGqW}KweHv=js*NbkzY!=||Y0Hc}T@FkF; z6O2llY9i@_A`dZ)j1J1+Rxo#Dfx;%r6=xT_Twpak@mm0V*rY{QZdl1&a}GfNPBE2# zzn8&)=hly*Oba|l<&#@>y^ND$3Q5G8IX#w~E)~5c0Hjc1!n5vDAXF-Bh&4Tb?ld*E z^L)?Cwl{7*)ROQaqATWPgWTLy(q^)D%`IV+%?=z3bk$$~Ub>vv!fVeoR$Ht6C}MbR z8~1$}TX+F2In`&yiw8*4KaLcG;;Cp4k~o;km>_>v;ShBjDu-R%HLzbipoZH*LHw2$h&W{fhm@oHdKe%vkaQ+cqUr-xw&4i}b;e$GKnmJHGuqDIh z80#V$6`clf{ug#aF_il7lM&YK^_|geM|9evQ#9rEnibqR+P4rl?5gXJ**G)4@Y6WA zprP-i{HIqf86~mr95P&;POHwo5}j=4bA%p0vkDu0E$9TY{?XNOCcKr>ePmh>HdQX% zQF*`=&7RRGt*s6TiSdrLImzbuya$WbGuiO+j#g;dk{F_lj5e8)>MPo?lItc%WL866 zrn=fDjs3GA*@W8g`2gY&8&tYIc&^D&k1+N-2c>WDx<}LQ3Z~07s>3xGhu8OWYQW&C z8GSdO_Zw9`3B=A^t`h!a>Yk=fme?X8;6hen`V`}P29Eb$e#$Ag)isTbqcG|CKvlPI zoG#HzUC}lbPXyvR&tL*Q)FkVRGoP4VYSj*O`(C(JnAC*I%vO$ZQ4>RdHeEN8u=cymOg$nP2SM0 zG-B`6)UO1S53A{-1uT5e7&8=d?h@no7lQa#OV#=5AW?p<{a}Qj)mFY^MO|)>liSHO zmFr9-*C5e5%M^y5k08H(5a@STC*6^)Fn=ykhk7iMGM%PA6X1QupPme6P6fUXPyQTk z72Isu7>sXRzmBi{^)()TmmGbk9DP3KRE#fy&(I4cK5M$afsEA^^IZ78@^Sn|vaR5H zXkXP&=GV3+U7QlW4ikT*byPly)bxMIuJ2>ufWvy81wlE;&NM3~onmDu=EPZNXV-DL z`Abc|9-WVq&*_g+ur0#4)JXB;ct$RhT0SSS0QC;8xCHYKUGDDH^?evP3dh;x{JY~! 
z(BH42#aA4O4V0VqGsu{kVW-J*C~1O>V1J{a05tLm`*0U>l`!by%z7r#@5l$t0_z{s zPy2x+1I@}4byDlC83U>8lbH06QVL$cHU%lq^(;U5oGU5P>2={|Po!t9wirRHPDLS) zusU78Vp-Tjx;&QB+OancC_gjAxkPjOpe8xLrF_NrQzw@zXx4Wv$YOxyViGP@@nDPB zI1JDS(THCQG{Yj_KyZ0p_wEcu%z|YjmKIekU@6Tzo>Wpn#Nd-K5pQjpnNc&jb|qDt zOdD9I=tSwWVox~pn7iF<=6zzANDarZ#uNJ=Yw_5W-mY*b_TAf*K66?S!|byN=; zDwW6i8i)O|@+eKC{8ULV0WU9HD5F)>Ul&=N-fd!LPSJZog1=M&y`v}pm(e*mmbYRQ z4FF(V>HmlhIolgso4WkRC9CtP{Q1u%n~nJs+88DL*{pI$tA+a%30D{Z)MIM2p+lHK z#wyu=@z(pfzU_UaO7Wp8F78@1eSLj%{rN(-t*C#pZsoZ6;@d%G-{8{J-Q2x= z`P{R))6o?UE!?-a6pTbf1$-Y@hejnsOqg^@LZZ`X%5_Y*$0fadYWel3FkDQP5-FOJ z8qD>s@+>dFCja+*k%#&pXn}!11g@mW40W`Uz@*Xi3_M1wELeIBm^Za-s5rnWhleqA zXrEj3K$F*G*ndgc=b9pn*<_F#kPML5$R5ck6f$K7{jzgR{8yJYk>p4UeUcsd1a59E zEoq07CQvNzsg!SWh-8?-u6u$Q(sHizx6jqFwV!^l;fuUTyzq|@AfQ?hs4!z(0c@6e z^iVWwCulDbMK#5mX*QT}ZJ$!s+jmJnooj0Vfb(j2xO;Q}Y(Nu9PLB|^K6m1q_cVpG zp{|{h#6?g`UuEFo?e>qKX3Ehy`)ZMm%NbFwKI6m$9-qVrWwN*nThcWR62uzQV*d1U z;Z^E9cB$t!V1X0XGGiC@V^Db_WK5V{(1xV~uZ3Nnq%W7$32-I$OuN!e`Gp|q;$hWH z#iC`iJzs2u*dN@l~7q9K+^E#B( z!nC+Tj24B+3?KVCHMFcV3^s(-MrViz$gu;eyBesfoMRw8Wt41NXm=G`HeY>;L7NRb zpy?TjvM|^-200e>LZeG;h&tuc#u^aM@nab;cO)PdxjJz2i{!Sb&1#(EVda+J7 z?LoJ}qLkJ^4`Xt7^d#;f16(Q~zTDbT%Kr7(Bho%mwyk&MAOu&nR8tE|{HvEx25Ll3 zndcF+M@R7oFc=ap^eYPePrH_dQTvcSvrL4wNa8hZ_NfPy!1tpHvp5y}q0my5oF0!d z4B%L19mK)#wKWLOc!Nl2c^GsE*6?Co5N;flJRhixjt1>zSXu$YpK9j>1I3VX!{@WW zy`!f$r?R{K$Hl~2{|yEUBk%l=MhI6)?cMDo*v7 zj&)!`C(1u?d$W?TtGa2W3~NRr_#eE;v?MmeplB^){a{t@MN6lk%uF4^jHWEY3U!FM zVud>n*^&IHBu6UxQ0;^VqyZKM_O48SEceJVmM-5&#DcKU^x3Fz z5aCE!?Wf&M8ev$lqD@K|TL&uqlV(B$j0TNgepvH%Rnh+Zars~ofWU6a4e)5Eu>uw$ z<>2Gz=Z5d$)IG#Tz6^O~%fxbI_A3whDM$qr>_%*w`Utbr`_3MDU zIk=2`jph6C*k<6{mfrtvTwFYSrC;sG$@Q(XJ3D%cER2=UnPA`Z^rA7?aqLtf09sik zn{3){KXJn!P1Fe_$&yD;(z9vb5jfP@4lS+1P*UV|`EwW;acJ$;-`qEzZaVYlHir4n z!H4H{pXfQo^ontkBdZ#UYfEd)F1ObQV2`dfhGF`vr%RdJ`}>W8VzrZxlb7cX;NUFa zBUGm*0>K10;9k!)pwo+T(o0CiY6XgyI-FaPADt`KbE6T8fmEK~NgXPQqvDG!0VV+F z_hb%ztbh>ToU_e7T?`5MU_9|g{4lf9@R%AqfZ zz-?{sGQO{OU2pI21ge-YK+ly~AOZn-4JIM^ZBI-DOv>U631 zM*k$wwQ~0CVF7_wyxbvcT`dNZQjw|hPKpTCp=?BT=&o;TsfNrbK929ty(#vzG(9BN z#J`=egR*)B3*FgO+Q(z{YL6(jio`}!q;koCQ(b0#|G@r zwUIy6_7J|kdfJpgkYvgtw;8V$>XqzrWn}_?hGP&QT-G^jMQ7!y>{3+Hw~KX=^*8M| z5m5(X}33GRCV z+*A=;)C%W(o?;`TmCrO<)PjBC))P<+I5|rpTZLN4HC}H0a*yH>(eJLQeK1UXV)gmc zLW4QUl8*CZKXgSew)r9w4}c=|zp5k;U5VyBMpRkDzsTMFVYXmrVN62MwpyPCb<;Lv z{k$exv5h3MFK&v`!)4k_HBaCLggXD#TD2cpU_Z+^f_~=rWFHYU)y9qCty3gY=x@T~ z#w?(bf*S4~xOJ-_uM&3OJN=$V@FIwvl6fip6-?D$0MYn_5LFFz?>0=qu}{whzn}sQ zRj1UsRv7zBdq?2PE_9*UrSmFBW>{OZ10ubu5y>G(w52!hZvpX6Mh1Z>G@yEu&zB=n zzd6=Eyc$b#EZ7K5AKd~Q-L1F zz}+0Nz)z$y|`jFn{oV z$|#LBicJwG-f&QdOgO3Iz(>5nqgmTAcaiQ|;T|p`)TM-9tB`5*=o1;tIeHgxE;UUk zac4G`vC^rb)~8a%Lu^}%FJ7R6af}8+NuGfTmVeBk)i>l>%NKs3whZLeP~O)#de1NLl# zWPBX$@VtIqhN%BYHPQyjj`}1)aZRsNoU^4RY2FvQ~ZNBlj%%>o%)1 zQ@ty%AjDu!VUV|4!-TozMNpo)5OSA)UkJi2VoPf zL2#4QXU2gRJdXn&y-73DLMzP?8PY{sW@#64c*!%9BHCaiD}KTh*lFeLjx29K_uaL; z@bkqe3JzFSsbS{n5$$80VZ)`9_cHHvc^H#Z%>h4~-(a(W?+-?5omNtFfmh}29~L&1 znx`|zVoVuCU9!n^?@0@K z!Kc~~ZG=PTNm4}AG3iE0R*E)Y!J>TU>ItSHDdP%}%U&AxLQx|msnEL+zk%c0n`7_= zYcjcCmIn1VEe}CLqMym{W|erpjyVZ-BTEY@mT!hGxv@PEG_-M+VCJ`=YXT3DjMdq`m4c^zwp5`juqqQG7l( z{Nu+bSP=`#m2bPTr7Gxc1LQ$y+n$H__PNJSu%uG)7Km%lq~lX}Vw5oIwcqk~tcuM3 zHVV6Vv~RpM8+5}fKcg1m*nzKF22EFCp2^f!WJ#0GF>DmqluEBmO7C8fZ1%b1joHd1 zW+}}EuIZzzzmU4tSB8mar?T9L^A?$-2M$z|y{r4MkKncP+18?}JU8^YE+4DxjT!3f zZ1HLimk#RdF1yVir<@~l5mlc0tqvAy49y~~tw44uz;xOS0*m5aESiXA+23EXkdMW* ztsA0aU{N#OFs(Wvman*U7`3atLqkv;C(rNrb}*ce8ii8z`=5KOaK=Bo;>6|z+B|E1 zH|;@*2EH&FtIbPCN6*NP4dAa#um3I0%x@`@@ zdG)+7MGht$lG*7pNzJIOOhR%3m_BO*q9N}menwXx3qkXv#Gr#U^Li^C>50>p?re)# 
zv2B>N5I$*}hFmC-}%%`FhYxVBV!FBJS{WZMK;o(NB#PbiY z3aynMnU{!frTuFy#mPf41@z`m=nD9#R?A`~f6=2b{Am_z=!R;w6F`e*ads^Rm^K7k z#!p@kf4BTJ30sd9eCwFxxe4RBf?M#>he-*>5%h#BPYs5_fdqUw@s+v`tmpN@XqMsx zF5!BhSBOj-gW`d1af~0`qr=4O^mGHY+OpeUIO{Qc*j6No?r|G~Gi$(ZFC4Q>(TxZo z=uPe4BxA)cFj%zv9Mx!WYcRf>sWe?)@iGUw(1I{YHUz!V);U{^Ici2-4}{&h>>?%s zWP@-VKVM*jb6AXclm&1JhxN$pLg08?xS$h@K2Vz+g91+d(cb*cM98%Wu784`?9gjPC6ggj zg_Eu%D|Z5(X9=IR$NN`g)o)lE(jlYWv3~F8S7Vg~6xPm9rW6j6tI!!S)8oeq7vxRx zKhki|u)Om=o1TO+y|g))R_#A##ewPTPKgnPk|;!Cs%3)+`FI6l7C?;`DZh|N_BxJwk7QJ`?@O9Iq5EA>bjd1ce3Z4s4P=^R+MW` zqejH`)NkAub5dVAaELr?3<~@Ace7j4yD*>8h+^?|wf#~vjI0Z}CRx!QFYp>06FT~a zg%LFUW6mIU&n_;ACrKJ-OYn+Pr3g^?pMK&A54?wnj+jx-r+}(dBZ4 zCto_7+~MoYwX8}4Ls;;0DcQpka4Rm(WdG%q`gf!kNQ&nr$nHVg>aunu-ur5iry8}} zANBT@Gu$4mRui|p9K_XhSg`rv$UmC;@D}SI=g!-|p9LLt)v^>XfyD-HAr|ny z&EirXg2@_iE!op~C4_PO!%|NU$-IFnDPZk>waU?|eb2vysY&n8O3|gGvkAl-El%^Z zuQ$XUdnW4REs+;1T@v+Wbw^S9n^+yxTfncNf(62EVG{Fdt!~=DBxB{y{B|~}L*>cv zg$#(jM=N;Jge%C{)PL!j0@x{=uZfpyzMhry=nRlkf8jDQMc(#J#igyOaNh{m;~7&>`#CO82+fpKWpm4=a1vXRnuS^9g!+ zn6bnOO1ab8=y8*&p?*Ob`+>L4udlY~I{b>%mx*H_w(sv(U$0x`wPXBoET7-+J-vj! z56Z8lF0-pYtr848j82{h@+@x-L*D}lQNZf|I{W;ROsvnNB8L(Jo3D4%((q!CPR?x8 zJfxq8LXtGMl+Z|zR0@yi*aUgDQzL%lJJ?1ifv+Q=k4~ZQ>!CcjSy%CsH zDYT&8h&5xXafYUU_N&mn{8DDL2S(`mF&XE$Db!YW5X5h?9SQ`B+}!H}4W!y=g%OqQ zm~wRyU_U=O&3uYH^6Io-n5NW1Sxn7}-PV6Iv z23J=NiN0Si^YD_vnLBRLw?7rYkZfCC5xz3TQ9tx*1rsetj7<)9v~9kWduyzA2g#$J z!>>~vzccTw^_H(yZ6>n4LQYcQISA*AcXk2)yESWJBg24$001z_2LOQlzdfYR&Nlz4 z%wk(RAF|wen}6Vo*m22%v7>OL59NDQh@tktKDhL2B7`U+ z7E2;DYBEF{dF;R6&m*JJrZDQ?73~p5BDsBPOf42o`#Y$h@EE$sUPV&Gtg8zqobp)W zmLNV(~n=$;roLXl$8_k#vJq#bU3oVp{ z9wGt=hlEwV5O0eP?i3e7HmvyOZ53cu*qxDO)ABMhLZS6Ffhf^JWXrSP&laccZHXpP8XP|xO>NoX07>0Tu z2o@Dr2#4G%#oQ;2F0JO6xuQ`d>0xh8L6nam924;0Qh<1ILaD1aia3xRWQRKvSJ1N3 zJ9)7@lJ?0Riv#$OKs;{XVm9#cO9zd(`3fLPFDllwZPKsZfueT>UiXd;d``yvX~NOCU~Z#7?_ zjyXsHN3KhsC!&5!ofQui!vrHVo|Sb6QFZM08K~m)O%Kcn(m*=AK+`l*>PMD}dZJvi z##S`8M{X=xF!Z6TW%cm*Dm(mDFPX53m-mC8P00k=7J?(XcdNr#ZYg?GqcpO`>eVo;X@xg?6QQh>YVG8DZ@v_{2K=@CZ;};a)t3)0t(OBX z=GoMX-I=->A&xrQwySLmR~=m2_MAN1UCa0F3-!s1jl295*iB9!rh|Fji`b2DpV?wH z0D5&`-)uY=)eFIP_X?Y}9p$gFhlNDmT?htnzpBQ_{VU*1zK7i`-<<@f`24ijIY5<_J7-lAvv%I>RbvDs(c{O-6buFl2P7qoV*;Z|vW#BR zLoRpVe?kw`0~Q4tIQFCKOV^O+dBY_lvZkpZJ<<3JfN^6cImz})935OL_7)jzf)%iM z7-t^|yqENJz-KVwh4vpCA=r#5m7pAk`xZR`dU))BPo0q3!kKqt6Uy$s&1MeM#c@2Y zQ2mCIFJKJ7Uo8^nYgru=VJ;o$Z+HS}g$~~8Fpskr{FaSQ>fy^cs{}_GODzTBjd~on z7@kiVj(zaD4*q1kIkjHMG^pb8WRox<Fj6|d$$c5!=p_A5V^+zQ6 z*xc;#+j<322vuU*MMB{A$NbaiC|r|ZYDGt-RHj|B1{f=@*?86kErNdqL$uiBROXKD<1Lew*dkgc_aT%!{C6=R3l#3>I(AjHVoYRx{k4Y?_xU_N!0 zco|Bob7fE96A%JYGW*!93oHP-=92^Mw7&>`$bS-V<4q^YrGVoXH?Xx>nc@Z;ND`gm zSc(o?)(~Ysgze8rSp#!qlAdC6;bdfGrQ{?W_wuMY6~CX_IAy_8j z7v=Lky)&@d6dx^R`-+uop!^6DUcd{1@t)p0VTDv?SU~fCy^$Yi9?aO=g5%A z1UL)G_U>;ZqKEXpdgVtTp@?twAJ~cUZkl4eqMRpfWe%1Vb}ES~{w28AYCvJ8ps8qf zYxG{MJd}sCZXhD!tggq)9Pans;S}#-QFp~T*ZUjzxBs1t+Hlv1FU6)!vlo^%frV2z zl&CBohXRoNpqrA%*hwQK=geI00gStxlx+R^yzKBZ+17>^Twap9^T^FzC(IANs&U?* zWn?GDx)qq38C6xrJ$?3)(Bn$UT^ZM2a7U?=H=xf}!>cV6m<1c2r3fkHg$qirs3zjm zGK7*;1M@3YPVw`yt8fvPF{-%PvH(uts2bOoSHjVeGF*A;7`X9M5|m7j`FI6;OkLS2 z_;d`L{~`@KEdfbR_!%4$mPBfXAJjh z8_v5-TZeN(&x&9s7F&H;$6E@#EYFZtx&Oq^?>qjsRoiFKLIi{Q#ILNFkQm7|(Z{z9 zSGGCM94Cq)KP=8?4a`tl0V*sLNQX0_q9$Cgd=#kqBGd*b^Um@5cOI1Aq+K#wPd zUG|0&(Zo2im@68kcdOU{U+R$M^jXP8Sthssl^!lB@kj{1(Y4rGoHtNbw9&?t7*vv+*^#)NFDXH!B@o%^sX=;JT~s^vEBcs1WH??WeVQ)HSja52>;P z%1@E6`veX=srm$MRB0g^Qo(h#Q60^tbl=>jK9&T7{m*x)%k2X-vB)Zsk%> zH+rm~iT7xkD_>$4G^us%+j0)1mpGs{q?L-L#F@-CEyh;Iun@F|1zgmdkc7BXGrD#er z^*{*GI6~h`G_yFOag=fVJ}gN}e7%|yNIXGSS37?$Hr7q=*fmLh^L%KcAo%+`M{@+= 
zvOTkh!l5p^D7>k8Yo9>BBCE7-;}T9ul?7vGMQE)v!zk0574k;iuKpgV{CL6X(U2-2 z{*OWYfKs+usY}*a{zifGOOLTFyT9=of(D=xM%2TKCpEt z90e|r39ZWhvCe#$zA{%xgSQzL0rw%k}!+P#g$qysHDXE^Y;a5cvz7CQn(Fr8nq z5Fg@hY!%d(z_FqqIH79ui*}URx`p~{zlcNV$may}BX%^!U-C4oxZo;oyslgN{Pw&~ zt-o6~TQJ}bnP0%4d*_e)k@Lf{es@50AtT+=FT%ClMu|VNx8e%tVv`QiKM&wULl$^_k!}9#?gt~Zb%J@r|9JD=;X$355S)f zW(x@7W)#U!xr5!B_CcmO=5Mc}x065q6S1GOAF11tv0B(bFY68*j0WC;)aXjp;0UCy zgU0|mUUmbhJr+eglG_Dq`o<&=a=7K=)XvN%3#ms80iN-AfEWmgmsXI@VBnisR;wbg z<)d=T8K3U>{Kt@9-^!mF9VC8P``O&NU~Rlbj55Kl_MaMHag7)i*DAiFT+)f&#+v|+ z;qeHEuQFbk5?w|;CoM2dq6*_##0Q8KKi&8-fSS~(BO zz;IIj`5a*e@I9V|nJ{TD>{nEXtj9|M282B!lr!_xCX`MM5{jjy@*&oKqOz%CnrWnBI0knU4gv^Ngn2r696(f{rhim7Av=)A?L;PJ+q#`Y-3Yq_(Z8`2KQH z6*$M5p^$Ctc3H$W0d_>;iL$e3&R2X{u0wZz%BZ0`wiBvI!)>rJgGyzJ%3|asZ<r0Z4N>r7^z*qf9``S%R4w03Sw1ua5lxg*XlW)X@tfP(On&p1xfK=1T;y)++oY{>Ll3 zFcc;R89UUFx{~Kx|G4>zS2r=>WmbelNm$`xbp~%`4Q?LWkr1HuEt5;tmV32IYJ6yc zT_X<{1pqA?xwQ3#9i*$z6ZwC`1UuFO=yMlQ-=)1Q0@Q zzflTBDiWk*|1QLd9R#dFYxv9hN+{S90Yhs%0D7*fdVJ1YeZ(P@4 zskC?pC>qdX31w*Uc zP-MIVkpk^>kKIz=@%(L+pUQIiH_i)cNBCC5az*c7nqh!kq%1$ zDF|2$A>ld3yjIJhY~^<7t9g68;)6jA5iawxHa&S# z!W3&^y806O&m(Ilm?_G}lw|cUv$sy@DWw(taf7A}o1dZSNmYALh|sjEt8 zI50wZ_cKnwu~L6|>w*JcaXQ*zD@g5*GMXF4YmaJHM!?FXQ6fLn?b>M z#a*&#R5RpIeYAOC7|A`jV3+ITJYiJtpKJ1o#r&3+_aQ1p`b5J#gU@s8_{@E5h6lbw zu8eMGq)>}E_M9#S@6p$6nT{3I-`c@fgZBb;6bO#{-ZrsR{OivitLQ?xt>%=c{tcj? z0~u;)mrRQQ`bx$i8`>xdmmv^7_x)7)cT}=#z=_=ZlCQI@??q4h;*58ZN%N??n6%#o zSaWG9?=QfAzZ%EOG$&-}003QT|5wD+#mUmn{QpHvJ>3sk5Pto3H*X0V1aKULCS~)t zKvSIm9@CKihq7_C)MvNse_VZ;i8UN?NW!ON_OxcMWq6AbTapy7UUrv|d1hQlW*?;?Plgjal?aDVTDH$2|}l)d^anP58f##3&7vD1(vn6*9Mn4clO zzA#taltI@J9)^mz>=}R2K4Ya2S+UuAg5}{@><{g5hGgofG7_ycx+^>%-@(5q7R04K zYRfb_x!FqO+9k2zm1Z(WrB|c?p=4c>h)$?w{SMVE0gX`ept72$^6wtoV0c3tvuI)j znI&q9gj?FNQP}dRsdQNpMy3TDO4aZn`%o2w#0LCUK2AebC|Nxjp;bY$LTcjB)@on; z-cCoZ=nBjPhZ>-Kc&!~-&_>w|wyemkUn5_ck3dc?=?e^0pF;w{Jp7i&yok`32>7F|kZ<~QBd&ajg?_n656w$s zS^BOn=#DMAf-0R1Ii(PF^|PY3r@VK(hq9OR(`>`NenNKso@8Y)QS;&3l56(iAhe+Z z-TV}N68{q?6z-vUb=Tu@uex=&{qx5A*6(g#{FU$LXz?@G_h&d&-*?-cvE?@S@sJK< zKCx0VAu3Z0{0xiNPFH4BAjg-D`ixCa@+^CsJlE?Z-8|R(Czvl(f6?e4?4X?+RWCHk zC9ae%+=W7E>XU?jzTx>AEBrRNTRYr$cpBP?q`&U{ny3uj#qs6-xhc){dq*YEQ+V?c zCHy!+_T8GKLISWCAcjh020$K?DFy9OS~N!u1#>C|EL%3$9H8Wv6~IA^8ImN+Z{q!e zu?qlsEQ26xWKuyl4I(7*ct|$~58P}Wk&HrX3N)I#g%{gBl|(DD2zRYeLOh|(e3G0R zk5b}~CY#qxDylM2Sx;jp?i;gGBsp^&kIfhjeC&#%hFEopS2&*^mv~lr`Vc99Gl-}i zOjW&YJ`DF#miHzACKpT%Pz{r)^uzL@~X zMmL$I2guoR@6(_a}0>5*`zne6n+Skuz~)w5wYN+cvsvv&*(^+qP}nwp~+q-tMgVg0u4NWbE7#BK^6AxEOGdn2B*DB?wqArnDzQ zh=>3BrpQF~a=8oC>o-+Atysn|GvnpqPss%Fn0h_N>T&0#HnZe2f6kDECdoyH^_v>iN3 z46xuHL^R3dr$qL-M(_Wsvkmz0@+D({02`%CHf)JY3}crhbMFGY$PfzQW{0Z!KmNp0Z2bihcz465mFa|h#P zrKfLsYlg&c1tQ$R7&Z0C&kN4h>V|I}Dy2=?PIxfU@)y#_;h^_sm=memUXY4@lL5&L zj1D|$|M1Y2uleF%LFj=r^Z6S@MTmK5UM&fc=iPeApL0{6ySF~FntzXHeZ4nCs!}Oix=aNb6Sjxck};3> zdwEk@+)LOjJf_^)T0&7HmRF|0MGW9mJ9yU_d*sjUMn87gF#ovse_VsOHW%m^Jqypr8K&BBuhnQYq}dqjCmr>| z1BVB_0^cPy{&)kgy%4gCbdOLL(_HWAIuI{V+Q43?Kv~;wwoX}J?G_=bBnNC(dtbG- zzR*6d7zv#d$O&J{Ph`Tr1b2u9^m+6Hu;~X(EhOjuQ-jWRyXTrvs;t~Xwu~t&6Wfmk z=`+>inTNml2JPoUE|Z`?L9*5NhCU#a>pV{-=Rn=E>0RkcP}M z6@qibOY+_gbJty#MsW_M_3e?iY#*d=F?_e3i_=KZgiri>V0S{ENY~*hhTNzSp2Q*t z?*X#e;0T=VfO=}MK8e_QMv@1&IAQUE$n){?pdB76qDxYLY2Og_y-V55)PO1u=lD?v zhTORZa#gGjm@8QEuCLX|X$5tienXy?%c3HGeMH1gN6#!%xEkcX+g??0H!LkDJGQJ? 
zFA|&;fEbS)FY3=ZSsK+V)baR|z^I2TkhAmnr!m%n&P2TV*ohRyiz0Ekk=Eh8y@#pQ zc*nwL6kYVH`BcwLvzy-~R%)L(ryxUx1vRnQ0S6kMK0#c+R(W@@Vw&Nwrhs*59|;lB zRri6_1X_(fV0P`cNp7drb4BaC8htcpkWVqswE_TnpqeDJ&JErK%jK%xfb&#zBg)B| zv^$d)$%^rlo2}kv+Ahb0gtUMRV+l1{EBryGYZnMvBCUkg74p?dYekjqhH;k4&-9ps z%JM(;dM~@8Lay_Hm}|VtGT_>N#_aqPbt0}c9Notnp!5KH?)~~%Nrmmn*NV;W@>|HO(@|HRF65cnY8iM|pP^y@$V2MQGHJ z4R>4U_4QtD-&fD|tf-w8arBs}K+XD|_iu_^I>Hu}QEG+589Q_=q1u!z`k9M4*C2!4|hCU7Pk7w*&k^350wHlkz zkm`Fnq(`!8U$4IHR+#X6>OUr5cLZ_jNNBh^B3djIH@4TIZP(bCJtu{E%1h0Q>do;@ z$krTeE3v~#L!me?f+&FMG_%TmWM z(a#{37>p*T@1|c-A~4X9RcX>bFb7Ud4%T|F@E+LYrHs}_tCLgF=uY19F|8tjTZAq< zLCta&yWxER5EOMegvWcNA_K!O2DlwY=l*03QQ>fot4}Qa6)tnM`HjBiSwYszmNF}- zX)5UF#D%3}fj}c^M5RZq>Fc0}j_edEL1071x27ARMOV6|Ow1Wu-O=G?pL_Bixj00! zJ8P{o*SMEw7I4nmlf*%2U>E!aOX;ih^3`Y5NMzR*>S8jzNHYS2XB1R(c4%-;<-bS` zxzHl6FWttTZFQId+88DTIi6cLl{{95|NEhg#I{FD#n6IXs}f<}J!)9TL zN%kJLZ0Q*QigWF95K!QnwRHORyn2cmrN*N|;~n9gTTA{_K1GRu-zXMO%9M_|SUi_p zXtpR-+N@uNS*@Nl*OK;puKHD2C!cL!Wu;)Fl@M=VJz!1y7T z+I_i-OU2FkCqA_;p_uX2E&X`bLhp_&J0UXrmaluko}GJEtOL~$TIIx_G&?_Ez9JBx zZWIk@`6;-QOi=_jnKdO*8L8rO%YRweqM$!toWI>BHi;%G0zIk`g4D|)-T0qt35{#N zqkzRaqLiRrMW+iPc>_IZFSHOxlI01ww&ePjc@~+nLwL+gR>ql0b{cVNN~bKnSfbE+ zM5+9q7Uig)Uim5X#hF?mK@RWmKV z;)J}a)G?SZdNSI1ePLe&sr=^tc(|gly%iX;5`I1)!NTE%>f-8k0IQw91X|jbKr5HC zQ1O}(9aSkMHPMk~n3zk;v{8B(7BMVU6k4Tzpi zLv6s;X6IkoW&vjxhK_n$A=5_e2X!8#OAhps68-$G zNw@goiH0V<*LWAsHTM!3p{?NnZE**#5{_TNwxyn8nwV61LkAU@$v|Xnz|3nhoULYy zD}Y{MqR!O42xbeoaC7GuPYg*#~YM!ej^LvX0kT|vfn`8!!0-(6m~p}wKq zqsNH2(@92jQ#@_t&L2da790Ag)sVL0izYMfrB2`D3OyJ7O6fBDeoN7|E=*uxV5LZ2 zMWi)bM#m)yCaitIuV~)7h_0W`6WBj}NN~Uo1CqS5E$~RT0Jhig3@}_=0t~s6c_`90 z`7=*hvH7y5%(_})6YE{gT25z%*>~n@7LAf+y{JvucvfdV+3jF+$IMg?AClA1Twi41 zH{5)`4{Y1=jjT2{oBYg+{DkFLZgg&5JhJ1w<%0u@XilG8Wdt=)t2cpISYXv|kT;Gh zj-bd2{;^fml{BqvVw2lLAI?wLEo0zq&DAwckN=|AlCf#l3@x#37&6D75L!-!8|Kff z$f(sRE9xR35$OkB&+nn-?iCkZF$i#K81kx282i0F?B8zU`TUlnwCkd!eI)4f_HI^6ji!Sb(=^E$4c|LV(GD!3}3xh*(wabNaQ zf3vZ{bzB6=zubrC0j@SNG%t-OL%Oa0XpA^G(X%UIWgcT9G;g=RdRDU5WgvtU&(H{L zb?3Sw(W1GzZWFb)iFob`Jm^gv?2sM;0F2`k3Cu@#vaz_HptJ z*>2`|#MiAwdlyKe2!yw$;q{a&TbGt#HF-$$&Jy4Lh6CJryhGLIYA46j)X^}Z8|ECv z(tT-w5>Y?8<)v0D9q5OR2&ONO*wiPb9xsf0S z8`q{4tmz{Ev>4p;rn`0DON{x!MgV&mP2iEQ3sLXRU7*X$XHDquoVz|vAwv$zoJwee z$vmX@CiPoo0F|ZNjy#$n7pq9+1I$kOP^4CHm}zgE*{niMCKP&+yX-ZwkCt;DE&^n1 zg{#O$r+ukvIm5F8^O+U9l!s|@rInaDy}24s-3=nnT3yyr_a|m(Vi!_ICjmR8uH|@G z4W10y;r?kXsT4=%H@oT<$3idgP$g4r1V|x#i_1bMVe>&na-gZF&vB`!QGlWXD0s6XS1RSzu!jm` zI@uf}KDH#K_LfhJ>I6CbB@S7qcNIyqrYgPXggIvciDk%pn>&y&y3}}mq1L>Npve(W zYib%IH9fGyu=Fbg?#Fpq5D{=me99gBmCFykaXJMv8ih$Kbnr837>kpshlEK_6l)$w zUW~?~Q}K29xAB}XV-Q0&`=n+ zb_gIDbYc2$s<`P~PP%LK7$&7=+Dhr!y3)B$$+>|u1lzqO7>1gDT(=Oj!Xb_=*5EOk z#pb$@@tqqIY+R@fVkiTQ2`Ko^*&XZ4k|hAkO5Q^flLK zjY;?$l77+s=^3huQ<+1}h^Fa9ljf(i7_EZg+DtlbqR?!!y>!KK^?aR9cZ*{#H>~F! 
zh@&A5X97o;Zh*NAre_MOwXlJKU^bIq^KGEGOq3~N|GKCEeB=Oow>!}46(dRd2qwZMi zV7$>!SJ(hvT+u`v2u8%4a)-&3PdT)zD%fY1lTl znx<{dGn)Fy_rtA9)*hCq{{39F^MA-X8z$Z(u>-NQk8IYbT$#Pb+&P$ zb98cWHu}%l$$G2~0stQxq<5P_v15^_CO=o$aB~NAe%pD0rqw2f))9{-C6f?TjcT}m zZeL6V=nVM3`F+iVJs1@V06?iM008>`{fd)`wcTHHE0g~v_~woe%<$jS&_4o)K@vdb ztv3`x6rkfOW)m<8_Si)T&X@XYGzPDC+#T&!6BHDQMqoff-ezzT9W&Lci~)VSiq&i) zX^C2O%4v%WBY2*lbrVVRhC0;e(%~{1o25aE`;y1?%4fpHLsx#{_d)7q6-xr7w#KB& zx?>J$g=Q_(<0E)dQfbPRJ%}n9JPS0+6bt3#D1#98i6;5#;T9xV+PMEF$8|#AP;dNs z&Eq55keh}pv{b6X+YzPG5(f*U6`9HJgUXfT0>VByk}|@T5eJ@F&KLuQT`781Rvz&3 zB1^+zSt{ONjWB`ggGM99CA^hN!^yM!C%v-^S6U?60KPl|vS(C^%d5!mt+1lZ;?D@7 zD`=M$spBaDwc3{DLAFdJj!Q4D``jeRUBZr*@Ls^CZgHlsb3Y#^NRY8N#$jO%zR0loO<$m|Up<#rOh5mm3G} zGxCEqwc@JdKPBgt74XckE9yr*&^_nqfzKDt;=|k+iRFX|om2$nHvSyfDHm0nDbJoo zgGqFwm?#Wv&Q?+{%Q&WiAN>h-nn!;?EJ!#Fl>lTQNm~)5F_W(+lo^H3MR+veAyo7G znZ=&^i(a}WjU9diz0p^KULNFH^)l;4j}UMjAa*I(-x@2MECW{?Ny{n{CsKJi@zg|` z)eDgV<%#iy8#l5|5EyE-&dUoGMlo2b4{*^rHSSQ~EQzTz7T%gJVf)}SsH8yRe#Bh+ ztbewN)APA^H*%UIwx1ZB%Lrcds#yxeMDe%$Rt|DX4)Q8zw0=^Csa;U2IS%n5lCOD+ zW5=GzQG&7D;UicfhGHIyb$o&8SLmEJxy3$Nx1OD7qQKgAIaWxp)udhQo@CF|y-pMQ zDKoh%)xb=3IKzlTgJ$Gm@-IUeiTSVQ^~$z6qR#|%+x-5l1>@0=zu7)+C!6Qb;eD4Y z*?yZnzSDU&i;DHd;kCa_pK+!9U=k=lSJ2(LNtVe5gnPTD%?I=6)2!v=_2uh9Lqd&Q zwAR)2X&>lIFTEh0M@gY(949x~vBOFsNW4gwuB|CgAP52I+cNaO>Y+SOCTkwk#?38ED$59Fy2Q=Znr!o~F-A#7=hH1O|zc57GrB zCkOHqO@G77;_F-ogbEQSIRi1R@2BqIuRtD}v4s4S36mnkz-kSIa$keZrXI}KO7^jU zxD|dkL|Yn9uP_v7bC<+<6RNJbVK2ELBs8pm9Z7^KzRo6y@fM)ij2a?oi2$Ll=$Y zfWpWFX>cO9{v~t8u7a7w_)OBPxQjE1n*?0JCYFbYZz!1F{(-XAj7-R=;Lb|wRWkd< zeJoB)W&Hz^RV0-`9FH!#_BARZMDmGBk*U^Oizo7tU}M}v#(GtYF)M4FbgX;?YL&J& z|F*vw;b(ja-(jM+IecCYyI%O%zita+_}(5b8)jzcXnS7oD{6Y)T+oIbXL@e;rRaL@ z&n;_az>IbK#!V^JmaJIk8MRntvaU-mFhiFQ9<%-Z>=jeU(Re42!Gdlh`O~A}A3$TDE!h$B(m0&GHaiuu?%J46 z)<`N4EekSpd)Eue9Z7u#tjIfRbti0rAcAu_UFKp znJ{5X_)sbFH0}<3{d{DY8p58afytwX0mHUi=mUz-x(KE3gbmgf)LXTj>bjwb($_?? 
zk@_NNF*A!d_BrZG;sjQ_$@7c00@oQ?M?XcAM7opPqDYus9+}z1`}aP!79I#d?-S&B z4|-mWJdKXz4|aiaaW!CRlroiaA`m3z5fr`hiEfu8C0ET8yNe#T^?ash?>{U`_bIY#A6J&8T+HG&Y}d zHzW^YZVx^O3X-h9EA7^IlRq0>VUy+vkHGMSYU9WAGsaL!pNE(3Art`cV~N|WCRSs` z_bpeK7-^6|5!NK{8yoPt7{OeYUAL!@bJ)9X2H|!L3T7rYC3ZEFO|(10z5r{lQ)Nns z&@QfD#MJyd>j+ps;Xf|K#(l-82C)R9ldrT55DLSB!LsedUS+90Riv|6q1Vfh3p*j8Hgy-GrZ(x{d773Ao?xAxbm3dwXM;1S2!`R zjAd?dSVaRxs1=$yS}yTMRQBd5VbX!1%Ce{QEo zztcNhMA->$Cw(=JMH0B5bfS&1_mbwkJ;+y$p)@oat_6@`bHY!q%TA3JlDQjWZr;5K zpRvWqoLVmyc|5?u?X|kZ2z&L#cg-^q!mIDV2PUk25-PLebIZxSzKgQW!f=1t?^~c{ ze+SNN&}}f}ke;aa-f`l??^dVMbT&4&Qi7K9Md`qxe$;e%#Ci@!<)7|@AvQ8XhW6O5 zCS5C7G$;3rU@v0C^sX^+m-ov!GP(d**c0)5~c+plSL=KtK@;7?T~7wxPW2K z^f0k1q-)ohZ{cgG+KHkP@zkCR-CEA(Lvh(m?e=i!3q(wXI=(ekYL|rKD4f=eO6zXP zSIj|pm3|FR$@xJ8`MhWTFDuwptQsB95GErc)E87?9x_3b!&95jcbUAt`@QZ#M`7GExa2ZGGMsp}{BKcUoh~4Q7zFlxNTrl~j&}|r= zPbe#j^zphDMNk#FF8|)cZ6g&vT69Z(rgFsIWD=NFLmp)Ivg4z-;dg!;y23oBl7Byf zw}F(O_L2?og}?im+xr=tdl}47t+YRv^=hFmD~uN(+@2{g-?w@dqr6dTZho+BgxkO)qveJ?v!22W5&drokGw8+4##|*U9B&)=x zgtKxv^AY68;7MtVo@yYtBrrK;zNu59eqC9o4zEPApD1G_p&|)h7z?EP(>CHqv7~6d z_lFwG(Rcue2$=7oHqmBj7qy~OA3Hs77Z{_tkdR=V#neMdKE93P$zVAvc=DLE^WQ%y zUQx8>FYLh}>y`LWgdMqkZQZg?f&>VoG>MbM01d?n5h?P&&LP5dld`*nx>`4=80(Lc zBv2L+M-pYV3J8Y_p6T>NY2vAX*46Pzna1$!7RvO)T;z2af1n(VO-Ykeg=m#KSws9E zlezijE+wK4H7(Lftqy1z9H%6QO@zSv*e`h!2~UTqJx+?9PUUcxKw?U6WVi5lt*P;< zCNbTs8dX8hbnn(s2GN2Xbjh=bCr79R1v*L3Tt4?tA)jZdjSY@~Cej~QX!9i9trS@E zu`}W~&+^39y0137B8vz%(c5gSHCd;uzB9_7xGtv*414h}4Q-dc_lloy)!@mx3z9#X zqt*aLU3@6Gt0>K3-D(L^;}t%fHBP;wBZh9fI~#@tR<))f>PcE-k=v~yuR0Zly$}H} zV*WXsS6Qh^mDp&zwv5qz*E3H12_FKEtZMS<{nA;3aMaKXSf|Cy_IzqPc~=8_{Qg1d z{bvul+p(+(1~TiZJ{jRls~d(M3H6)OU`T2C0X_ zTAi>PLruVJfOq2$i=Zf!0da}d&R3uqG1XcG5ueTcJ3Ioz%LRy;Kl0nAJT$7WI0Rb* zgi(=g#MkE~70s2ZV8!P3IFy6kce9s0;k=Y!>^?ywKT;E=^ibSugvTS~@iO>CBYfhEUa*iL-Iaw+ zyPv@7kbjeVW1VyDj+!Nc+Q;xr>g>zj7P zEl#6HY5>Vv?dlie)I-UtuUnA$$7$=I zi0n!H+0EpktG$91EO_iJB7YY1e!`xRt&r5c;3pPoMiFh4hS86H{D=KrF2_K=J;)^0 zrWv=4qBv6a1_dhF#C+;FS?8I?-h+s(W^9@$r|ysAUv(e@+-|J-g*~`D6~g|!JbIqG zAAK2>nKdx6B^M&GF*m~3{WkLIi(w36whf8h(*i|yHoG<4<~8YKFPl-9ZN=2dk7nxG z!rd8e?5|O+DRg+6a4ffq{9L^ta&W^Nb)W43^1pMIp$oI5=O8VRU$2kzT%}EQz|ss; zs3DUewo8~f1)HM~XYNv_H1_lHI-~byLhCS1$}1fQphyLcy5lIdlk%gxz;Y#L$q|fjAbw}-JkMaLq}q%@iBjW!C-_r zi>_z~1+wHkLk}5NHWdYK-AIdxikxyI2Q%D@FRKkwkgw8h(z3!C1p<-zxP=PPr?2>jzvCclE^ z7ENRH&jga0fa;V6PsDBK+n=n|>SJLumb3Hs4gp&0=6A{y?Din-wi_1T@-4DPu3OkR zox_=T$5B%%z9^H!8dS_vo_{#9+LVge2&lWv9ewr;nM6Nct8Egg2A-3plj$=B>@^9~ z?*bKXzuNGnnaWpG%-si~d=VMFas|oe(r@l|sT>Trdv_to&J@~B<3%3&v$g&kwd#V~ z{6X*K0~qDQp8YXM?WR*^6L3wM#q3eZX2^g9i&k1k3ps9XL>&mryX-xeHT&{*`*nbQ z^jiI~cnRF#gHZr1cEP?S9`54U?B+ivFrPUnftJamTzX-7zd8Kn=I0>*wAq<{F4%d zF);7gdM#4H8`>_1!0SbRv>%HVFxR@#Zy{8*X;LE5VLG?5Yz27zn$ec*$8PDA-q3#) zvsr5+SA(U}W`CjYDHX$3DH9Bq2bIO!o6=I%a$JnGlfBJwZeU0uwaUU4!-pEy5Hk6OC_X|(;dOx6QZRm5%3yc= z@v*9odGV<#F{L~sJru`gqhQ+kmoMtTOB}h>A>b!@#Xov0Fgy$+>$V4sAresvje7eZ z_PYH~S!?b*{J+@0VUs^GgH-3y^cz1T;WP{=9C=J2Cu1lzZDoV1DCTijN<$CF2#}Z& z+chygh32~!KW27Th!n>ZimJWAG6TLJOz1z?Cy|~tjUAMj8*okBJ4YYhn&ukq#le=^ zw(D{UuZ}8Qn4V_(n5>0bPA`RA@Vm6x$pfuJEEP}Z)x7ortWo)L*Dan-{mh+~{Os*+ zvi4xL8EoJOz<^gbn=W^u1|wc(by_mxrr)XQ_@71=inX6xaNb7li! 
z*Tu)n#X2bv^>prhqQdrnn7selw1?mbe8bAhpe{}-?l4TXtE+khbo#`^JzHn2%df5l zEDM*Q8iZBd6t*MjYG<&L42iNaoqp*WbawVdh#D_#WlsF%S|s*ImKVFvK$4u5_~`s6 zjW#fL_lo54_1O`;{JIkYGE_Sallb~mfS(%}x_MHzxI-FX?+G{rEojpL7c#mn@Pzf> zfR(qIhR5?4uwDxPzkt=v$<@KY?ia8QX?yZN&owsPRy{g8D65ScUkh~ms>yUYnLO2Oi zFVYCUI2}T}eH}v6_1Y8XrfmLT?9x^N~fI6oEb_(@A_nSPh*f_+sLmBESI#xM=mPNP-)}yz615^anCB5AdE{b zsJYeFr8{ay)EmZnv;VdFVR#VZ)=&?ZAx3ac@fRcaTu6GE_Lh1`T?FN8N*NfaeN;8l zyWkg%Mo5lcD67Q15_iYvkD3(zp>9ww1FMjzdsU&W@TI5!r^UU3{aU&!l3Lr^|BQ}R zM;y$!aGp1Apt338IDL{IJ!M4Hv#xjyC;lT&dqFv+%r9N}U{bd>+6h!0 z`9VmPtd_41u}+OT6^_H8+i()v{9&yz0WUnBIG9CidRZFAMh!qkMwZ78+o-L7LydT1 z6n}vyUTMMsJCsVC*)7p}#uW!4kELgJ?gK_ZRLLB5RNp$eV46E(lclO?eLzCq7(L;p z;g^_;81r#G_834GgUbizyXS}C5@?^nJ_X3EDl+{k0VyxcM8IM!n^qW;K5yTp^eYK*zYKuSVW-LMI?)4pSZWWnx7Uvjhu=ySDj&A7&928IZR!E_Bp?|t6r)g z)`oheag0Q`b@G{B2-Jl`qJrTVA=dNz`352zyEhZWP>Vc?mfjR&MKXSoma$Et5?Pf` zXqafKE|R^f0U~oVdI}ZN4MobIRGw zsvcUCazv<-->Se>|H5&ktMX!j#Bl0i1BF(1-(X28LKU#oFz9}(P2(3diTb)jP)B(h zeZK=YpUD6aI7!FPh#>WoTHj`L$krN2#prfo8Ul?$9g*be0AObD@GY7^op2-sk$oCq zQs(H`Q$YiPqETqSrAH!L{5jxlIbp~`5#Z{1f4QXQWJZGra`320M=Vs53d3x7PU%3~ zM|H#TTDo}`?e{~C%c*YcRJWkIz#>u}5t!A%6cLiB9L+awbC4YAB;fgxjF(B-45we8 zPMR{t3nh^f46+nz_D_6Qmt@Teq#%`J?@Hz^aX_4Tm}vF+e6CH23=D8fs9ZVc@}v^} zChfM;XCeXh9g7v9ldEbhe`Jj+Rr?7wDHnczRf8A6Dggcm0w2wMV5A;Cd|vtFcr%|TchRlIy#$m(~CM8eI_*V0pirS*{^d!gPKX*Twr7;fPh}R zful5NxGC#;zI?c?47W6V!G4aXI^&D&A3s0S8LS7u)}9~p;MH6(t?WhLl9vI4aTbk0 z7$6vKsGuJIyG*t!dmnGWQq0bw6L17r43O449y56rjG*}xM5r=SL4?)2`)~w#EzH2L zj5Di5r(8{x$s&pg;ZFSv@Y+Hjyn{glQh0a11bBL+cj*%BN^R63i*{cE=pdEJGeKmn z`NtH|zty@rcbc%+LY^|6!?(9QS^L2BcVqH;Mzyb?z1)$1Xl@O9wd9ddMUQJxEjU@m zr<&KXvMcy+1f>c#r~Q5~Mp5EYi_Yh+5^cDah50hHK&wAsRfikBb8oz=cghFzMI{sohsxT{2Jk2e*C@5egVZZ5b>@tjK1VKbQq=$}pDQP`%YD;( znNK`3O7u(ME4G`j8cilg0dfe>k=+Fg-Gd#K>!48jpA+9W2EYaOy>;@;h}EUA-iTs*Ty$kwlPJ9)zmG;7)_z6$F-RhELpyk0Srqnr(rmu`DO z9q*S=>7G510E%Yzh#S1W$n;K26f+(_;FC~BWWwUVw#R=^4AZUfGDANs@NqhFxomGt z$O_{qwlXB1T4QZY`+{HeHy;y3z0sZmoWG^;a`PMvYkY}d{u5sOYM0LGe_=Z-I@|hA z2Phqz@msFoB)Y~HmjI=)Y%6<^(mQTiJ6xTmYzeFQ5aRIVW&z`XUwD};qBGZxGaE`mrT@OcW0X=~YkYKV{GZTyHmLbdREC4MJO0%?$ zD=U3sS$cOivX|55F~5zuHA;(d`+={Faj7UDWO+v9Mo*wBB(0gKLwQVy(g)4CcU3^| z0>;r%6=mz9qxDGYohi;B!L#+Xy;iPL!Deet@WCR^T7Vl8*iYnI7RX_|z7E z2nE(Fjdvp3OULrirj15QUaB+}7E`;CO)weyr7Ttf(yQu;fDtj4@n*iqJ82P+8MOH~D>mg`aFE|^D2!eY#c=V-^8|5cyf)6p?SE?MX7;B3T>mZVko61AiLYS{J;S3pdW*D2sw1UB1kijZ z%kZzTRa3r?T>A*VOzU!@h-lD*HijtD*#`;^gq+SyC7OhT*!uk;?h>x@M-0*=n<7Yv z|7&l~bVog>d?BqSZ`y?=yXVNU{ino&H$i?_Q$cYmLK%WoK@RO$m5O`G8Dqg_Us#zA zK;l}G?graQCvJ_$eB%cz7}HH@BIz07mSH);NKk7?|=LAUTIrHZMqukV;CSQi^D>V07pCmLTnADP<@WHqf2%taRw@@o@ih z(fT_d>FqhV0TT|;o?e!29pqzN6o~-b^x|BZN}+xcMMI1u-pwutQk`SWA^tTMzfcH7 zQjIf?{rK@vQ|(0{*?8wrmxp}4+BQ~Xt#C%)du1qELufRw%ooTxy*S913$HXV-g3R; z0f1&n{>4T(rWq8I%-^7nu$3(g^&)PoX3Nyl^FE!qOsp7w-|S3iNiq6u0mizS`h0?= zTdTZn4u)$8)qg!~KP=8HdAAscGKfCzKuQ{Xub&DoTqUnK`dvC1)EQJ`Zl#qarVOqk zQNvOw%uDmM-%?9edM-!Pr}RT=!&=E?Vy^M1QyA(8iwN_d_RcLHx1>ThXt0{;T5>(Z zBp48VgMwP*SA3Dy*}GQ7^B+zf!NXx%5_ZwlfhBNyD3f`IxdiWZpDt#^^5Wd?`-DzBklT%cr3 z1WSe8k--I3g+>kdR=cteS85f@^EW~WQ>=oH6nK2tBcW}CvdBs-62$RD=JJgc`Q2!A zgm|C&gOIThb9RwbB~D9R46a zA?ZHi1lmW3qH?K1s=|n!+|0$5VWE)~FpK^Bn~uKlA_Cmb&lodX8+~(hhqE z!u)D%o7b*3M>&_FZm2vL<2*gYsYSo*YE3qPf2Q({#i*d$SISV>mHLy}71v-@fw}TR zEO0ktLy`}2p#+vtJ&9IkIOIi8FyH{A;$s$9(b_KQLfd%jIJVA?6c1UR0{%4Nd)^_7Kqa&YUJ@2e*!Vh9SMu@5yi;Nfpj`G z5SCwPm=zRvs10fv#aNIxLApF+U#;Qk`5Ei3J7#Hz1)(KmU1m<1EVTWM-5C!HYkNFp zNHsXJBMDqq3e(t@U{}VYj?R9E6SC3Q@pL)t?MlEZPItcp%DjjR=DP1>Yay5P)^_=@ zFw^{WA{oua>5X!mOW1-Y_G{tBF=poLi*K1SZYX=ZNqpK-$1FUe42^r^V6+~-o=K=+_x?8#Ny3&=-8eALQS5^l)D=+teWwqrY| 
z_Anm~`Qdgf(sfGfWa4+;L{0Pk255-MZzr|9*h34V;f$V42fiF^a;p&P(u$tp!Ma+r zrD}tyCW$X(4iHvrwLpV+{(MHAIUO}sk%OYW+1OpO^0i}Kwze|=LNdk@6!GO+;Q?qJ zWGjO7oqV&Ph6gbO>>u|vRn8oT<~=R6BWd19BkaJnAp|DyZ5}VehOPzp-cB93%){GG z6BrFqF2Pz#LH4N6Mu_{^HHAgRW_q{}f^0f`$16!1=5z$3K6Lq+<#o5#fw3*w^rr}^ zCgke^5(uuPdApo8>_=3#PF78bY}XNhTHqPjSl!gThwmrQi#(Az&RDWTBnBo zWQDS3{rY{NTuqgcx}j$LomZZ|cuQR5k0d!J!mTul%5##BQ3PWxMAHs!n^F9Fo{j0R z$#F+jpsTQJcb&~U7T~@qH3lHiyO(bx~)3}H5WM4sl>TG@R@p*fl3t)2`XhMOnPMW z(A-G(MWs%hGip0P5Dfq-|siy@+utZg7ix^Y{pEl$Y9{yVVJ zk&z|E_`(Bymh==0hgKOpw->LcI`|0lm z&h`$0KBONDr#U_$2pAMH#*KHI+PvvYk!*WnBrNC_7zNm=Hva^x&G`KQiTh%zy4 zTovdnJN%wXk^Mi*wqJzVjtu4Hh++G^sB|iKX zcakVX$2^t(?gRz7>nP$dS*0(z)e4(xnth8UG_=EELLtJr34{ZE2MWj%5Ofwy!7LDA( z)*e^1jo%<_GK9IrVU738bc9alS*d^TUUoH<;gapK_8^81IguYxwP4}2ZH*X?Ns{KF^(H3)?_h?kV!?T>19QA-?LCCDS|=-yV3#-mz)Y+zQvU z5J+e1SFoo3T}*?k!e?U+Qi#58s<|3na;aUQY`>OgC50_u5lsMGce4i5l-8LY}p z){-kGUGOkxR$ELhSBh1S_>2`AyeEi=Uw2cScUJ)X151NeZw}MAwF`7Bv_1g#GTgp9 z7}AFq1)KhkAgwUKvr3{H?_X%HwCqb&~ z|E?Ru?CXC!d^+H!#!idIW%5d_Z~&c#Hca04Bu;IoMcavElML10z(T zHvfIha5jn~Q$l$zADQGtP@tVMyCL6~Wv9Ta)i)S1{UpIRK+n(6AGZWjkZ5tEYwr$%sS8UrW+H>k$?Ng^O`trN! zs;=sO@k}C&gqEBOFQ5=RinCIAv=N{=B4w^28;}TZY$Ofw;4QI81YQVeGU&nNKsnVC zKoqG}>~S<)mtzQLwxCrCgf%1l!kW}B_gfG>3vL5@u4U-pwyqsX`lHNN9mcg;3))P? zdka9A9gy4U>}t`91vh!hdQ@>yq+yvV$>haBjOb=?r0`i@iR<+LBt} z+v_(B$d>3CfgFB^*I;0_MwYndWq;x~O@4#~F(8A^lhn;}eYrk=9saQKq=wbonIU)( zO>O6m(qQ23VALQO-%v4t1a~~7@|5cw!rAFnk!qFGL-#He7L*-^wgwt9FxvvM8<;Sn zO%16(zMfK$vEKWROf2WF>t*!DW*Q=hA^}vUa)uf2Ns@SUelVtx^qNi>0DwR0w_5w+ z#{|`1>ix?jUMtbM7E#(dL;k( z4)ymR>&wHcu|rNF@^jEjef-|pSy@rNy)a~)ntM5k+TXeL|M}pD1Vy;*>5P7yBI~`# zNo%SYrO(8~A$@z^Y^w`4B8&@iNHlNSyWqpgahA(I?RQCzfuYXljeR^~A$0#wwYCM% z$Cowkk)@X*xXm*eGl>gH?^H7*rtw@0-gUqz&cO>!zKDeoS~TKY{5}1=-vhl{%7*-To;`)M5s-MpSivdNa((R9d{ zEScme{%|Qg3?ZFFQuPpfgjQ$Spy(8DsK%rBlhB;sda6J)VN=Dyv7y8a$))|sC9#d3 zA_>mx!%h~s{bO6jlxhZo$*AYYv9W=_}AOb(7s6y5#FCPyZR zR4<%UabpTET^bWON+a@JQp6u=@DYf>CfLLRrm^nS?0Tn*3fpnB>exVH_LL?R zchl)-+REwZ`==~A?<5c~hkSbF4hmgqjA0~ogiP){T9Le<^O`I6_P-V=R2oxn`T!665K1%L;Ko zV=ckE-CpcR7}4+&ZZjXXn5JT2y7{Isosp_HzC4ZS@2`U2kjqFF=ZCteTN7kB!g(c; z5tv1Tf%`^0V??nb=dbxWaIQ>U0j6!E&rFlMGf zlaAaMuGrrs1B?fWm2#Np;mPg;+rNaPq0oggV#iK5jCd#p(b*ytP)a;76Mso9vM5LX z`?}KP&;wbTs~7?am=rQ@Hqee6<_@(Grh#D*g1P4!TmE8@PAG0qp8@;4hDyI$Ksj~= zCl@@zQ;dtI2b#5m4&Cv^1*YRZ>6TlR0NXtYwMMA!(>woDGsu~%HVRX$?xUhq!m|4H z@cKS81jk_1`P4a?duO_czib@h%raM&*>8uuk^(YHG+9b=&Ww7{0NS9AiCHq1Lj~3R0*X$c z^?B{L8w5lI2bA^w28hQPX2o65CJBn809D`NH0@cgON{`L>P)U#O)O|%Z*{$)_`ldL zC087GOldn4g%EOEw)qAmrNp*asV-}L8-Bwke7yA6aZ$^5fQsaXL?dhq&7am(&GmEZ zpz5I>^Gb=E{BVlhuBcT!if~e2FRHAE{h0e2k0*BD(YF4 zfSwbY+31p|T(E%e3i)MGS)JnV=UZIWA@SJiQuxIFCi~8NWn~ThWAOo zFw~zGzCqPF{oeG~t44Gd zzkMKn?3ihqd|;O76#FhVzdqK8Zhu6Y6m^j_BW(Q1(h}Ng1FMjQ%tMqV(h{9faKDzs zCii~zTDtGqTM)AvMUqr}X|m-718Q*d0ISPoS^^-5;ptRD{O6!$^)+alglpieeeH73 zQ?y0}%|Ml@xKZQ#KA3@|`Y%M?U0=7*^W_^U7wLsG+y3r1RM7Zm4IVpU@4PJ5wcxBoppOe2V53QH*8 zm1yS;bIx2h$qdpjUD4ryx|UEKTzV!>!K)6Ps`W-p$<>MJh_h72;muP-;8)Ib6j}| zEn=%J7rU^_84I0}`!T2vJt~hR%k)WOU>iD)xh*9vr+QkurJk(9H%At%=(+FIvYm-D1{(pD(pFp07aC{Vij-!228klZq%g7J5&V9Br2)2q(QLx z2~X9hu+sJ%>Luh&bS1_zsaP$70H+4Q7$f0db$Y_41rT+y8|WHf5eH%^_=OQ+TarJ^ zmt`r;?DHH;0J9YmjzlHuk`F~cwdEkt|6YxAC73h8hBW@n-i<=NqscV*pbGPtoIj1K z#d(|e7%Vi+H0Q;HW7<;X{?aZ!r@VPEpZaMTsKYo4g6?a`ELitTjmW{1OXT9W z++07$`F+0&x)J(3HQgk=Fb>UrB`x)}@X)SDMbm&|#MGJyTqa@b>Qp%t3gxWQ%%&`% zYaw6zl-C4QT1(=~2>ZaA$)1$bvtG%O`vjN#XCQ)xwuJCic(7T4+Y)=O5)faWsy$;u zVL75;#k6V%(ogSHSeeVwx|+g+?pC`uTfsi5K+@`L#3JPCD20F!Z-pV681F=PKHexf zYZ2O@_|F%%(HzX&US~^iIReaiswRx4Oo|n54uU6WPV21tAQKWeh86=T?K3K@OP@~=|OXII6}L-%*{+;GfWNHlVWNxIj_G 
z_x)+!AI>CnTD1ucCbMVPI*TD?x8e0rEuAjvNQg~QXloZoa5I7?AAB#Z${}oVP)34E zjW)=T>iDO%Ml1Y62G{6+Wx5pLfeo5fPMnHui$$HGrq>s7x5B0Y{M%*TWy4X#D2YNP z4}Ty~=Cr_3E7r26s@H1fjlr7yiBQE5%pTv z=T5AxofsyW1FGsi+C3e{`(~&*_c6PDp^g?061X+RRfgWI6SY2+Gn0OStTw!nv&&!= zWAu2b=2Ffgsm6%Fm39|(busoXOus0v;$k&*T1+%-0BTP`5PvO}K3wWBl3$JkORG{@ zaA&4P&xWXQ-tWK*PIdR4Xj0>R_SmX?PMoQ|*p+%amZ3ctQlnsCu1BM3np7sRG5lwV zOlLm5`C<(lvpab2ZsAM5M<#ZH)cZBR080o*^8*2ygq2}&MCKUCZ>2f}q?eGD1zjmI z;QX+0pY%8Yc*w`L!ECGDo42uqSCc%0zFybh9pCzW9z&9Mekw0TCYIt zv+S}#Dgy>L6ZRh$7R4q}5VSbxI8{5NWfcU6I=9*Uq*CWAgksFA?O6xCETyEBVVl{$ z<@4ypft}>oH743P1OI(&gJMT`DPA=Pb*hVM?J^MFN48V#>?av5EBf@rmB#j)-|H^!Rpl{Z4HNqNz8S*nzc;#ZQ1g@?RiWy`L0 zz&A!sx7H$+t5{@n2}XGuk|HqnLl7ZZD77*&6Z@n0RE5EKy&R8!5Q_#Crx5i9+P!fj8_qKsE^zAk)o$61o^;=%@#ql_Jn++vj z3O=me1+8>)(g2m5`4NH7{82?`n~lF97>K@Ex|z_?7GEN(WlN2+=d##J>pk6x2aqTLSu#Gd20+Vi_i&N~s&};4&nW|yU?I``8a`(Z+ zp;({3$N}rgb>9?s}X@W+5IbeyHp{; zOwrU6o;E~NAobaehY}56K)@qH(;?wlnO>B6P z?TX|+Hle8mIJ9XA0LtB|g1w;9riV6>{b#Guqqj*GjQtz*gfX4Fy`E_;#F^VQ7?SZg zjDWr7!e-7F^&j+Zjdirtrj1;~5Ch2ym5g@XR z&Hc&rF*dxDPS#k!Q0P|`O&;ylS~0SkRso(1d&atMJFNX2JL4+wg#G8U{Um=gs5h%O z$GgNvv#;Mx;A7RV3#_LCx+Oxes{?6wZ_J+UcJ<@&*OEl2>qIc&m_}okaE$wH|4?5} znm?_1&xGM_Xw@p$%?SiRZO7Zzz^@ytFN5-@sOP;!U`N;cdB)D8VH#<73yU$8=Mc=2 z)&0|r;XTf6%KEBllz-RiuGMwX{{x_ub?L(HQM zP;0X`10Bbp(&WPFE9w<5*GD4HKp-*Xob9n6OG&OB<+qHLMXYjNkex9g@B ziP6-SxT-c{rsSN?&aPLsXdXZRoPH7m6uF+_^lEP#%p50u?^E03ZEjj$=Gan(eT9|T z@TxB&BFfW-wscGy*Yy~kX2myWR#`S^+EoRf(09HZ;NGpk zr_DT_83h_`!5MA2=9pQ4y^%US@YK<}J9+a_;8yVr>dV>TVjx+| z#^n@NN|{T$oranj$;$J!Opf<%qei$;aTE(K{vrubyv?B}?u0fY-Ja;(b~H<=^)FK_ zho-D{>ob~pHgxTluY$r_9KBt#FD%?z$w@R~lCR7k%-Gr5pcrzo7x??SlX^|4`#V9d zY^l5ZoIZ@UwLmZi4^JKl{R?h7#15)|v0NVPP#C=4qp}}oVUhLDN8p`6x1`8zYCS?K z=3dbp-bydhvEiT4o$5Wn+hv~>*UVh8VOvt%GNvTqMYYpvuDM!?SYyOS&hu<3>8vI% z4V<=%e0K-WyNN8>EiYi)s=y=S-J@E%u<)^p68IFNV zE_UIL8Y|e&UzmEALby+9B00ZNapTr6X-Md)8PqGS`^Hm6KgX9Bwe3UzZ{8rjYuYF- zx*G-dmqfNdsXC%*Io=>03Q2AU!B{a*%!mv^z;rx?m3VKO;CNgFo2>0~Oh#fXmhNKr z=DjU=i^`^~40U67`#qo7)-3s9wCH0Iu|v5@bJ1I3?1bKFF-uo@-Ei##-0VI6>rkCq z!$k4tKEEZ*b~`2&=z!S&gWCEBhq}QZaEjFVAnd<{`-WVPEb58Zt26{~ZEuqP>*bQ8 zEAh-fM(4l~a=F&CxW@HJZ>^-$UB!UOte#4-geMs8 zC=6S=2>*A-L0z;d$wPQm5SGqyn<2P}ralQSthi-q885ZF`Tjrsc~zWTrbpmmk(M=f ziUr#)Cuf|I%O4BGj{V873*yU$XJ#XQ+%vRF{Z?Q5t&?6G@53S1|8xucWV=qN3ApY{ zLcDVC)8bh&(*~%-d7W+G6h5x5*q4Ozn@AR8C1?bT^kKfdB>P6+{^rtejFx#(3VFd% zm>@7l8sD9>)blY}VdoMaH#Uj)O$+l^b|*OQX0C4&u1>zvf2mF34Qn1xH|U>(TD_YI zRx4I$hLwGsk0wVv@#MJY*so_lQ@0Bvh|t&{kDhVOXkB)oIyzNc@J5^{&DA$8bSNk9 z0_UkA9akCbX@A#=X4LS;Od}7gx)C1RTb^A*a2?8W@-Fa*@R=dY)jpye4J6*M`w%k} z-=wy_XGhlVIsZs_#SQSl)F)9ycr<1oXkSLq!M5Dhtk|w>HkH7PN64J4>P6X=D>2=K zfh@wF&HlN=dXg=Rh43#1z6K)JmQLh=wGzVCTS$dQq1xiN{4rU!-TRRnS{vI787IZ} zq-nsm9dsF>gZ}nu*tv7F2?J?23<&-G+WW==9EHNnoiE>mZ76ad>za$r>vLgOM43ZP zK520sH^5J~7OZF;61{TFcu~~m;yae2Ox_f%*%yLS!)N1f{7cvm^2oeG4pcdEDdDyX z>@5Xn#@74Ba$MLSp zez$dvr*(xW)SJV&{4+z)^=>ITA@!EeW!CYUX2I5cK|5oZJui1-kl`cNxOYFtbB%#l z(}+X0wUpaeH@I#^{5!EwziK=ErfsO`T|iZht(Uh0?dW`BTr=zMZ9c<`E;V~Xx=E=H zx1`GJVgqp-E^salPMW@%xd^jn)S3gd<}UT4H=WP8@vWt#S%Rh(hD( zLbrnY9$n9F>>A1a>Gz20ml8>^w7qK%P}izCo2Oo#WYuX($F6v@u4JyzI>)vC=N8Z? 
zzGULAe#4^jz$SI=DO|8!swV%qx=*ewI_@IyR^i8VbxU3rC)XLIP>Tx37;aP1uoZP!I z`W3H1GNa9?>`FA+umwgA$DTp$|L$K7yB|EgKre1+Y=a*G%Z5 zt+DB2?YTN+$J<)76FUJH(gNtoYCOrG~G5JG%{| zS9+0J;_Qr~0jjFrWzd5cZ-<%?vrTF4;j;dPk*7{x*4ZtrD zKSr<>r*2G6>E}{Lg6>b@EE30>wTdfp6V2-|No})qd4Fxt8opUcAON8np5 z0bC@z2>xMM*anHtrj`0kuCrH7X!pxAUfi9NmT;n)_;-5ypJc3CL}{m{6>0rX7Ln>wQ1ab!NVPh<8fh}gwClsR;A6=2CYqwmP>%Rf2QztcKNyT<& zTq0U=f}z(*d%m7tf_(32U%vC)`-08%&Th*hR+mqHWh*yQL%Bhy9zX2E1<>ltrCvH& zq~p)C4l5ts}Vb=p@Z#b19X*|p2;-3CCByyqOr1+>iQdnu@*biIlm_Nn`syLG!kxp*I#*5 zcanZxx3!$1bcF07=~mz0uf+qJD_M5S5tPp88KGr38WH7*SzXiN#Qy8-`^Vl9K3w$fBJ*{gTjGCN8{|~Eu+7w zNBJG(szw(|3b1grSg z>}?PnXg8U8^(0njT*IQ6HCc_m$-iGFf9*u$d|qrz+w`}~IDDtPIQT9*v`1ruBhc-Z zns5f6E?}f?J6Lep^yIH_%2CSy*3IyEd2myEA%?l9`^?osTEun)x{ygxl&_hGKPwL0ac!X0>Eh4|F&TmOh9 zg70lW0#|C*dWH-`nK#P}|ER)swHTOlT*DFH!MP(x_r(&nCb=DcT`b#} z^J!^=0C|9SJ#aH#%GRefXD6GV;sbDY0w-WR)vGVC53$xB#&0w>vtAp{bx@t2_*}qu zxZ9~+xt-i_c6q3VwKn|v56`)>2fRk33BmRm1$M#e`$SSF*{}HIbWeS^8+fq68O*5f z^L(|m;*=!{yGs>~a%I^{}tqZP}8_*d{5|1@2Z zUi=v;<3gEUFpiRxZ$=#QrN>q))ks}ZPl8F=wrF&~4Gz@onkVdo5o=;D_mWjzMcvjN6JI=^*I;bUcJ2?`u5y4y_^tu?})W6xg3=eqVCCg4fBthEcg~r!MsJV3P{{7WZtiA z0W5LrsN#Fna0KK@w$F4o_T1Mg_&XA$y=>fkVm=GkT+Vv#QCq7Tc!l0@-C8{@+$>`m zv8|vt{g3*0HympH(?%{~%9k~HID0)kKh@@9X7ofHXp9za$K=S0G%LZ7S`3VTAG@=*Zxp%>jPeJIK%avnt@Bf7$sDRO=QoNht4f_JT1S*UUa^$dM#!gNqSOg;*!o$KU)eQ zw&BdB&x=p0I#o&VZRST{vn|U9X>bEDtn(*8Ndv5~VJ4b{R7F;-v{e%bD+yzZ1mOQk zZ(tSDAp+1d-p(F zjf?g%%oY%PgCxbjvWr48GS3qI^ctdrs%?jec>2nD*{#?Op zp{^I(+jBu~{6(^=*P|p8b0-T(aIVvU8VN&O&N$jl)t}aW{&Quvv*dwGfE*8KgX|`4 z4d4mSlq=B{C|wo`rbOLukj;Xbegwy*iVfo8TS*b2Gqlq+(vO`Q#fnkJMs2f;no8(1 ztFPiISqPhG1%sDU@xc3bmLir(KeZZu#9uH6E$$OM4C>FF%orx`)kEkWQ3;8;EV)@D zQopVB7VA}3N-c-b5uU7&?g*=z`X^Z(`mEy4!R6M0NQVKvyhotM^NyXYcq=AwMa;9^cTmJ8(N?Dm`w8hRKJ<5BqUo`cmT^ zRI)@nWXtNkB z{PL>+TzzKr9YV-|BCFtJFM;vE5yGjJ^J-84(;wQoW3JbD1PS)3_ee> zUnB2kL>$C%VYrLy{G06fZ!%ZRBV!h{HqFMs$<3;`_CgI$@Ah8(l8|as*q=^a%4&a@ z3i4|=s4RKG2?y%(rPllc6^&mVz;rfW5)&-XF;}aLi`3k$vCV}ftBV}H&5XF|*O}z9 zY#wQQn-?HinMvASc4JI@VX`URCX)P%yT2n~xK+s!VUuIKw(H>zC(S}f#(|i)E7NAo zBA$Vg&%A=aP@>pgybF4k3SXHTYCI@jG|ej1r^|(Th-W{1X{B-xZ#KCr1l;v0=Fv1T z%_+HO4?b$fTKJcf2$n>DTgd;P=>Mv3vokpp$L^TOi7{_ zyiiXvX`bUEy(6j4GRNYXwgYo|=c7BtH&az<`A9#MrI%+eL`9)JP**Wlj{#+pYx>ii zVa{D;c3^Y*0z8~OziF>3rXS6jV#IrW&HF9qzeC4nBWt~}rNnWnY!3K+!GVU1!~S4P zGweG^vOc0u0?L)Zdk_Ja+iPtxs`Z|+E$~2(Zw55VU&!{kF zXStqVYQ2uULB49a`vgA<=ZS;(!D33;h8I5ez*l!f_HMx(=VWGL-Kge)Gz{8;SjZ z4nrdH{d`N3_rJf2I$d9mNXLTgE{3;j;FBy;=xi{g`J$SI`spK5*|Az$mOg;$!n z@=ztS95xS3Ia_$7Vvvxst<6x<>)OXh*KM6(M*pMTC30HC5e8y#ZOjdI?US*h=+|Vk z_X%LxXu^;zi$~_IhHx8{9ihs7PB9P-z1Y*kiuEX0*3-6yQomCEmNG;{u7JZx%;_6O zBJ8a(G;J9XL65N;$>yt{Y>wg}~d zkN6#bxj8tLDn@*%>Z}xdK0%0Qbp=!SuBuM|8_2oq^RZpE*5~W|`LccM_wz;cz5a6I zseLrE;rVbHbxt>PB6!rkcldAtJYm!`R9^=v5w=g8KX8L?vZ!TXKHmSET(JKIcM!eC zf`%yQ7md@89~m^_NJL3H;W?MGDUHb67t}{RIyT=-X0*aeLO^+$as~H{iS#q+cmDRu+6YWXHE713|8%m6aG|?O0QQk4`FaHdw z-t*;ydMiYNZmd_h=KPLki(R}71#=GAVwvVcdYvRbucH;r&K%}E_>cxN(JMOY1Nz~R zK3}N=w=2GYQees&$_vNCjCE27@m}v%+WOzeDlXHqn{<1DQ~Cmyzc^!(V(zyC@75wB zoEDFF&cmE(KL}WvkGFI0?s@({iT*#?{zUh;&~Mx5jrL#BOnV6!(5L9<M^DrlgwJJzV+flWMZ0)S^TL&s>ydfzOtv@jTFcKFKX88M#T>`oRBQl_E4~w z7?Wm!8HReB{XH4B;gj|eQkMmi&_Du{{x8yGIpjVIW*)R?KA^wxB@hNW&T6W>r?u*r=Kj=Z zM`9oRln5um;6l;6h)F4zTT12%EG8n+j~jWiPxB_-ib0D7#X{=xBq;&YlaK|uZ*^9F zrw5t8w@!+ATVI(Av{SFF3AWjTh04U~!XMA7z$0PBJomDRznJcHegq0`=qQ{TrZ~5E zovkE%hgf?W*G7}H+UUIPdy+$`K4ldwM06`}Fbk9(zmoYiwReEvB4Rs@s@^Z6PE=+j zziTDCMb6exfYRCKU5(zmivcB|^Mxx~e@SxaNlU~u#=#dA(%21(`6J#~Xqw1IbfgQS z+`$d_j2et@GF#&&>R)w(i*LzNf4B1)nrh&Xi`Zl5iKoHf3UwPvn4>Qf6Bi*`*J4za 
z2hgP`hZSXYHW^hM^$0BXM0XxDaa9t?YgXu6x;G#sJOZ#%`MN}Wxj(MyTyX#2f+r9(R5|9UY*zv@1W3~JO?*N z?=aB_xGF9%x~!>Yn}_GQ|0Iu3f(A0Vc>58?F+_^>hQpgXh{II6G{D&l;N(<|suY8x z?`t479H;FPM6q?o!r2Zz(xanOXNZcX>5ug6rka<sd#IaLz!A-*_0-hS=taexnMq z?5Diltn58!sqg*-N~Q0yv`PB##ff|11n5JO;3vc3{nr!YX^6sAnAuC}mA~KEB^9;5 z{1wro8YH;DKR1=j&9SLxi9Jt&1aUmCso*IXxkZ0mqw=3N`C@>D{3?q7(k5+(b2eBl zr|C7GTh7u2x?&04uoU*+$o~$*I`v(26gjFD3F9D4R}$HW7)Xh{Y1I|_bUn8x;QzQ* zv3{k=tnY=S(o3d2&PS7~$XWVK$1nsN4d={H1)}#VY>PxUkqxY;{9(N>ZsmM8TcDz? zg|Zy%p7I;-4`QP z7uzR}B&_0A6>AS0UUw=)$E|&&+81bYMy6I<#kP+t)MG{T`Mji+*c&RDh)_UD-8EJj z{ayLdxPczuc#=H%iTq(cX!32tSDRJu1OR6uQ)gva9mb1FKrJ>MttE58^_84GkZvQ& z81<1*f~Wa0>BB4cFCzdPZVdyJvh~ybfNWg<$VlI%Nr+tMdksl01t*ujZzES_JI>-i{#+&VAf^zikc>9`XY}Jf$`91$Jk) z(t`2qp%j-F9WVdM(j>&Gx>2R4rsm)vR&tZCT7OTb0MoLz(tbXJ03sfIbG)34nH2*O~jWli>AG$2*n!;kpCrbxG2d%4x1%DK#=1zn&?G6$Mm21zUxaRThZX~38CSrd( zi;y*<6$_sioV0`EyLbsxC3H+YIq~Z;>x`K@_AxtwD)J0_7p8NJyiw%>+_&uxjvvDm zYO?ZlG(+^dT&9>lX&0=#rvM8>T+Bj&<4sQxQtaPx z1^*u)C*wDh$M?-l7S?VLK{v18ed#7K4`6JgxI0MB@5h#9vI%T=C-~y8;HJh zG-5-bQjAjQRn9`2Twti)ErY)tOkrCwCLQPTGucuIoERzgb7Y=V86g?~yex$^q}HG$Zp^aC<% zG)b~d6>}dukK~*pf2B?;N4UHJh1Nx%z0S|S3hts+qt^R{eX3)}`gIN*2q!ft86Hv@ zvD&HsQ{+r^Hj;m~B6m3*;CIQHMYt8!o>pzvtU-RX?l?UoC68+=)vn1**@l9e+1@|{ zD`hfb;EM9Vu$d-F@~Uo$njl7*kUXd*rY*$nT&s!O%rwH?`IqFwRBXMlrOS+z=q@Qx z{?w2*|M}sV)gr$Ia0&eL4u<%LZ3w@uV`fdq=IqJ8b)YRtF&EMze^`4+1lGXIrCyRn zy=!st20k#HJxmfGFu{q}Xb7xRb@kNj!b*FY9atypW6D17ICCpv$f-|5+L>u)ORT&G zX(*i+k;W)O7K;iqW-3|suk&d&-)zK8ARRUbv69n`r8;qrrAqz$XXCy+KiL_3Cdxag z%E&5LVQ-a0GDfY-bzO#E_gP(2m~mkX`tc7OLg}Y?>h07y%ql%*##q~-K`w0xSEn+M ze=PrW@e0^r>N@f_U8}A&Y7vj+oRizSIgbhIv1)ERGM#c+=>|2Rc5^X_P@p{+tN&Wi zZM-q@SE2nJvl+2@M(&=`zV)t&#hE_H)T6FC~`!fm9gAkYQ9ERLJQFO8NHvxyY)kJnonO@UxK6^`R}>^m4oK*c2DgB6-+VG@qFE^!qoUFm?RJ{y#pq zg$%p9`rsJ^t_RS!r@So&nNkpaBJOMvZ9~u!yt}er?{GcZ@hJ<*0&ehv-TO3LZy8e+ zSvWklH-U^>C))bR#I9Y!g6&~TB#9y*#~<23_PvkF>Zw#!%p&qXF#kJg+Z|`bk;VlA z61E5e@(&^ZzmvBAH)O~1FJ;%G>z#xrj`Fjs`i&SRBUPnA`N4oJ^am7;au_*0f`HJK zxsu<~wM}CYw&tiS_@L|Ya;)3km50(`+|u2=w5+VmytM1zzIMs4ZGJpctHZf6(m0*E zGSa%fV;1F=wUXteTf^97-}hcQ@q)mQxzS~$ozk^gt!I*D+f!x!-01nXw;#NwZn~;c zHPdM0w&CiSb#0YQ-L$Q`Vp8SR3+=$Se_5@;aNjZgvPrI*K>u>NQCqn-6oyn=SGl38 zHf?rnwwB*&)>dL={_Lf$Vm%r#KXE}%*h~G>Q3kk(@=tEkO`V%EYpr3Jd>?|wI&SPG zjBdOdiOpJ2_^Z>XG1p|UR1=oMDD?r_^Rx%$U%$oX%Ra0(pX?J#I^>MPEz&c{q%6Bk_sjMIcJgf7y zI(^(^&SOlvRx1`@`q#_j+7>fopWib8#OiITn=Vz>P4~K%P(WktGRdh6Qs-L8564M#^v?#pwBV~FvX6h28m!TY2TG$*2n9xe zYwhNZS4>+NQgsM}QDwK$WsOG>j4Ak~5KP*a@y_}Q{1T?Ql+_(*2?g5e0&`3qH^;C> zqSwzaT_Y&yFppbx=KjMu|J?M-n*o8FsqXj=L)xmZTAisCvh$;Y+iRMy{V6TSHFEU& z%I2lBf5B=Z2O(Y>f@bVJsSDO$WE%?CinQ-2AINJ$V9mwA|An74SF5RN@(1jE7=%jd+mJaSh zoZm3#(pV`hg1h_N$_9>xfjrWh=8cSBoJTS1$(w= z(;9Du866EuH?&q%A9pbX9eFv=NV+F1xzy=o*U3yXx4bMv$ge++^;<+z$oZ=fm_qVj z7s|i4n|fiLn=z1ZK9{7ACc(BZ0j3Ld-aM^;Ex^ySt80*rI2eqriV6XN?eX8tag3#8 zZVSx^wgC1J@oK>tt{qEk&0~kblMSA91ZJK{1Wb6Bu-G*M!V1;Dpj1hVL(tiSGTy?Z zTA1Q|8ztfDlw&I^UaebmuBwJN;%fzhll089XHFqB;e?bjNTX{^Y~<|~Dnn#elbhG+dnkZ#0E zktJuy)+IUO7RyTRp2mcS!C_q^_~psB_-}Y`#YKxu^L9PVDD>KHp_m8)i)^XTB1Gnwx^lO0q!puI*ABLC;2-S`eSl z$Fe>yZxQR~2|rd!GkjY+Bq5MZ8K6B0smrP1(@D~|X+*647j|)VnyJ6g6NvfNmR#0X zD9mx@S7LA0$#<7H)k~w}D3VZ1JIx8Dx@ZHgzl+z)ros|!AICj{dLX*%shP7_k&q#B|91YFs zL*P!WVk&G$U{nHo8T&bBdA=6Ne9Rby*pEv|gpfABj`AHk|wa$-Ly*Ib*0U*>G z!@Yym9-32CX!NuR?B?nrSHMR62PsL}Ld}=PT1g5>Y%_Dxe$``L3`oZ~#F6#Lql_k*D%)X}CT@%?m0Ckhznt0_ubQ0#4b>v`#Y?Ik% z>D|6+yww(!>%|EmO1{DgUf~LYD=RZhz%|pgNOAkg@hOy6|3m6{dQAY@QJeN^eL4z5 zK0OJ)_MZIpxx?{l*6F`uRKHI&Uks_>;A`Idx}V<4{M}^@jH@&Ne`2RY6SbTA%M>Ph zmHJIfbDS){xAx7}Wes@KZ-Q^W=#DZ+A_ijQ*?&LxzS3UAQc1`jys7Ut4K?Z;R~Xh& 
zjJ>n}2+N$8MlV5qkCn$*R}AJ|*%9v-E9!nFrPMPILXgT@vSs7J8wXKFk_fc-ZRF3- zZu&|wA^!teK&HQvPAsO1e80Zl78}f8>woU+9WAQprQPm!V;-;S2COWJ0-7dN;(b8U zJOs*=EG~)he{+kaL-R{QvDl%rcsP-`Gc4DD(vj^uE;DQ>R;|)ECv#tjt5p z(C?}O0@S-|Td!$IuqmOyKxkUqysWW`bF(PnjM!{ltk7O)G73>Z-%c-X2`Ry1-^euK zM5RomsH_p$VEV4y(x978vk9@`NZ-%LAH5+EQ@gZ~o<>UEB5T*-hRnJ3em4>39!-Dy z)z`my_SN+H=U+Vg_N&*^*DwC?`PbjRj@H~+L0XtVzza3ZzNq9pVNMyc-2=0PXpG7- zm&^JBGLoXHoVXCEq=6_%JUMt;RI(bVSb%tXA>fq3{Sz%L`!jVe^Kza_4ZXjKOxNGZ zTzgTYP6HV|kN;pKQoL+KMWybwfU~$pi31;X!q<(NyO8cI`}Xyh)Wi4rrz{{p_b<#WBR^h4 zSVWwf1$e?s^(!Nu8BWq)ub5@`K!OT@Y3_3Am}mU|u2(aFX=8@91GWRCLg86Atv(n? zYFILgYdGfZa0ft{7FuP)?s1x;DpS^+<&j1tGG_=_6>m$Lw3_l)$i=TB3ZP0=(v2bq zHFUpN5N+yr08-88C0Y8e%j_R}Hpi~XDr986{RI{dRdu;CYp2Nto^8V;n__JYCsa1j zAuskK$JJ#+vq5tO=}AYwLLpQ`Q3%+va0#po%4Ig4!o<6qPHB$73#6IpbYwuh0AyGO z@MpOg!HUt)I>Ry(;9XQ$i*7})ui3=%xm#x#O##D=SNaO#1j(Ro72gbPB+FqYlB2){ zl55h^EupMBs%b!=i){@k#|(33I@a40!! z0GvZ+1z1mGsAseN))1a824oUqb7XAW1rQxFq{fw7CK1}_sv8-y*~X@%q_N9-$}n3s zm(YObRQ+O;5SQ`*I{ic`D2D|6jlL5oB+yTkve@AJViSF(nV#eqQ5s$1Bi6TEv&`^E zb2cRzdDnmi1_H}i0}H|OAgG0k>`80U{bmIqu))vu8S~=07A`lSJSYs+1GSK?n=29n z*9^>U@bE4KECl=YTsCz)I9vzyLWK(c#p8*U4%4uazo1jwQZXbToX-IHVxyhtx^{3M zW@l#nTp4|g|A^Of%K}5&=N2mI#xT{oiBPR!Y&|yPj-=)aTnzx0-l}RdqZ?2pufp6W zyJD`?2u9CR>;b1Aot>Uy-hqnQ%=Qflrb@71qb%sKyLGLV+B+I+o8r@lnCxJF;b&1t zH&r=T&8n{7RwZpiY;MWCw%<)jsG4p|o^nPs4TF2~ZEyVD{PB-_1707E-aaKMKGzq2 zmXLZi;Y4Kpq9H^p?6;9_`^^`hWoN(m_@^V&h(AMT;01&cqx7-cpkr->_zC-O<(3A) zd`NS+8Uab&7bt`;0D*WGu$^#d3{I>u7s|$TvBw&Z&qk6UM3Kd4sw4A|(`M}QWW=CgyU?z<3^PbmMaj+k zpJ1$wNz8yDjfy`ZLHE->ZJB+V_2Y!Tf^hSN**nW73X(gfGuzO7SZs?+R!k>WNzTK( z2^2!cYKpEUT53Dx^r~U&O&@$&F0k?~NEVpan9*r+bgFy=n_4Ka;}mptN6K`!U$4vM zw7V?zAyVikO3^Mw;=li6lX&>NEpLZraIH<(k6?XXHTj@z&c z=Sj`gBIcu5_)obl99;a$T^QG$7Ky$@`F++!X8$!DG?yitQ0%y&7oS9I`sL|gBS8|zf+K#{$Y5_5$y%=$Ee_Or)a)KS{~(;ngeh_mM&Uxce+ zRE!=quQN3~GYvPQFy4J`psJ*+?2_wRB7BABOlc^}FiNrLkw=6sOh$?A$O#_`-dwPK zC47V7x`fzZd4`m;CED&Iis8ywXk+Qo;gilAeo@na4xz%8^~Jo1NXG$)3b|D|rENT+ zw^OYDLGwrjz`It2!7)7$0g3SCrJxg6N^GCqT>3CtRwi!c=vb!iQA?Wv4U))yP5Uxy z7K*4eiOjw1gSf46ui5INu9g|OlM3B)yi50}K(B@h2Ck!zr9oPU4?A6PjgdXK-v&wh zY*O4lB(Nt#2Rl3DDIj2sRO3H!0+pcEJEH^o+;||52wwX=Y8G`ENC;ZKa7A6-Xw~f@iM`%r-l`5jHuU3 z*-DpBn$ELwR)g za+Y|q;Cd|U>)T))Z_HWQ+&UcgsI)j`^#-lGb+2zXx>?sNZ#FWMSg$VUDM`D&sNjPw zy8Ym8881x+0UZ$XPo^!m3Geh&&Ow&c~JE zLI-$W(K=#sRF6+d-HKc7GLFy_+g(F@A$gB^PWm+Z_YVNHWgL3YxGMa2nihDAECz{x z5E5w?=W@|rm|RM8dQ8)le@j!AFnsP|&rb&&Ps4`HLyVfV?zGUkDS%Z^ zVTYy4rB8CU9Woz{JLzU19*H|ayR%9&Vf+Bq#&ggSb@Pz=L8}fW6DQ#wKi=GYu9b`+ zugkhCoHxWgpJ*JmSXz_1HIaKEjCr}q=%&}B(6w&pyh$_9)531%a}GuhK}iDOZp@gJ zcc5p!j_iCdA;?!lO{E<-_w}AOc#^+=l$HVg8tXDX5Gii{NVz$jX;u{ z`gw0tj;W*ngsR>Wn*B zPFxmbc+I3-(seNzU@9S zm?gdsbEAO4(H2RO^e2>{Kg?I)>BVA-RE?4zx| ztD%eRheinWymLNdPiSg@WKwxZ{B8=Eq)9|_DvmnR@bMunAy(skLzuwYRxFd_vqMD< z!jo~-W+3)l@Q{vuxh58JUhZP4j#jTst#X3uR^4JPd{!0sl(0ScD$|8?1Jxo7CB?7x z(64U|=ko?5OSIr%2z%N3n+RNG4mze*Vo$WB3=rAHNB1)-@*0o27sTA=`!7eD9PQu!c&bQP00CGS<- zHWGcW^1-3HP^iV%-|klU=4S=21*hyyg9?87^rhn@e4 z5Yhii_&DxKr?A{(C~tYybl*MR1tCCsJL{c3efktlR)RqiVSKFO2$H+rSFy-I6((>7 zD)Zgz-`G_i6#neCp{onP>=+)22=C`pd%J_NBc*1u zEM_zjX#g&PI^=3Z9D9_2hsL(MtYi8afE2v_vyX9J0o!>Q~oipluP&!0Vy_m0uz zb|?nGWFmg4sc66_=Q%<_UbIdB1A=pk8VIJ(b`c!zNr?vbhi~!ck6*vect}SaLYvXB zUDLMRqS7D_5h&7Exwlz#$ntt&f_9MdDg8x8qk$Zs+K{GRfx}`j7~bT3#nHC#M^`F3 zJ7_WT;Az~O>$qXaZo_Ei&^S8ry1D3P!ARY71QRL{~sn0p~duz^?&{`{uM#zIuClsz@ zcLkoaUr3+r-34{fCa&2%6>0E6Q>8tr;u;;~zw_*h8E?_|d~gs*k?ylkY~6BA%c%exZ=8mcAQk{h)O zHkeWFUKvC6>Q%imB*8jftu>Cz@^)YtZ3R)OqIgkX)#7$(0Qq3@rnaU(3E+2Gm#nbS z_@#;ETl6g^sAi|dL%FoSRN#H@c+I$DjC+oR`k1@&gY&0vd^RKC2p{#na8Z;&9^w5S 
z$o^>S4Mn0ffo@jClGa=a4Qcb~0lHn&dX`QQOQJv6LL)l+rKqF;**uw>_d#nmw%ubX zd*UsyQfq|Je$9_QxI0Sx1Fl`ILs(lCQtOMFM&u0C-ree%P8v;EE2zaH(%40YhMwE| z@W7}hxVzHuYHA_Hd`1ShQH=4FXKK4>n29@8nEdRX510*g)gQDv*&Zy`yhpi();73f z<`e)r_z$lxbhi6(*2_+^Z=6s;^rar{(?(6zxGpH?cX}TlvmPaLceuUvQYR7{v;iZB zV|QLa&4(-XEDt;BP(Q_?GOsk0Z$U2a)Z?g!e#p*1 zwk=q)_6;cX4rWznN(imd(vW@tG~Tu}0A#3z2#7czeoJi0xdNcc#B%r3LH4Oq*Hd0w zM-(J41Hu)GwN8Wdd&wjZ@7yZpIw4z3dB~+QOt`{)KLV-`b5#0VC`U7XJ3YxeHDsgb zSW$Oko29Wvu?qH30l*=pZX#{PtUw>GDKn3f${&q9z#I)77XcbY4-f^;eG*G>i>W9u zMv={$PomL6yDTu(K%dtNR&+%5vZV1kMj^4nveBE@VLeF1f774?d4nwy zatV~kW+phNoV$8L+joQP5{8Mj^$ar+8f#jny?sZ`P?2$xwkKhaZs5l)?bI=;mB21JhVG*Vsq~*>Dh4Ovap1nr=_~ zfBNaCA<6m_31s{P3+_k)cyoYI0Wi;V2-}=ZlYE-X91Z70^5GbpF)=tm zqJPvb->>hY7v=8b#}w$_k8D9*N73-83L{d!BK=L0B}SCB#C+A55ZRpsu|hGwwT&L@ z!TbEF$hWc0pV*I3;S z&6r1E{A9bofN%X(g!$rl#JvGuHWFxa&32fI!^x5=yH+vpAZ0|)?Di!&`m^F4g z<)`q-Is-r%r)%1L!BL|py})SWdd7X^Qb`;ZmlG*mTWcV|N6sj5Cn~_@uAq~>Lra~U zV9QL?9SVObQ(1v5$kfMH6gZ)YosGjX8|AR2VbkfOQ9z#c9A7>fK{h7~39)av+JFWC zByC~5h*>9hg+%6(m^Loj zY;MXs9Zyd@1p3SzMD# z7-*Yfz3bfc?~Kf3qw88Y^eObI;s6(-g|9V7XM~|Jpvwo1_#bB<5ReXKltP|d%%P)b$lwVn zeVl(nf5dfl!ic)lo8wduan7^30VQC9!`G6KxWv2crlXv9FMaHGHkYFG^#B8+c#Gx} zdCEFKU0mTZOdM(<6h@P(rP(Hk!JM0v_INfy^cQbX6CB&3uq3BXDJ`2&XOA^xiwV*} z$zVkDYK7Lw<~zgs4G0?ke01^gPs%mfE^-TgFrrgDn>;-z;)k>9^WZ+w$?0EWb_W;A zRXzwTBsDo8UiVle%_q*~s<9zJY=)uN9Z^sEY%}q);d_u|oavLT+9iyN!3nfRHrQwC zHdXC9d+?yO=8*~PD$5OrcO6=n3{)7`Iw#9X4C8WJzopF5vOb5X<&hI*T1MbbL3>o`cXwklYm)Ve*Gda&kN(A_G?>OKEG205k4f z4BO?3=JP6;ujYX$N6B*5kyj4q*Yg4#Oz%WYc(ZmL!9(#w$M29#ZqN966N`6(ANs*> zS6JLnIp^$e(#Uw$$(09qC4PiM2{DjNfaGp8pE6crtZ4BjJLg&@FJ;a5!=Mh?qu*0i z)ULr@D4%=P68?G68Ga{{%r~Xfj;^E`4?qHUYuvC_vBQ$9SgfjCh7ZMmUvg=+s3WF~ zriApd^|?Bcr*T)npjO43#fxnuVx(o2)ZxBq8deGsg5b@KjLelW`Z4@;BwGX%yRTE= z{qAyv&~8dd!A=K}2bkPd=85LAMa|1^dBdkv*yA=-fhn1l3G0{YBK?;3o;jMxz4*db zSa;?qSF~;0mxXnrUZ8SFRs>azr)|PpKE+y~6mi?usXk(WhB>&2Hu3BYh(lV^;l+-Z zcgkt)x;8_qnsy_EX1C!I)UP4Id_{?#ouU*FF5m_3L3%JkAAL~V4jd((Yh8M`(>VM2 z)2_W9&e0&tPk!=ISDydy+#t0kC&RU}6qzoP^+*<^Em^Ut4;erjs?C)Vh1efNGHBfx zx+wUly%(j}oQ0AT9}7-HF}dQC@{2nsF(X}W7Ra{0ft*7Eh*gd#7ugWa?7-pWJ6!!x znSh^6xYF?vZtGeEoS*$jO5w4!U=#$0hUkjoZ@3$MM)C>!pz!6$bB25RMu>xlLBMOg z#V7>^K$u=liNP_5he&lwDu039LY(7(g)-3xu7Kmgs6hiVVFnkgC6*25lXb9 zXi%i&j1Z3rdwesf21gmcE1u~g#`u~$OL7<3R=g$H|H=AKG0YPl`p&ApT++Q2n`({) z2lp+!wcgxjuMgOWv8oUP+t};#k*!v06X~Mf!`tYD<~EfjPgviYOKFKh+?aB?ikqB; zPX(eHZ2B)*r_=2vvb_nqbe62x>IGzk{M;NPx(Q>T_w!$kM;{IcAD(C5?cRKtpZ}eH z9}mv?r{B&aeYq4cKkt3lfAgyW3em%dgYUjOiKfUrzh;|>F5kc8L>Tti0go_*6|)GxZ(=i^bDX4 zdDC;L!KhQ8a2z^aU2ahRt}Ue* zM)U#iCWt2<+Z_-&UA8RaZDQluY|WOe5?Qk{av!jXM{EMJqnoD!l)2hWNAd)DM=%lWH`Zbq!jkO93;Dd? z!zk+YB^+D&&t57wqKCXE#vH;v8f}g4vn@&go_HWNQl2ac-h#s5$75f z=S+ozR9;3#HMOxBFg51^SY*hH%$89o&k;e@55nx+5%g8jywe6|%y)5I)$VHOW$?Z2 zId}TxDxPi;Z@s6sYs3!yldw*-@)|;&@hmWec1(~zd3t(3*KF~qP5sTfgY>jd$9>$x zdg8^+EsqInqW>H!(F}gz5MQ(h^aImcP0zShW_ux6U-F^?Qv#8vU@4x|i{ptnX>Y1-or?<6c$T)|y<~-s9 z3Qy+cyC-|e!IoCiblsha=*2I{#fv?^cC`Vug5b9_CqLA@N=K=T!|?@#W%SJz-@uM? 
za_j9}G=+t|m7n;@Krv^UA&HZQY~79B9U0ui=K71UQFgPqmLmcqIX<8F{N@GQ_zS&` zFV5m}kf~}R-kNLQqUpRf<7vEglldz$N6)UdxzsSCAN+S&=1(Cb{Pg32Sa;dvDBoRl zVh6j|8}adiuLh}h_MGn}_SG{(Qb|c}G;f?rMYS3QVH7GG?nY=kM3I|Hibwjh0P4vL z3@znag(5~0cX->fZdB2Wof+yEr8_>dtJ&5HBgl z106LdG^pqdc_@C3yGpu=aQSDwneHe14T5yJI2CvrrL1er(Nky=>s?DKDe6NP6lOfF@RLH;fe3X5LORz@f05{=%Nx_{W zAbOE!zwxiD^beV2eK}^JUhvxz-~>e^!^7Q zAwN=tZ8wE6)$Ufsw7<1~@4nu`^!H=<+#I3s8r!}}9?2GloS3^?*OT^;vUjASCk1#c zuwG#5O|-l;zm6JP${!!Q`Yj^=*@@6JNLnL@h)*5yI3Kczr^(y$pYNu8){ft~UU|3R z27_=Y>XgKt9LTcLCMp*W#Z(uQ(f^L|=*!9``XDE?s(s$jKcM$n!2es_k7{TM!kJCp zOaF@`!uZ~b8Bfm20Q?-t{j#|8_Y-q}q6B>_AWZui)s`xo?P5&T`z$pYgpm?tyJ_F> zXAU~2-y=UoyG)xC$}>#l<(dS)O79Wh?I3M7LuApa$da|gl?fiGNT?ATrYpeLsMy0I z1o)DKYf9DwVuAmpnJh0cIxo5ltFeANjvr@Eg1@nM%gONM5u<-gJk-ysR53S>jVTm^o)b1cZ#)%JH;{>?>4sz~HL#){lL3jfmS%>267A3o=}D{+csaW? z3YN8wy6Ye*85Hc+N$5AZ&tW~|^^09fpZ@#?qC)~dD4;8(WgoY$%8J@D#a&c8?b&$Z zmucSM09KO^L}eV>&!3$QT7&~(wckI_k>PYOv~>hLDqUsA*4ZV4>sZW6Uq!oI{@mNq382I-ecv6nAG+(pO&FR)T2KFV?-=0I z#r~3)fXUOuJU(@)AdGv7uMcKwxnu>+&v(E~z|CtDtH)rduW3)3l(aY9aSQr9J}4i; z5Wu-4=te*CI5Ktk>K&MrBSeN^DKW%J^Sf*gFCVNu;Psgop0MBIxNiz|+Wgu%F@!tp z$RrmW@bG+cb3zjo?|@>0Q)#z<|At89#s5W96fzycoRc7jce!mU=)T5XHBCQV{i;Xb z25m-mx%)ggKCUI()z-M`AwSV}aC~1lV|oNJzeIi};eE&_rG^%b=roMnNvB5F(D%n( zU>j8_SGMqvca8G#>`evVhZ7vGZC65>;?Wf14#3Ka!SkffyB~fipX&9RZ(Qk!RgC}7 z=d_oFAz*JHskh->dwX)_^{Vj07Q4^lhwo%UhC!3{XTU*SUZ1^uApv9cnu(Dg{ zE89e*`%7tp3(7q~8Zh$7>F89MKzBNJ0n7U!h^)FzLQd%D2eF0EVHGhh`x{h^oj@9C zMBSFXmKSmgz=Y5y2ODzd$HPjTws<*r5uD{-$Ds#8?tBm^7fU45hh^!-xI=Lt8W1fA% zvb!Z_dbyoHTtIARJ1%LFphnZ(9#|}=En`np92)=rPg`67(JJO!YC?-j(TkU%s8||@ zwi}1@uqaWjBc<(%?H1%bFT(7C*n@|F&VMa1lz~QR(!5+Ahl|W0(yl!20jqmV^rKTn z!Y#HKrm|VGRgjt?%{LlGwz_27Nye34aWTyBGZq_)>?rW(Uqwh5*SCW+I7+65(JbMOm)$G`Le>iQecjABEvV5RTp07 zBLL{0Fik#E`YIOLIBo`}$mQ)Qnw{aRlhotNYiIQvk~hqER9b9yK@3-(ApU|qIvm9s zk@Qm7TpX&8uU>cu1_3}~=<*WFkbd*Pn0YxLFp!D@6IXCWy535d7d`A1gyM5V5kM{| zID=Zk0G9Jz0`?X2l@W_Z$u@xROKM${YI9y0H49kR=SDbj#NV^4pSc#82q!2C8X5y# zOxCFYxg8NzNTOjEP(NHJKDGee4rQ@0%&H}vmxp7Koa`lAy-g=IX@w>E4j#rsgs?D` zfN-KjkZ$!Bj%}T>U%K~gFpdpAQ9K*U(B`mYnMxua0Wp5hfY3X|YF&^XYW|TG8$V))um1D$t1p`V`~Fr>i?U;Mh)h74<@DgRrd( zu<|)r5V>EMx&HW!Z{In1GgjIWdS}l+isZzg?xSc1G}9M&T(OSrP?tV&5JrOH?~2sv z*3WOB<1u0y9vjWNV6na}Z${HLWscFY?|s2nRJ+chib0c`A`bJTZ>&;=2&izsJFS8n zDLa{CFS!nkg@d8AQY}*f(jEgB9jhy$jk$zc6K}_*f0G-fnfp*FnD~T^0_rHoS0Qan z`rg>P>E?Se^ObXZ2t%qc!F7y@s9f@z@A)H&W;me~5e!X14PY&iY`XE5if_yvHwF%PDR zbNXlw;xa$c2fgy^jg8da6pM0tS+Y6);+E_gxrrVa>W2=rk+@q*A#0-4;(SoaT4CGf zzgvCWMa1dSrd6WATr|?pb z?^ith&P+cbI3CfD;{HR;%G)Xr5FaKIVG780B(G7lMZZP(5V5RYo10#@)*Ffzao3SC zXShL=3jSm!UXJ!SjMT(wyhp-Us2{yCLDdodcMM0yjq-|L${Fn{GMP;I!6EA%Y3`sj zold34+(L`>!s=hBfU{H1DZR(xHs*3}bK=usB&WW3Pr(*m?JA92e7VxNa1ksS?v7}q z%RN*ylAT?<+_|R2Xr1mz<*!&kdhxXzBy>L`1nYF$Rr$EG7SdF9%;M>bvVwNzr?icWl{a#wIE07CInmedS~~4-8^P5 zx;hSN6G@oY6TMP{mWs!M-DAt%QuGLi>kfzssC16^4O}#y9hcK(Nf-TryZp!0kQ$}G ze#HtJI}*kI$0lL`A0=qiu#decl*OTBTb8d$2N&`V5|trj?yy;-wgbu{wSl|5UaK%5 z)1oIwbUFVxfj~`Z2dB0L+iiY;S?&1kR#chEVv$8VuXlzZOe4O~=k`@rOKS^9px5-} z*WbKOM2EBp*=cinjO_Eq(ff&UbQGU9bbNG(h0qU!%%_OcXH$C`7OpL-{6VS57Gp{T zgp4CX58`n2@>6dE1t2>6ZIZjz99P$!a&yaukQ;#a7&EjW4%xGzG1Q`${NN#zc}2Oo ztTAgm=_lzFT;4?f9`d&+%P^CbnP1MyrZJR9Jo(+sA8AUa!ND&{s6_s1Af1li9HQ{T z9*)vo<0-g1h_yTPhR#q??LN*a%Q3fwk?g)+bO*oW zl+YfSic*53%7hPV80``9oevX^;E_AnGx}4}pBMCJPJf;UdPdq>0TMdYyPX!k@teHh zh%DD5dSUU9{%A=3F4?5lyN9roZo;%3=N1^YIccqrQEe*az~@~`#_(+8T$C*Cc>DcR zghn{BIwwMLg$6_I&w^8~-YC^o_^z>P#;Ks=20b9~R%p4sbGR#r1D?vxV`joe zC(!88BUjxBmBlVer-?p>j7}Q)tbqHZCz7~FZHd|FH-}=@)r`)gh)Hv^++SW66oO?& zaG;Hl_4N`v1#!$zB<&XzP*)B(R1_~#4k29e-Ym(z$3~gwx||){B`5*glA0PLrfG{$ 
zg01c17T-U13GN0=;savv(~l!x>eo`0GKcB*6zVQ2_lM%z6m3gLVbh4BEl6fbYd-BLlC6sPxDheiDb;p+>Sx zH0pjJIx<|>g7N>3Q2xIQOr>un>AmR)&Xwa|3FkVB5T(MYz`_Yz$~hBe@yl^lVVc9v zrT!OCO9KQH000080C};sR4b8U|+5Uz!zyb5_T>VmiOaOiQ75M2g;@iy*;}Y-J3R|G`9a$Z&xhXg_>?bXnzkq99p`lDt}*zbOhh3P za@V*j3YZh7N-!)h60k4+w`Q}dtP1$P;Mp7;qs)80uNHZgCP@@ff(PSY*7Axgz&P+SVOtV9Mjm zYmlGF!30fZD$6xnRmCa?$BvlFm%=k>>{TU;OQ2NbOO~f#4dkd82Zak!a+s)ONNSWq zl`+?m;o-}~DmGjJ^&-ClTLL1R=XnA!iKJO$`HWd!Q?cMzGB04hA1U{ZO znea57OxQ8I7#OY z2H)||u2KtstQE>cndb4`)G0*@;6372&$e1tiN1Dye3gl70CNRWTHm!0@Eh z0r{ZfDR?CaPswVT7Xp%QRhpcH*=*fP3kry|G6STk0I05sXq6>-TrF{dOEet_S>Olh zW17l%FU|8y4CRb3#W3`;Jew>eET2WZfY?dVaMnwl?ex-vPkK_F!tQ5X$@+}5uknSB z+3bY9szk9ywq*7$_T2-?Nd5ZRV!maK*#|Av@s@tTY|-3Ck`ucxuDS(R2UXWOwVRwS zcmd9V)x{N0B_?E((P-+Fr(Y&9z@fVbKA*;r*X9fP+hw}U^3|(CmDSbt&H8uW>oI=% z@#!a@e)jnnXJ3Bx?D^NjAv$PX{}kd2eszwn&}sP6N}jgir&8EO#)aJ0q6!Du{$ zEt-I6sBuJ!97gw}H$r-wY1PUdLvFI9`WAC2N^D_hS=?q}EGXZ=y0u+8#@=oUs$P#b znBt)8W&~=G`vhR$65u0v)PC3l+F@7Vdb2f4#hj;v<6sS|)LG}xzUTo|{tT=3L^tx@A zP(O^CX&408X?vp5yi~L=fH|*cd5+SiW}dq`xT>E3T%M=4#s*<&e5Hg zzRpVlYbl4#u3$-?`6Zhcf?v}5>coY164fqCP+33~D@#S_ff&w*c9JCSJ8_GNd-1Y> zhY-`EqbzP#`2K(i74wx)bzwjnz)aa~W7E<-GwaywD2u2)9f6{b6CIoe2Nh}TGIZwt z0MiQ+shvVotal<45=_6Mc%j$JlMdMTJ?B;}kKZ8n?NdbV*XKO4`uGoh))QN5I}T^C z`42#^h~5f%1%xncq*VZZ!uB<8KNhfzt+l+JfhOqCE=F=3(SzKDrLC!i*4oT&$&2!M z-)NJ-BDDmx|DE$Xc7yqy=6M=yOm>$7H`m*eV#Lbz zN>J#SM-52$dK;DiiPjDb*1`ZNM^tZwCaDEThH-9ot}#55kTix{j(D?(~=2 z*0Sp+9^Xw-4_Hu}_ZaveZYQR_sZHs_KBX&wXcp3UDKJ_TTETSzfi(t-R~y#WdeE&? z*3(8^tKGWfOm8kKI;IJL<2CdW7cMv) zsezV-XaHZpw(BuQ6{X&mVQ3lI5-TW(hRF-;o(5LwR@17s6@hMW&>h5T>(PVJlG9}N z!&|R+o_AXWzd_{wt%!Y-NWF7}-Xk*a7LlDuyhjASdF0&|aW2wK@!~di2`_o0JgLBA zOdk=AL;ZO;<(mueWN^kgltFZWC>2D|+O!%`Nv?MxshltH1Tm4bnZVOT-L4mKpa*7$ zccdDjaeyE`o5F)D!~p_xDp2qkEvQjHjv9U$z7`dAW>SU8F+0a}FMEc9vWj)-cP{wX0=AU6YbV`rKEPG)suVO+lv* zUXdXfabA%jVle+y=yPc}blig}S~9x9FiizknVWs)P5p%kI-lwKOO<7CzOEGBvTz>5 z0?e4=hND_h({qqq(|c$ageT5_*KfA^ivb1UZMlvar@JY4$*xYoZ|MXacB?xFbYOt| z5slL$+r?*#QjgPzz=`7fp@Vcyf{s^PDR;3E*@D+PcfjfYyOa-iHGqMxO>+yv4j|Mp zosiKnU=H5{yyZhyf87r3%bYw9;EiWz2`!Rv&^}Gq_C}HZ2C+a_C&0VywId=!S2|W8 ziaD3p-U)~>OdjDDa$>Zpv(|vY*DeU*(b&|w*3|meRPW}dHZ4t!pG_N%_G`B|r%*9w z^8Gt@kv6u>UeAT5VY;nf zBW&c=a5yv;t@&N-nK>Q0Ic@xE`+jS(~PTe3u^@XpYhh+n^O%y zJ3G1_PInei`U@-1c-utSbHR?uPT4NL4}&SL4zZJ6!9AUZa($Y^7+IHceMwz_F_)eXzr@DL9)U z1zV@#wL^VT4;+Iv`*Ijm0=_1;G=;drQCEf8LX$?^a=LLd1{5>M60pjEU9?TuacdOL zZu$ps`yO(-Gq{AtE8Y{0;cH~MgLCY_^Q#KFyynITwGQlML!8R`CB;iRJfoUK!L-8- z&=|^d@)A@ql~ddbNZ7IIyQL6yCHAvpS;SS!3lIhCcn|?7O-0Osj6tN8xt^K&@=9PT zxR6|kt@8>R%?VJ|hX%7MqvPHP=;?9f&Qq0xD;UalM+)kK%;2~7!eo*QYU^0PN0e{Z zz&CpY{ytfVn?!;?H2W(04w1fgr30#s=10ejf zWc4X&Wpb$c=YXJyB!)!D29Gllr?7xcToZtVR@IP02IS(*xHkjWfaFaPmV@RS{8+E# ziwr$D#o#S$Y)TCR{u0V?OjUDT)ag&i*vla8^jt*twR5&@jqdZm3|Pi{qZs1qW*Pn7 zEmL{~boQ9zShIrq@ZvG?*Uf;jf&semFMt2(pKB!Cu-=2MLHfGlo95!IhW>Q6VA_r9F{&F*k*qse(RwD;y@~}hR#KL-# ztdhcD?7qrgarNSt5KyY-l=&N1Cy^e#b2v;vMp)4O?6R#1%s7hKTn!T)B`#S3R@sS& z(lA;sdB8fPtjM%NJ0%Mi@g=?Ve*h)8CtMftCa@DqlD#3LTTZv`g(u%e^Z7=q5QFbb=N^DC) z#G@lVjXv~uPaYORsplbD(iY)C6w!lOs}UeC{H6?M41Z=90?zuGkcCV{B$zZFZxy zH!UV=oi$m9kV(WwqFip`GOStl8jt4+odKHnQ=9O&Swon^Wdr?md!C&Q_VawD z2T`?nGJSBBk$yJPt|Qv{!(L~pP8m_kyEozuc8^dlToT~)qE$3TgT{)07-nm9J^TUx zGvexH%O0Zv3;@6q=YNrfRGm$oL@bS6{>ef&7@E_|0{6Q2{4|J`#f=_4kf@-jA}E5R z;HeE{%WQ1u-PvnIf8EXNZIWb-u0Vr)KOSf2dY$bMK;k7783|B{IUVGZsw$ij1hC+) z)u6I#LQ-%rYpo4JnyM(3lguI~s*;+JEBSoGMD9_Yy1Y&=Q%TAvPH30-#B#pW>k+;Q z(UHbAGb1_l_6lsKiJgwe{!;V{MH>5UhNYwuAk`Gyj$4cc_HeG=hcrgf2BUZkHE9)W zK~H-LR>5j&Y(f`fK_;xu&{F^^$yrdIvVo=$LEwUh`v|W|mAf||+;hM2zuHU` 
z>QF6D06U$5htV8J$4V7NR}MrU1npS92+-Ru;;t`)cHU-$@oM(fubjqI!f+nZE1hQ5 zkbE7J)N*RCs`jpch*?l~R8W&a7^P}XPI zsett=VZl4m4Ra5nqe8AVSt+|0prwY~15cwK0+n_qwInA8mZ#?-awst)d~V zmwS6s+z4^5N&#;UqRG zHfprN2Z*>9djl+*(M!bNfQtzNr6w^;x+Lu;dR^Q^zPSDd8jCT%@l?OJ6-&5cX1CU6 zav!UPBe1x}z~ZrLE}nWzySjLpAP%uV;xwROQ;%2e3?r&SW?{CWe>@7O#*SzLpXfFT zetkvYn3zb~YK_5cCw_kP*gA$umD5+uot}pXi?$WnS*97i*B^<#4TZRd)G`+N%r3T@ z?L@I#G_KFz!`VLW6bn)X*3vBkGuKf4jSocoiKK(ztm2b?4TU042#tueU z*!7R_t2m1R8mpDYU52ZKUMfk>5-$~=mEjWYIP-#)IRQ?(Ry!70fM{FO=f{q-%FAsp z-OkO!1i8;Ua&Ec=ry`Fj@sBLs2@`f?1!FGCOQxQx)n~>pMTtu~&^ykJW3?DJ%yg9C zUj)eHBq2Q#eJ7=eKd}^l5nt>Gq$%YDch`Y#9P4mOIRMCw#6gc33oam|#{g-lrJzid zSVE5pbfZ6QEP&oqPh_b=Gdl6Es(u)rtXG<-lvz1zC%R!+EkHKYR~f@ypf@5FK?!<3 zN^FM)j)GVr+pZ*XQI<%iDQgAly*@2RdaWzE%EcE@3FjNc%Pq-T$%>L}7XE}K%BN_5 z)i4t&HCs&sc*C=WB>Tk$&XP$pQ6wk(Bnl>quXMXsA1=%XF)$q#^rcSbI@-FvM3o*T zsCZO-#+y|>pPUD2jgHZ6-v(HM_=z=CHrO}A{9LOaD}!e>xHyt+vD(Sl_8s;D90jmSEn&H_!0D zwiimWajJEnCUD^F+xG_XV}CVatmZt^N*?RC}vTeup+L=55-{ps; z{WjaPci>-eAzi~x*BB7NaN2Bc6ZeMN?k?bICc!pL@n{;)qLdZt&;2(zKBbhDnTt6X zYm&={2VEcZOp2(!tu=JhLN-g)>PMYbbS>R-)kIY4Vi{(3ZRM*M6o9+^ZvVSLFSWH# ziCp&4qB#Stx_XzorkAoBtAKurY@bRzRMQ`c*;cC!#TLuKF6boVq?oH1^YA4LX;W(h?x6>45O_5+P-JmsLmd?6Qyi@LJ9wA~zqs zJxd+KGPpB9R99tG9yN7sxI$$k`ZHe#a*xCN$Bf3y^k`nn)$b?M@l{ndWS8AU+9}eq z_Qz{U*(f%*fLZ9wSoT#;d2>(yk}Jn1)YWuKJ=;{gPn1IVP395&PdNCNy@IVMtEIm^ zN~wdjiEF3IG=QKAb}e)&dYwmpQV)jW`ndu}5ma?et5fTqkr3K_LbF26NZVesnl7<6 z#*YlHmWrO$!bFM+(WhIqh7Qths_KWrPGkNm%%0FLOg6jBT6H+8@!$>3_#_str8;0% zY*ThRRNY!+?ZX@>EDAf9y#MA$OYo{NPLEC7W|2^|kYrXku!4Y~&=JWDBHWYSPIToUsgQu|{s)iF@C<5) z0^FMhPRxxJZTUDeDg|O9$=95#M&YyN+-Oh_{4`tv=Z)10UZ!rC1E&3t+wAb~gj1fd zJRp&!;f?lOxYvXe7;Oa1d7b7HG)kza-Krsbl4=VC8}s;0@XAoJSlp0xSZo$B+t;tB7kNHG~!^s3q+S zbO9J;k`gZX2D^YzRx&oXwum`7{gZ-M!AeM*|UM(N4XSPUX~C+?ef7kp__b!WcN+3*?DI zTFvg|Mia_^5r|P?WEUL)7>M=JfWtwk`C4_sA>n=LFLlps?UsnXs^=azjv5^X$IJmS zm9kOmGyi7uz1DkA{%&pl?fWwF&KP@#CLx)rPf28HA)%m=IG- zagUkuJGq@sD}T6oxrFs1Un)1ekm@VV%1df~BbHb{Zxs#3D&C3DwpX2~Q=FnTEGA?@ z=h_2s)1NtW(zNlF${Pd#)xY{RV88?s&Q;TtilAmT=i6l3pKT0s)T)goiOnK-h;xq` zV)Sx4%L??x04r+Xetfea0Fi{5q0WIXO$OwBdg0J~gwSq26?-Jp#1joI{=u)jabzYV z>mI>{HN{(*8E(NKC-p0@>~u9;Ur6F^s35hsiRTBFsGcR-J#RR0ebyp*ynI9hGd2_A;W*^Ui+fo5ijh_1Y$U-6y-0wrI^cJ9S;y?i}qI8HR?1 zuXg+3hr>0Auk^*bLUZB9Ug>DR_`Xg0S!0hTCjTu;c{d;TX}vhk72uZ6HdL7@CWgK+ zB5~=9d!k{b4Hq?ZHDQb`U>qoTAK3%Wk0AF7YLM)wgo_VR(8P0t+v0U|;I46zkT-PE z(Dp}*cSp6bkAD8vATSQRl+;x63mmHzrIg3{amu>=@#}e8{=2t5xp~a4#rpH!1{@>9VI>{9_zu8myE7+RY=mxE>-#nDnEn)Qba*Y=7@b%`g?+ft5L^IF~%8^ma8+br7 zlgst-RJP7*-IPJ5uzY~Ir{hcp+;4?p^rvE-w6Z|{Iba03w*y46vmwjSQE9MnzfE;8 z{Yn3&2=CH<%aXUz?Wh4XxQ@}1NHXXJmxN3LmW9)>lAk}(?CLKZB!5tG;do5$h7;hfl4eqli z)FZzXVv(8~9$@aThjIK>cyx~zPI_#P+%SrOJ^$o2 z^p1CLp@!pbVW)94y}mB%5dr)i&fy+u4oB<_kAGGbPapqJGmtn7-2_!+oloqe~sMP z(0?bgQ^q_+Vk0%b{DNho9mK^67)VM6pA)nUFW+o{^(BJJbgn*u5FChm{7dXkY*xGR z?w&qlq_%sq-?x4aB)*U)YV4BHQ(C*FpasF*l@UfDX*Q_c_8bIpirlNWIn8fYDrGyU zuesmw!VDz~x|=fpyuk7q^P8|3Aer3^6l#!V+7y91jOY(u{H>bsTVIkn zkYmY=xzL(nmsK=xJBjO7WWo|CI|}#Eo=CMfqnf)u6Iv7vghRV9+|c{#Xx)v{qCw-0 zD!GLTU=O%;SO;I|((4*2ruWdAVTHFUcb$g8s_TeuJlj9nSQ;-T$Vj;|xL`Eh(=eaB zJ_$dz&&%ccBKf#4J{u>`_m6HPD(_ajdPm6*mGUv;y0Q3;DD$>M>dcj|%_X5`y2ilB z&6i(+1G3a@!PcYd!8rw!K49-0assD9_A7<+>|Tw=1!MsvG22QNH%G>>BhVOna73bU ziW0813m2t|A^|iaU(_dqtO*ag)jpHi5ICNMj>HO_yBua!h!}Q>TZzpo^6_F_mJ}3J8|+V zg`TO;&4sX1XIM2Er+w-f{c7Z$PIBz%QG8sq zp4>QSV^6($9UHDpU&mxN^5^@LWCYxChM9w5cTv^eF927*U@%`vXw8HPg)Ssl@!VND zw#KqPFoGwQiT+6AzFTqXZiad1mv;F;#`j4|+o{F9HVQT(1Q%YNXe9(IH0?5gxt!R@ z3;8yw`Y-(T3-Y#rxhd_w$BIGkm62ns5!DGfxt-P^HX3UbaBj^Cr|{eht@+GI<@fxu zFcLqD5fuFObmj1~bq0!089ta#az(m!x&XcN$3@x--8TV^0ywJxFX%(h9GX@q)?STt 
zs_jdaj$;NWQG)2aV7Pu)@-LZ)yU_HoYr2K+)~dmy(<&na_5MIIpU{K+O0jtQ`pq;f zu80x3VbEsw_2?$La5(2Ln+o?pmfg&L3h61+onU^fn*=`_>zX`(VsO zYHDn2bsxeqrq<@_?#-be@9u~iYhouc<3t4+Ip!KKfY)|0VaAI)tx9rlSK!;UzW|4v zq7D((pr4vw;!r?=f}aqD_h=b7(@v|K{iBO0pja z^8h(wdYBcxcP+)(-~tVcM4R=6+HgF`%`Rj3ApYgLmtPIkU-fG?JMao#!n>5~ zGc9Vzk7Q#9*1iR38~4C&8vXv_unE1Oqr_iWP4yWlyWRi#FEzvYP_w)a3IO1e?SHAI za5puyb}}{lAIV-zGai`{#!vi+UqsYGTV;yyVThE_&N1ozp1V3-<8T)Gv;a>^wRWlr-xe= zQmSBnxxBE>@;E#|2nmOn-LtDAohF&oy8!BYp7}gS-TP3Kuv8yfC$_C~#AB@nS?m_A z9<)qW^n_U5NZQ%SjuZi7L{CL5YL7SyaRD|XHO9YksGN*ptfe$CG?p7>4b<~v)80;$ zt3lE_c07jlmrTG86#SQvXdb1jS+j_UGX$oc>erQspIV8UI99Soenao)h++Aq4S!|J zE|=uIhsj_Dojqz=y!v7LbwD*072zH4C)?YEk38uj28zP)U-s zgTc<=h9hH!hEINi1NNBY)l)quh}){`ki|3D;65>#U_3#qSxD5SucZUo;>UEsQwQQs zPoj00FlneX7|%|n(5%d8)Jl~ul7|i$CID}Tx%M%n5evN0vP4Ae3O6Rer0GEFYLE!V zgXOuJp_08VwVGy=x}udDM!%<)5l*UV3ltUd3d{ro1Zdt0pr~52QB&t^2ikF@4bbNU z?Hts_!PlixPn2&%+Qc3n8>RLtKM1#DDA!jW zh{PdufFuq(q~RxR2y{enKw>8gr(noOH#IzadVs7;H=B~n@n|mmdBq>lp_;xoHBM;@ zMHgde;4&XOR{U4^5)H-bgRp^T(87(8a1(GShx>@hS*S@V64xDrUBT5va?H4BYSH%%&c z>Hl3LmBKlbi7Z1m8>7wv;$dz-;bn!*tl(418&%99(!s^? zAZKvEcSi?LFow=aeDjaxCm4?_=9qkAO54;1sY>DmzFf+A1r&4`xZ` zyds*^{X-!(vq)(1BGFnw1(--xYXqGNPGC_nYW*$e>D1H5osxtAdl+ahgT5+)6%dj4 zFsVbt`jXyIDqJi~Knpt7D-4WZt=qu9G`ar5IAfSQ#BIk2h?d=EloB4hD>?-(hdy_( zg|N4tb2?I*zCFCVaL`$dpez|Lb;q_~JveCrR+V;2J2~lMSjiH~ZI4AReD7ss#)u2D zSJ5l~w$MMb0##O!mpYfnQ|?f&E* zOaqlHX{R>W^wTYAs%7fJm}=19Cy=mXkt$ZnUTK89bhIjl{3QH|qg;*v=TwYMqujWA zkGoJ+CpdFV8ZjMNTE^yb9hQjs8)N`w?#j9@>#HDOixH#&l`V`*4i5Q=1c zlz876*TD{AXD%OK&QE2NUnwJbR(!I;Zt^z(*hN3ZEf4MG`ZqsKQwYg^?hSw6sOGD= zn-=eIbrik4!mOk0WjcP>+2?a1tD=cAgHs~xY|8EvTp!ni+ENYIp2m;bkUq4=ag>Vi3 ztqF@dUtUh&XJ#$7eq}2(I@Vuuqkdk*r$XvnTfXcI`%61eZFWJTrF_Mj%0eny)`z6V z;4tjou5kosD7t8PBKa=&s&KQk8fO*aPv8ojU`v&#BC%FPz~jZVE)Tn+|doOzou2UsklMCAvs{Rz76}~^nGBZbs>1*r?R_R z$tzTrgqLoqfi~lLOx!J%F$I17r7^VX{>B10LI($Z%RC*w&4KrSCIKse&VYcAJ@poj zk_=HCSkdDD)UuI=&DGw0s_AG0r-|!0U`5bH$4oUYq?4tcGGRG#(4<7_GdWH~NCuF{ zIJmegBGUh@R5Q{e%d4mw;2tgBjuPH- zBNi@u?5HBQ;TM{hmhE6KN;*62*=Hv!AvxAzhDyWp4e_i&fa z;PW@i^o<%#VVzyK;o36D`%RYpbKKqjE$G47OQ+i{uwPl}s8L-yKH+uO1soj~b=E)p zyB2O!nl{JOoz1zOnUPD%=!N=@D?EFU`^8~0z}@!PZ(Hy-F-+MfU#^bWHaPl1KjZhN zcN*^t?85*tS!vGCzup^o<@(vT)$Qj~b$I?7Z?W%sMex(g^6jR#c>X^ZcFV{`htnVc z0HJUI0BHYBD|y)3FgbYs`@?Njm$hGEK-7iMyKzjP3l4DY3l+#=vGstnE#%3CTn31ijJ-6=iA<9ouB9Lp~Gi5c!|?Ykp#0 zbz1{f*j;$hT5sd!H-^Or$2ZaTv0{xu=<$uW?8f8r^aIOCkb=G|gn2Kr5kdV|6qM#Z z_$LX00UU!RxBgLe8f;1l6Lrezrqr%YfCH?A8s;7+!e~~y8I_45#)1?$s;T6w$p_+) zTQRobOAQ;J@%c#w*n9rZ$yV{j8~&MjvS6{ytfT3w#uVJNY1~kC4vL`IH4-Rup0XW3ezYuUwW;4IPg^+%!w3eer^x~mJ_4*`D zooVLaGfNF8!ygiYOI2BK3WYw0_P}W!|Jc%YPChe1LSGl*oKXi2M&7?uysg!vs zf(kT4SF?DIQHflzV-cW;nR;icq}TLGgFO?2WV7a3Fb!#buP*WW|?G z!YAJIYTx5# z^%v%GyOQfFY`h<&_HITGx1lP>bEv;-#&eKG6>mZrmC&E>C46<8-oS7yEmh`KQvvam@ zZdq}J`-b9?`=%dCBXSP%bqI8x?nA-X`y%qI?)j6*eZXW&FR$-xndAWTAHW(BvqJ-_ z<(Qjzra9w0iH(PM#p1;ce27fXK9hQ$!A-#v$U#u+n|>|?Tec8fKP=Imbi=F&xF;X} z*&qf?#JD}}S08KZZ~+E9X*FRsw?)xtrhl`ST2iM*DdN*i!hY;2BbEL0GZ}X z(8=TRV3t&g#gibFFrgVvi>BAQNK#~-2|)#+kiO^W4z7|o6H^S*#(!XaWYK{&dk&X? zZ(D1O=*B4(s?SDc4%+=Xa2It4fA^h zKr5)@F=Q=*14MA=uId~;XIGABzN_foi#6(Nl!*VK`DQ#>?|9edLw@fjW1ZvwJ{MJ% z%h%EJ%YPg>1JyCBXnF<;eLk5)e*JN@KfXRWKhS=C{W>n*qYM_z0Y+{9yMdgSqIp#? 
ze1G>^7ev^M`M1oUiLP98sK;b&NVH;3ebk_F0Gv>~sop9(t}iv+R& zV(iCH`rcy0Epu;%5W&X1I|I(bET=3TU@ZP%2M+}>R2VFnh9=p@ucm>TzfPk%n&vy~ zut;%5w$FHuuKRWes3W7foG7i?X6lR{$7H9wWw{z-7)nf$z;~pS5k}f89Jd@&e6e1p z11rS$ks?pzq|#HNh3T2#&B&AxpsKOy$`K-_@0CcgHA-`$IDn%|9J;0SXu?H$#IFcxuv&TB@ePcF^(piTm%G+QEMO|GQ-fLuu$RnqA7oj z2QV1NTNzNrP2*1}LX5mA7aB5zED*XhOT;(()yWjL_tzfo26e!+y;@Hf3(CLCG+~wv)|Qj`lO9RGnuUfWFH|09U?;1|_1Db+RL4OyE6cAPQ3a zmN$^vlG_2xI}*m1N-JO&8?S|%Kvxw8u?^<|!cP^F{Z?I&Jd~d)P0pi?vm36^?YRIc z#t@)Is-l>p*USiO%8t%NwBI#!Oayf#ij|b|5_p7=cj8mKr6d@N^$IOZ6cR;3zEkIw z^yjv~Szeft_Y@!{lxl=iOtv{{L*$__ohDqD;W5hO#5*x2uWjk0hyZtqAI?3s?Aal6 zVX7zoC`>*2ucWwy}{gt$Q2m_or84O!07UuWKxggR3Tp+{zL%U_Ckw zgiR*#(CP4EK@o9oUY#bZEIFi~&{^x@(IgA2ei4jC4L&w7Ov4M!5NyjDSt1V*bPlrdw# zi#wX2R(-_*)R%A+_9rbLykIbSt0|Fi z?%T}Y@t~63_9POb3W&!)YUX$SwBo4D>@%K!qboxhlwO+HKNL&%XJ(L15-uT+ENi$2 zY#4+qJ(nfiFj|B&DTy2P9+6yVCnZLOf&tr!z;=;ZlFBoc&P3Ev!7X7P;51OoID0!wdHdJLoMSYE6`(=YEq0dW=9<%V$4{0F~Aac z0?rHpz6X2MXxtaQ>i`@fBTW9aXF-5ISC5!s_@(~&7)7_a62<20 z+41}^GXGk>VFP}i*Z29nc%7TBMc=l10-_p=fN1O4*z{=x7P+&rv*-2id%FG*+8QQ| z@r!)itTozDuulAa^paU*Z+m<}`!VOv4uD_idNZ+jKCrM!=2f?=T(zrb(7xBNoJOv$ z{IzH=FV^DqoF0V^Qp=vh3WiyGV|ssY&?P9q?9H0;HihCt8c|ut?z+oc=&>W45)jos zU8kP&=Z6TuIyUTLQ0iQ)2$AZF*N#$#^hVr6#tXiWuE6A$S*%3V%2B)&vn=QZu6ltK z(PA2E!J`xxMjah^JOV612_pkje+Us+Y^1nqZ zGdYZcgh#<7m`X7c=) ziyDgWNMD5v?t%HHzK>}c==4DPo?%Sz$=$z*gx^XLdpJaxUZBo{9y#6K>sI>F$ZL2m zw!(2dnNf3-9ROFO1AVgDV3XUa!YI3r8NC60IuUz9l{s)x6Et^8ymT|MY^)NDB; zD`5yqcAb#eBz+L_>~DdA_*d_xkz6l-T`OBp_7}AqNxGCRyq?<%t12Ws<=ttq#+Z_A zlVay5`1N{pCHtq{K0dDBjjsHexsnuJh0T)@o5GPXGJIA&f<7So^WF_MUH5U5eCxMj z7~AY9Fd56EA*dcvic|I!%KBv@KbUze0H;vm52sl-D%%z~Vwe+HepZ04oECnJ7g0c8zOKU1AO}Bzm=%M(aEQb(0D6SQ7`D zf}{0w8SF(~A!)9p3g-c`h=J(R2ae>H^kapXC5zI7aRSyA_O7_JT9<0nf|)fWQ*!jVC7?+1`Jsys^%g`_X#+`%7APxRw;Cs62V;RZftJZ?W|gV-q?C_$l^s}z5T@sAp5Ypn<2Sz@vj(-Q3(JAp@35FI3hQW zmB^$#0L{yNJbtYY(KNeT4T{w|u6!B~we7UF$SuN^&7ZK^DCd#cx~22B0lV*@6e{ZqB*X!=Vr>6GLX2 z8}rsbAMS5ZVySml?b3I&$G>6s_bw>x+U4b!>mG*x&P$5PYrz9<;}ym7t$uvRb=#GV zt*{NP0=-mak>1yHK+(b5y^#sUx&45!KJE4Lx$zG&+sZhw#6~H!V?qJiMA}9bMEmy- z9Pxn@rx9AMdg_9oc2>`Cl?Vzy2+tC}#aAy1ZIM#T#&KVdTPC<#VwWH73b^$I-31o| z?)Mm>LANhwy9`q|)tv!crKU-AX?2{PoRRk?d^jPwd2G>gSl!nWCQC87Wzhcp5^i77 z3m$B0=)V{=LnwU)V5Ng*Fd&78YJ`OwDHkF~T{}3aQ6B}r$0jLmhnr7|e`)%svJrJ? 
zYNn;DlSQ9)h0FX&t1Y^^v;iy67Pg)4{JmSASnj9Se>iegyE_O>vIr2X$pDSZc=1J1u3oeo zhwYx`e6>N`zxHlikOaazPy687yMU-(s-E=1b$NV0yCyccvy8%{2ytEcChr!#OjPPy zUvCVv!C20Y>iEw^esDdDnvb(*$C9ewAp#s^SGOpH)HW zb}B<$nooz{G`KI@@cyc@YaV2PHR}e279cEHD?@2UkVbN)VoCL{h0GHORd=U>fe7+L zlW&k(Q2DYj{$ZVdXs_huv8M66SduuPQV2*vctaVsD0WI5evPPklBG7qVmQ|BMxRSo z&lzy<)RxXzgbApX3s6fa0Ivs7q8Mm*avH!AC&+{kCi`+Aq(*b>7Kx`;MrEt=*`%#T zpGnP`>3endx0x15Z)1(aG5TEa`B^rwQy4cz9og!;3(>#ljHo%*4p+RPJOdVDPsT^; zT?jq)(4lEnS1v}Q+9LfgH&si?LSjOV#1xm8SZw6qg|$1myBHdrtNc;fCqeX>n< zx9Mq#-uM1E)V+HzINPDmu$QTShsE%U9f@{-r+kMfn1*IMuYM33o_;z6&do&s(sX=C zWo8YmyTFaNThEZ$Hakr<@s3Rgy!{b?__YthW~b&GGE|z7VpWH@-0X36>0{CqtGF4e zT#jNKJo?7igW(J_p{!>e++OKYf9jtys{ZX>Z^i`24k+=Flf@=NHIty@Zn z@OH(Z)sU<~@RoxVoj@x(a%f5bjp^irLC3}D8aZCkgOtim#8`yoiP&<@Ij+`ZHRvqw zP$wAilF(Ek8I(nW{s!{#ahuzBukc>-+0y#8f9y0pk57{2^H$9)4n@U(l59e-wG*aM z_+2l1SWuo(38SDMyh17QBEebHqM@y5X|)`HKa!2Uet8+Zv+X*TIjl1nFT0(#jn@9W zvbE(j1WC_Xs*$EyybH8!%PTVHK{C)T^8tYqZl^LB-z2(Ft|$2-lMIiZdl1pO;V{E|#p-EzW)I5YbD1RcNq*gvLf-&R~}V+x+-5G`|$lh98^yU zV0bW=kOtKS9y<0GE5;jp!0O;ar2}-SM~Q$D-%Rrd~GUS8C(5u4l7UP z)$$dE+tFPAMP9d()44H&ac4p#FqXI-hcsSmf4>QeNH1zh6s1lExsZ<|Wk}C1S%f~S zCtM0Jq+}*3FU|R#BA{m$ndYrnrbQ*Oc9mH*H6lP@4pI!4f1vad;b4yb>z~D-A>0^r zHA+L72W~%k^+l3$Mwmu^I-&C7$zBxMFGQ)SJvS0qE}Qz3QapVuQ7j{YB4sw29lKh1 zDBuBPH^WHJy;&PgR9K6=vzQ?~_M%A9=ca_H&D$;|B9flI7nxY=E$zi-ww0SV>2e*g z{)|QW+q<;QBGpJys66vX; zT3eZDtB$=&s4QN53A_yM?d=HWta9`|g|v1f54)R)!PiCLtkyI)9%6@P(w4)&{pTO+ zG8foeTx!1#QwG&q8<(VYNff_@ZzXS}ugVwWzq@|N+B6Z2iLh_UVEGcqSklWkDvu45 zH0Tu~9@$MPyqRI3`$gUA%~_^9X3l?!5%wA9(OLKMS3aiGVx>t0&nY~(2f^vL2F8k{ z7TATx>D~ztzVa)ExI2%4Wxp4s)2&Bpw~k}(|Hn*j->ubbhPt{+O0->8ZTK_@26L>2lB5UhiGB zn|56cN3jk4IfQNN4WCkW=C;=qZyA%4)G5Y}lm=}a(c$C)I~xYpO!N%=?*TyLSTl`( z`BfJ;ia9XTRYnLgQ0W}3>Er8=ishV|N2=#b?Ixj}y5~(*a4WjULaExl9};hTO$4c< zZT-_X^!6y(e5f9{+4FeNEbcMk`KX$oFGbiUWl+cIC-X}_7To**0l0xZyL*Yv0LvMt z>yoij#p~b^y*fQDl1awW4Svc!7?hJIIdynjXfak-R{gb<7Z3R9YOuTgYLj;sYA5JV zIIf7!IQ~5;j=Q|{wTzJ5cgqJk*3D>cI~E837MCW)=XBP#74r7`r8m8F-tg6_e5Cea z&)s{sPh)7Wi(&c4=CSyH^WU5+As#^g`0t#5(STV0-(c1^b9HfbGS%1rH|gwzN< zs4xeK$Rk7tB?-yv7-NiK%#4}wN**_&JR*@-2uC7$gy=*as#9JSc~q1-<@O*s5hoRQ z&Ftfx+1tMN`h3RgkNy4p*8Z*ETC?}sYd+A_ZsxP`s#JU3-!|pc_3LMT-1%m*X033` zIaTY@^G2Ox=U)kvf1~t-7;xi%54+UdbeU+MH|+i^-f*ZX1e-Qy?lH| zPj2ay)I?ZUw#N2?E4X;MmAB`mmHmr;6o+4!zOx zny%Xn-#qo~biSA2_g2FA=*i)NrrfSq`@6r<40Gd{gVz*=`X@s}b?`N!4>V;W>+|KW zC6#_9?74YmXWoO^;NpnuVLB&m?-zwiMI@+r%T?nC*Tk+=zZNtv{j*xNpdnPE?7XzX zc4B!_pIPP1hCGYOKiY0n#z?kyS6NG0 z;$t4KvL19FX*}!o!)5ThPwM$tXX-Vhsl<0wW&4iQwG)5BeD)@=`NExhK=>5 zb%FyHUJ5UaF3Kc|uXH3MP;$z}qh-;BJcI>KM#0p7LLo?emreU+;6LN50BnOI4q-NVs!1z}>3P!18UvsZhdcJ?)g{@T8JoZ~ySZzhoRD zp7*CoIWnbVBZIm&4fH+^-Xinnm`HH#uV*bsoKH2TF=``~X|ih*_tvmKXs2cBR<0dT z&$PEw``yjCGwb@%VKEL~dMxX_NVxDZ22*a@sqKSqePLz!6Z+Y$dgHsx{qhAB0_@t; zN2lFb&+8NqB)BGYJD3O;f3rLjEiGXq7`H9YyAOXWYs;M{c5YI=u0f`Grn2;Y#>cmQ zk)Ld~+LsXaYj6&Bq$F$nZ2i1&%YK79dO8W!Hy!cBp>);(=A(*344l* zB_o?~dHDwMdfhrX1?m&2hS__43cBSK&K0wL2XR8j12*X&kKQ9 z+-V;gx12TfXi9z{tD5*WLdV^qtf05die+b0`D&VIoKKkOnBu%ty2(uPGbwHJ$r_uD zICpA4rD{S%)5G(n3MJof$us|c#pxF%qpt@Ps`lsIJ1H^k_9O9CsAuR6anm5nt+>z4 zUmYml@b;VM?P7@vkjE71^YYwZeV-hitEZ}N$e9uGwDFnW%d87F{Y6cyf0svKc-ig` z4L?|Q<$dtq64`k0lEiCOow=G>Tlv;~mFc%6(w{{?ZF%ohV^e9=ANgQhuj%P^Ti0*% zrYkOU#Ju{1*Js-5f5$<|N!Rs;DwJ?PYWvn-3M+i|IJ5BMlR5RJ%&lkMc|I&7bj=PM zKbz7Ec}JHH$sF%A=?$E`L}lVz4~P{{uUoMvexPK1Jc;ESC6H7Zo;)QlJ{lqWW{C&k80!i=JZRcXjBqvNHMO=v`3l^E=6Sxaa7GmHcF@Q!OUPLxoSLgpY*ETp~Iyv~P zv?GEL6b068Bb_d=z6I$}*<{|HEBx^g9N>5gH|X->>DhP~(@BmA?Y;pT>`(yrjTV1A z;qoYldlMNzR0f^<=sOk`U$4i*=(#SKSpDdu@0H+P-S51~13$(BOVbfSkr;FiIg~@A zdZ8@Ca5ym8DcIUK>xyY@%;93lq|#&kHHC!)+1M0dm^+ 
zd-mmZq7Wo14MBJW6Jms7_~S!qek#L7$eR6T`$BMN!CfGK)%L z5;?x8xE8QMH5e0W_r%01Jc61^K&BV16y{50`!3Fk1<3pm3|YS*VnXZE zWLIAU*A(2X&{^^DFd8BgnSrQy$Zc@r$z{XLWd{-_bADe^AbSS{MR%d&X$nL$91e4F zJk$voYP|(Rz4yH_A+cShM=2m74&oq43ej-h;dTke78`I0~kfefHxg8(?*UE`>~lidvNA zPYw%Vu)I-FrZEgDXu*(^f-ff2>Ew^o01ameh96{4Qg1*(G%B4;B9eT$yPfk_y>c>4 z?Qnr9r9dhs)jn00u@|%*s1GPAwFyN9Py@(Vowg|nrc~%K<*?cxld@@rZn%Q<#(`dg z95tlxpeQ;AqrbYB!44@4c3KY7FrC9L&lT;#X=+%CPKlzSMNSY4EWcb_8zgVQ)`bDs zk|zgXS~zPI{vd4e;<1h3VI z6o2z4%CIAn{D~AYfl81vPs<4(;e`qa7d84sC5zKQi6}J0Vf9L%PbwbdIMOmEQ%EpEi56!*N16 zYzE1ncX62i6gkpiC;0;G^u6X_I=-~Mns`taI*iftNJaqKVRM(&@gwdTNTf|M7!#py zFIx5v%xTXj+!Xct(Cku-FYd91O!4OPkq)+|dDxe-TIiZ_uAbOF)0-GA4 zqPf}(M-#Qon3vUWJwq`ay)}wc+Mw@%GlQx-T3L&ophdIQ;8(B{rwu#zc86g)RULsM zYBCUX5!5$iKg~ot9C85Dhe|^i?Nj20yrjMq4NDzKM=&Mhs~Se(KpF${Dx~!1WmH7m zd1?rY$V9c>1!q{gQVL5owc(hOIoOi22h`G7Fhn3Hs%5v(QW%qrE^#-aV5a9Y%+M>2 zVlq_}!9XoN2r3>$pQ6gUC8ZE3m;d)-&dF5jd-~r><>#1KSpCp$UQIl zrD-da#J2+`zoh5~ODod;imBsYm=bc|F@9;t4khvJLdGxcI1Nk6*I+4keGI0Ad?uJ* zDkq|)zn>8PU(iy6SWF1{WClN!OhZ9`Kc~U(>}rGqG8Wy%%e!2&_YW(l~E&%HXL1{Huel$xi&cm(|isd#=K literal 0 HcmV?d00001 diff --git a/CVIssueCount/CVIssueCount.py b/CVIssueCount/CVIssueCount.py new file mode 100644 index 0000000..7fe8afd --- /dev/null +++ b/CVIssueCount/CVIssueCount.py @@ -0,0 +1,158 @@ +# +#CVIssueCount.py +# +#Author: Quinyd +# +#Description: Complete series count with the Comic Vine issue count +# +#Versions: +# 0.1 First version +# 0.2 Fixed Multiple book search +# +# +#ComicRack Declarations +# +#@Name CVIssueCount +#@Hook Books +#@Key CVIssueCount +#@PCount 0 + +# $ cd "..\..\Program Files\ComicRack\ +# $ ComicRack.exe -ssc + +from __future__ import unicode_literals + +import clr, re, sys, os, urlparse, time + +from System.Diagnostics import Process +clr.AddReference("System.xml") +import System +from System import * +from System.IO import * +from System.Collections import * +from System.Threading import * +from System.Net import * +from System.Text import * + +clr.AddReference('System') +clr.AddReference('System.Windows.Forms') +from System.Windows.Forms import * +clr.AddReference('System.Drawing') +from System.Drawing import Point, Size, ContentAlignment, Color, SystemColors, Icon +from datetime import datetime +import ssl, urllib + + +def CVIssueCount(books): + + API_KEY="" + + # Load all books in library + + all_books_original = ComicRack.App.GetLibraryBooks() + + # Dictionaries to be used + + MaxCountList= dict() + NumberList=dict() + VolumeJumps=dict() + + seriesVolume=-999999 + + IssueCount=0 + + CheckedVolumes=list() + IssueCountList=list() + + # I look for data in the books in Library + + for book in books: + volume = book.GetCustomValue("comicvine_volume") + if not MaxCountList.has_key(volume): + + # I start default values + + MaxCountList[volume] = -999999 + NumberList[volume] = list() + + try: + + # I look for highest number in each volume + + if int(float(book.Number)) > MaxCountList[volume] and int(float(book.Number)) < 1000 : + MaxCountList[volume] = int(book.Number) + + # I store numbers of each volume + + NumberList[volume].append([book.Year*12+book.Month,int(float(book.Number))]) + + except Exception,e: print str(e) + for volume in NumberList.keys(): + seriesVolume = volume + + + + QUERY = "https://comicvine.gamespot.com/api/volume/4050-"+ volume +"/?api_key=" + API_KEY + "&format=xml&field_list=count_of_issues" + + # print QUERY + + if volume not in CheckedVolumes: + #print "Getting info for " + volume + data = _read_url(QUERY.encode('utf-8')) + # time.sleep(3) + # print 
Text.Json.RootElement.GetProperty("count_of_issues"); + + doc = System.Xml.XmlDocument() + doc.LoadXml(data) + elemList = doc.GetElementsByTagName("count_of_issues") + + for i in elemList: + IssueCount = int(i.InnerXml) + print str(volume) + "'s count is " + str(IssueCount) + CheckedVolumes.append(volume) + IssueCountList.append(IssueCount) + + for book in all_books_original: + if book.SeriesComplete: + volume = book.GetCustomValue("comicvine_volume") + if volume == seriesVolume: + if book.Number.isnumeric: + # print "Setting count to " + str(IssueCount) + " for comics with Series " + book.Series + "(" + book.GetCustomValue("comicvine_volume") + ")" + book.Count = IssueCount + book.SetCustomValue("comicvine_issue_count",str(IssueCount)) + +def _read_url(url): + + page = '' + + requestUri = url + + ServicePointManager.SecurityProtocol = SecurityProtocolType.Tls | SecurityProtocolType.Tls11 | SecurityProtocolType.Tls12 | SecurityProtocolType.Ssl3; + + Req = HttpWebRequest.Create(requestUri) + Req.Timeout = 60000 + Req.UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36' + Req.AutomaticDecompression = DecompressionMethods.Deflate | DecompressionMethods.GZip + + #Req.Referer = requestUri + Req.Accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' + Req.Headers.Add('Accept-Language','en-US,en;q=0.9,it;q=0.8,fr;q=0.7,de-DE;q=0.6,de;q=0.5') + + Req.KeepAlive = True + webresponse = Req.GetResponse() + + a = webresponse.Cookies + + inStream = webresponse.GetResponseStream() + encode = Encoding.GetEncoding("utf-8") + ReadStream = StreamReader(inStream, encode) + page = ReadStream.ReadToEnd() + + + try: + inStream.Close() + webresponse.Close() + except: + pass + + return page \ No newline at end of file diff --git a/CVIssueCount/Package.ini b/CVIssueCount/Package.ini new file mode 100644 index 0000000..5d0fdfa --- /dev/null +++ b/CVIssueCount/Package.ini @@ -0,0 +1,4 @@ +Name=CV Issue Count +Author=Quinyd +Version=1 +Description=Complete series count with the Comic Vine issue count \ No newline at end of file diff --git a/CVIssueCount/UserDict.py b/CVIssueCount/UserDict.py new file mode 100644 index 0000000..732b327 --- /dev/null +++ b/CVIssueCount/UserDict.py @@ -0,0 +1,213 @@ +"""A more or less complete user-defined wrapper around dictionary objects.""" + +class UserDict: + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'UserDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is " + "deprecated", PendingDeprecationWarning, + stacklevel=2) + else: + dict = None + self.data = {} + if dict is not None: + self.update(dict) + if len(kwargs): + self.update(kwargs) + def __repr__(self): return repr(self.data) + def __cmp__(self, dict): + if isinstance(dict, UserDict): + return cmp(self.data, dict.data) + else: + return cmp(self.data, dict) + __hash__ = None # Avoid Py3k warning + def __len__(self): return len(self.data) + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + def __setitem__(self, key, item): self.data[key] = item + def __delitem__(self, 
key): del self.data[key] + def clear(self): self.data.clear() + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + def keys(self): return self.data.keys() + def items(self): return self.data.items() + def iteritems(self): return self.data.iteritems() + def iterkeys(self): return self.data.iterkeys() + def itervalues(self): return self.data.itervalues() + def values(self): return self.data.values() + def has_key(self, key): return key in self.data + def update(*args, **kwargs): + if not args: + raise TypeError("descriptor 'update' of 'UserDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is deprecated", + PendingDeprecationWarning, stacklevel=2) + else: + dict = None + if dict is None: + pass + elif isinstance(dict, UserDict): + self.data.update(dict.data) + elif isinstance(dict, type({})) or not hasattr(dict, 'items'): + self.data.update(dict) + else: + for k, v in dict.items(): + self[k] = v + if len(kwargs): + self.data.update(kwargs) + def get(self, key, failobj=None): + if key not in self: + return failobj + return self[key] + def setdefault(self, key, failobj=None): + if key not in self: + self[key] = failobj + return self[key] + def pop(self, key, *args): + return self.data.pop(key, *args) + def popitem(self): + return self.data.popitem() + def __contains__(self, key): + return key in self.data + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + +class IterableUserDict(UserDict): + def __iter__(self): + return iter(self.data) + +import _abcoll +_abcoll.MutableMapping.register(IterableUserDict) + + +class DictMixin: + # Mixin defining all dictionary methods for classes that already have + # a minimum dictionary interface including getitem, setitem, delitem, + # and keys. Without knowledge of the subclass constructor, the mixin + # does not define __init__() or copy(). In addition to the four base + # methods, progressively more efficiency comes with defining + # __contains__(), __iter__(), and iteritems(). 
+ + # second level definitions support higher levels + def __iter__(self): + for k in self.keys(): + yield k + def has_key(self, key): + try: + self[key] + except KeyError: + return False + return True + def __contains__(self, key): + return self.has_key(key) + + # third level takes advantage of second level definitions + def iteritems(self): + for k in self: + yield (k, self[k]) + def iterkeys(self): + return self.__iter__() + + # fourth level uses definitions from lower levels + def itervalues(self): + for _, v in self.iteritems(): + yield v + def values(self): + return [v for _, v in self.iteritems()] + def items(self): + return list(self.iteritems()) + def clear(self): + for key in self.keys(): + del self[key] + def setdefault(self, key, default=None): + try: + return self[key] + except KeyError: + self[key] = default + return default + def pop(self, key, *args): + if len(args) > 1: + raise TypeError, "pop expected at most 2 arguments, got "\ + + repr(1 + len(args)) + try: + value = self[key] + except KeyError: + if args: + return args[0] + raise + del self[key] + return value + def popitem(self): + try: + k, v = self.iteritems().next() + except StopIteration: + raise KeyError, 'container is empty' + del self[k] + return (k, v) + def update(self, other=None, **kwargs): + # Make progressively weaker assumptions about "other" + if other is None: + pass + elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups + for k, v in other.iteritems(): + self[k] = v + elif hasattr(other, 'keys'): + for k in other.keys(): + self[k] = other[k] + else: + for k, v in other: + self[k] = v + if kwargs: + self.update(kwargs) + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + def __repr__(self): + return repr(dict(self.iteritems())) + def __cmp__(self, other): + if other is None: + return 1 + if isinstance(other, DictMixin): + other = dict(other.iteritems()) + return cmp(dict(self.iteritems()), other) + def __len__(self): + return len(self.keys()) diff --git a/CVIssueCount/__future__.py b/CVIssueCount/__future__.py new file mode 100644 index 0000000..e0996eb --- /dev/null +++ b/CVIssueCount/__future__.py @@ -0,0 +1,128 @@ +"""Record of phased-in incompatible language changes. + +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped. + +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). 
+ +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names used by +# compile.h, so that an editor search will find them here. However, +# they're not exported in __all__, because they don't really belong to +# this module. +CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x2000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals + +class _Feature: + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. + """ + + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, is None. + """ + + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) diff --git a/CVIssueCount/_abcoll.py b/CVIssueCount/_abcoll.py new file mode 100644 index 0000000..b643692 --- /dev/null +++ b/CVIssueCount/_abcoll.py @@ -0,0 +1,695 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +DON'T USE THIS MODULE DIRECTLY! The classes here should be imported +via collections; they are defined here only to alleviate certain +bootstrapping issues. Unit tests are in test_collections. 
+""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Hashable", "Iterable", "Iterator", + "Sized", "Container", "Callable", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + ] + +### ONE-TRICK PONIES ### + +def _hasattr(C, attr): + try: + return any(attr in B.__dict__ for B in C.__mro__) + except AttributeError: + # Old-style class + return hasattr(C, attr) + + +class Hashable: + __metaclass__ = ABCMeta + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + try: + for B in C.__mro__: + if "__hash__" in B.__dict__: + if B.__dict__["__hash__"]: + return True + break + except AttributeError: + # Old-style class + if getattr(C, "__hash__", None): + return True + return NotImplemented + + +class Iterable: + __metaclass__ = ABCMeta + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + if _hasattr(C, "__iter__"): + return True + return NotImplemented + +Iterable.register(str) + + +class Iterator(Iterable): + + @abstractmethod + def next(self): + 'Return the next item from the iterator. When exhausted, raise StopIteration' + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + if _hasattr(C, "next") and _hasattr(C, "__iter__"): + return True + return NotImplemented + + +class Sized: + __metaclass__ = ABCMeta + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + if _hasattr(C, "__len__"): + return True + return NotImplemented + + +class Container: + __metaclass__ = ABCMeta + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + if _hasattr(C, "__contains__"): + return True + return NotImplemented + + +class Callable: + __metaclass__ = ABCMeta + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + if _hasattr(C, "__call__"): + return True + return NotImplemented + + +### SETS ### + + +class Set(Sized, Iterable, Container): + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. 
+ """ + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) > len(other) and self.__ge__(other) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + def __ne__(self, other): + return not (self == other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + __rand__ = __and__ + + def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' + for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + __ror__ = __or__ + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + __rxor__ = __xor__ + + # Sets are not hashable by default, but subclasses can change this + __hash__ = None + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxint + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). 
+ + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Sized, Iterable, Container): + + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + """ + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' 
+ try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def iterkeys(self): + 'D.iterkeys() -> an iterator over the keys of D' + return iter(self) + + def itervalues(self): + 'D.itervalues() -> an iterator over the values of D' + for key in self: + yield self[key] + + def iteritems(self): + 'D.iteritems() -> an iterator over the (key, value) items of D' + for key in self: + yield (key, self[key]) + + def keys(self): + "D.keys() -> list of D's keys" + return list(self) + + def items(self): + "D.items() -> list of D's (key, value) pairs, as 2-tuples" + return [(key, self[key]) for key in self] + + def values(self): + "D.values() -> list of D's values" + return [self[key] for key in self] + + # Mappings are not hashable by default, but subclasses can change this + __hash__ = None + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + def __ne__(self, other): + return not (self == other) + +class MappingView(Sized): + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + for key in self._mapping: + yield key + +KeysView.register(type({}.viewkeys())) + +class ItemsView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(type({}.viewitems())) + +class ValuesView(MappingView): + + def __contains__(self, value): + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(type({}.viewvalues())) + +class MutableMapping(Mapping): + + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + """ + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' + try: + key = next(iter(self)) + except StopIteration: + raise KeyError + value = self[key] + del self[key] + return key, value + + def clear(self): + 'D.clear() -> None. Remove all items from D.' + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + ''' D.update([E, ]**F) -> None. 
Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' + if not args: + raise TypeError("descriptor 'update' of 'MutableMapping' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('update expected at most 1 arguments, got %d' % + len(args)) + if args: + other = args[0] + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Sized, Iterable, Container): + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value): + '''S.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present. + ''' + for i, v in enumerate(self): + if v == value: + return i + raise ValueError + + def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' + return sum(1 for v in self if v == value) + +Sequence.register(tuple) +Sequence.register(basestring) +Sequence.register(buffer) +Sequence.register(xrange) + + +class MutableSequence(Sequence): + + """All the operations on a read-only sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). + + """ + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + 'S.insert(index, object) -- insert object before index' + raise IndexError + + def append(self, value): + 'S.append(object) -- append object to the end of the sequence' + self.insert(len(self), value) + + def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' + for v in values: + self.append(v) + + def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' + v = self[index] + del self[index] + return v + + def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. 
+ ''' + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) diff --git a/CVIssueCount/_weakrefset.py b/CVIssueCount/_weakrefset.py new file mode 100644 index 0000000..627959b --- /dev/null +++ b/CVIssueCount/_weakrefset.py @@ -0,0 +1,204 @@ +# Access WeakSet through the weakref module. +# This code is separated-out because it is needed +# by abc.py to load everything else at startup. + +from _weakref import ref + +__all__ = ['WeakSet'] + + +class _IterationGuard(object): + # This context manager registers itself in the current iterators of the + # weak container, such as to delay all removals until the context manager + # exits. + # This technique should be relatively thread-safe (since sets are). + + def __init__(self, weakcontainer): + # Don't create cycles + self.weakcontainer = ref(weakcontainer) + + def __enter__(self): + w = self.weakcontainer() + if w is not None: + w._iterating.add(self) + return self + + def __exit__(self, e, t, b): + w = self.weakcontainer() + if w is not None: + s = w._iterating + s.remove(self) + if not s: + w._commit_removals() + + +class WeakSet(object): + def __init__(self, data=None): + self.data = set() + def _remove(item, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(item) + else: + self.data.discard(item) + self._remove = _remove + # A list of keys to be removed + self._pending_removals = [] + self._iterating = set() + if data is not None: + self.update(data) + + def _commit_removals(self): + l = self._pending_removals + discard = self.data.discard + while l: + discard(l.pop()) + + def __iter__(self): + with _IterationGuard(self): + for itemref in self.data: + item = itemref() + if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
+ yield item + + def __len__(self): + return len(self.data) - len(self._pending_removals) + + def __contains__(self, item): + try: + wr = ref(item) + except TypeError: + return False + return wr in self.data + + def __reduce__(self): + return (self.__class__, (list(self),), + getattr(self, '__dict__', None)) + + __hash__ = None + + def add(self, item): + if self._pending_removals: + self._commit_removals() + self.data.add(ref(item, self._remove)) + + def clear(self): + if self._pending_removals: + self._commit_removals() + self.data.clear() + + def copy(self): + return self.__class__(self) + + def pop(self): + if self._pending_removals: + self._commit_removals() + while True: + try: + itemref = self.data.pop() + except KeyError: + raise KeyError('pop from empty WeakSet') + item = itemref() + if item is not None: + return item + + def remove(self, item): + if self._pending_removals: + self._commit_removals() + self.data.remove(ref(item)) + + def discard(self, item): + if self._pending_removals: + self._commit_removals() + self.data.discard(ref(item)) + + def update(self, other): + if self._pending_removals: + self._commit_removals() + for element in other: + self.add(element) + + def __ior__(self, other): + self.update(other) + return self + + def difference(self, other): + newset = self.copy() + newset.difference_update(other) + return newset + __sub__ = difference + + def difference_update(self, other): + self.__isub__(other) + def __isub__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.difference_update(ref(item) for item in other) + return self + + def intersection(self, other): + return self.__class__(item for item in other if item in self) + __and__ = intersection + + def intersection_update(self, other): + self.__iand__(other) + def __iand__(self, other): + if self._pending_removals: + self._commit_removals() + self.data.intersection_update(ref(item) for item in other) + return self + + def issubset(self, other): + return self.data.issubset(ref(item) for item in other) + __le__ = issubset + + def __lt__(self, other): + return self.data < set(ref(item) for item in other) + + def issuperset(self, other): + return self.data.issuperset(ref(item) for item in other) + __ge__ = issuperset + + def __gt__(self, other): + return self.data > set(ref(item) for item in other) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.data == set(ref(item) for item in other) + + def __ne__(self, other): + opposite = self.__eq__(other) + if opposite is NotImplemented: + return NotImplemented + return not opposite + + def symmetric_difference(self, other): + newset = self.copy() + newset.symmetric_difference_update(other) + return newset + __xor__ = symmetric_difference + + def symmetric_difference_update(self, other): + self.__ixor__(other) + def __ixor__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) + return self + + def union(self, other): + return self.__class__(e for s in (self, other) for e in s) + __or__ = union + + def isdisjoint(self, other): + return len(self.intersection(other)) == 0 diff --git a/CVIssueCount/abc.py b/CVIssueCount/abc.py new file mode 100644 index 0000000..02e48a1 --- /dev/null +++ b/CVIssueCount/abc.py @@ -0,0 +1,185 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. 
+# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) according to PEP 3119.""" + +import types + +from _weakrefset import WeakSet + +# Instance of old-style class +class _C: pass +_InstanceType = type(_C()) + + +def abstractmethod(funcobj): + """A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C: + __metaclass__ = ABCMeta + @abstractmethod + def my_abstract_method(self, ...): + ... + """ + funcobj.__isabstractmethod__ = True + return funcobj + + +class abstractproperty(property): + """A decorator indicating abstract properties. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract properties are overridden. + The abstract properties can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C: + __metaclass__ = ABCMeta + @abstractproperty + def my_abstract_property(self): + ... + + This defines a read-only property; you can also define a read-write + abstract property using the 'long' form of property declaration: + + class C: + __metaclass__ = ABCMeta + def getx(self): ... + def setx(self, value): ... + x = abstractproperty(getx, setx) + """ + __isabstractmethod__ = True + + +class ABCMeta(type): + + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + + """ + + # A global counter that is incremented each time a class is + # registered as a virtual subclass of anything. It forces the + # negative cache to be cleared before its next use. + _abc_invalidation_counter = 0 + + def __new__(mcls, name, bases, namespace): + cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace) + # Compute set of abstract method names + abstracts = set(name + for name, value in namespace.items() + if getattr(value, "__isabstractmethod__", False)) + for base in bases: + for name in getattr(base, "__abstractmethods__", set()): + value = getattr(cls, name, None) + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + cls.__abstractmethods__ = frozenset(abstracts) + # Set up inheritance registry + cls._abc_registry = WeakSet() + cls._abc_cache = WeakSet() + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC.""" + if not isinstance(subclass, (type, types.ClassType)): + raise TypeError("Can only register classes") + if issubclass(subclass, cls): + return # Already a subclass + # Subtle: test for cycles *after* testing for "already a subclass"; + # this means we allow X.register(X) and interpret it as a no-op. 
+ if issubclass(cls, subclass): + # This would create a cycle, which is bad for the algorithm below + raise RuntimeError("Refusing to create an inheritance cycle") + cls._abc_registry.add(subclass) + ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__) + print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter + for name in sorted(cls.__dict__.keys()): + if name.startswith("_abc_"): + value = getattr(cls, name) + print >> file, "%s: %r" % (name, value) + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + # Inline the cache checking when it's simple. + subclass = getattr(instance, '__class__', None) + if subclass is not None and subclass in cls._abc_cache: + return True + subtype = type(instance) + # Old-style instances + if subtype is _InstanceType: + subtype = subclass + if subtype is subclass or subclass is None: + if (cls._abc_negative_cache_version == + ABCMeta._abc_invalidation_counter and + subtype in cls._abc_negative_cache): + return False + # Fall back to the subclass check. + return cls.__subclasscheck__(subtype) + return (cls.__subclasscheck__(subclass) or + cls.__subclasscheck__(subtype)) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + # Check cache + if subclass in cls._abc_cache: + return True + # Check negative cache; may have to invalidate + if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: + # Invalidate the negative cache + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + elif subclass in cls._abc_negative_cache: + return False + # Check the subclass hook + ok = cls.__subclasshook__(subclass) + if ok is not NotImplemented: + assert isinstance(ok, bool) + if ok: + cls._abc_cache.add(subclass) + else: + cls._abc_negative_cache.add(subclass) + return ok + # Check if it's a direct subclass + if cls in getattr(subclass, '__mro__', ()): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a registered class (recursive) + for rcls in cls._abc_registry: + if issubclass(subclass, rcls): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a subclass (recursive) + for scls in cls.__subclasses__(): + if issubclass(subclass, scls): + cls._abc_cache.add(subclass) + return True + # No dice; update negative cache + cls._abc_negative_cache.add(subclass) + return False diff --git a/CVIssueCount/base64.py b/CVIssueCount/base64.py new file mode 100644 index 0000000..82c112c --- /dev/null +++ b/CVIssueCount/base64.py @@ -0,0 +1,364 @@ +#! /usr/bin/env python + +"""RFC 3548: Base16, Base32, Base64 Data Encodings""" + +# Modified 04-Oct-1995 by Jack Jansen to use binascii module +# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support + +import re +import struct +import string +import binascii + + +__all__ = [ + # Legacy interface exports traditional RFC 1521 Base64 encodings + 'encode', 'decode', 'encodestring', 'decodestring', + # Generalized interface for other encodings + 'b64encode', 'b64decode', 'b32encode', 'b32decode', + 'b16encode', 'b16decode', + # Standard Base64 encoding + 'standard_b64encode', 'standard_b64decode', + # Some common Base64 alternatives. 
As referenced by RFC 3458, see thread + # starting at: + # + # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html + 'urlsafe_b64encode', 'urlsafe_b64decode', + ] + +_translation = [chr(_x) for _x in range(256)] +EMPTYSTRING = '' + + +def _translate(s, altchars): + translation = _translation[:] + for k, v in altchars.items(): + translation[ord(k)] = v + return s.translate(''.join(translation)) + + + +# Base64 encoding/decoding uses binascii + +def b64encode(s, altchars=None): + """Encode a string using Base64. + + s is the string to encode. Optional altchars must be a string of at least + length 2 (additional characters are ignored) which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. + + The encoded string is returned. + """ + # Strip off the trailing newline + encoded = binascii.b2a_base64(s)[:-1] + if altchars is not None: + return encoded.translate(string.maketrans(b'+/', altchars[:2])) + return encoded + + +def b64decode(s, altchars=None): + """Decode a Base64 encoded string. + + s is the string to decode. Optional altchars must be a string of at least + length 2 (additional characters are ignored) which specifies the + alternative alphabet used instead of the '+' and '/' characters. + + The decoded string is returned. A TypeError is raised if s were + incorrectly padded or if there are non-alphabet characters present in the + string. + """ + if altchars is not None: + s = s.translate(string.maketrans(altchars[:2], '+/')) + try: + return binascii.a2b_base64(s) + except binascii.Error, msg: + # Transform this exception for consistency + raise TypeError(msg) + + +def standard_b64encode(s): + """Encode a string using the standard Base64 alphabet. + + s is the string to encode. The encoded string is returned. + """ + return b64encode(s) + +def standard_b64decode(s): + """Decode a string encoded with the standard Base64 alphabet. + + s is the string to decode. The decoded string is returned. A TypeError + is raised if the string is incorrectly padded or if there are non-alphabet + characters present in the string. + """ + return b64decode(s) + +_urlsafe_encode_translation = string.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = string.maketrans(b'-_', b'+/') + +def urlsafe_b64encode(s): + """Encode a string using a url-safe Base64 alphabet. + + s is the string to encode. The encoded string is returned. The alphabet + uses '-' instead of '+' and '_' instead of '/'. + """ + return b64encode(s).translate(_urlsafe_encode_translation) + +def urlsafe_b64decode(s): + """Decode a string encoded with the standard Base64 alphabet. + + s is the string to decode. The decoded string is returned. A TypeError + is raised if the string is incorrectly padded or if there are non-alphabet + characters present in the string. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. 
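+
+    A small illustrative round trip (the encoded value here is just a
+    sample, not anything this module ships):
+
+    >>> urlsafe_b64encode('hello?world')
+    'aGVsbG8_d29ybGQ='
+    >>> urlsafe_b64decode('aGVsbG8_d29ybGQ=')
+    'hello?world'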
+ """ + return b64decode(s.translate(_urlsafe_decode_translation)) + + + +# Base32 encoding/decoding must be done in Python +_b32alphabet = { + 0: 'A', 9: 'J', 18: 'S', 27: '3', + 1: 'B', 10: 'K', 19: 'T', 28: '4', + 2: 'C', 11: 'L', 20: 'U', 29: '5', + 3: 'D', 12: 'M', 21: 'V', 30: '6', + 4: 'E', 13: 'N', 22: 'W', 31: '7', + 5: 'F', 14: 'O', 23: 'X', + 6: 'G', 15: 'P', 24: 'Y', + 7: 'H', 16: 'Q', 25: 'Z', + 8: 'I', 17: 'R', 26: '2', + } + +_b32tab = _b32alphabet.items() +_b32tab.sort() +_b32tab = [v for k, v in _b32tab] +_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()]) + + +def b32encode(s): + """Encode a string using Base32. + + s is the string to encode. The encoded string is returned. + """ + parts = [] + quanta, leftover = divmod(len(s), 5) + # Pad the last quantum with zero bits if necessary + if leftover: + s += ('\0' * (5 - leftover)) + quanta += 1 + for i in range(quanta): + # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this + # code is to process the 40 bits in units of 5 bits. So we take the 1 + # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover + # bits of c2 and tack them onto c3. The shifts and masks are intended + # to give us values of exactly 5 bits in width. + c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5]) + c2 += (c1 & 1) << 16 # 17 bits wide + c3 += (c2 & 3) << 8 # 10 bits wide + parts.extend([_b32tab[c1 >> 11], # bits 1 - 5 + _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10 + _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15 + _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5) + _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10) + _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15) + _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5) + _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5) + ]) + encoded = EMPTYSTRING.join(parts) + # Adjust for any leftover partial quanta + if leftover == 1: + return encoded[:-6] + '======' + elif leftover == 2: + return encoded[:-4] + '====' + elif leftover == 3: + return encoded[:-3] + '===' + elif leftover == 4: + return encoded[:-1] + '=' + return encoded + + +def b32decode(s, casefold=False, map01=None): + """Decode a Base32 encoded string. + + s is the string to decode. Optional casefold is a flag specifying whether + a lowercase alphabet is acceptable as input. For security purposes, the + default is False. + + RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O + (oh), and for optional mapping of the digit 1 (one) to either the letter I + (eye) or letter L (el). The optional argument map01 when not None, + specifies which letter the digit 1 should be mapped to (when map01 is not + None, the digit 0 is always mapped to the letter O). For security + purposes the default is None, so that 0 and 1 are not allowed in the + input. + + The decoded string is returned. A TypeError is raised if s were + incorrectly padded or if there are non-alphabet characters present in the + string. + """ + quanta, leftover = divmod(len(s), 8) + if leftover: + raise TypeError('Incorrect padding') + # Handle section 2.4 zero and one mapping. The flag map01 will be either + # False, or the character to map the digit 1 (one) to. It should be + # either L (el) or I (eye). + if map01: + s = s.translate(string.maketrans(b'01', b'O' + map01)) + if casefold: + s = s.upper() + # Strip off pad characters from the right. We need to count the pad + # characters because this will tell us how many null bytes to remove from + # the end of the decoded string. 
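+    # (A 40-bit quantum encodes to 8 characters, so the pad length tells us
+    # how many real bytes the final quantum carried: 6 '=' -> 1 byte,
+    # 4 -> 2, 3 -> 3, 1 -> 4; the surplus zero bytes are dropped below.)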
+    padchars = 0
+    mo = re.search('(?P<pad>[=]*)$', s)
+    if mo:
+        padchars = len(mo.group('pad'))
+        if padchars > 0:
+            s = s[:-padchars]
+    # Now decode the full quanta
+    parts = []
+    acc = 0
+    shift = 35
+    for c in s:
+        val = _b32rev.get(c)
+        if val is None:
+            raise TypeError('Non-base32 digit found')
+        acc += _b32rev[c] << shift
+        shift -= 5
+        if shift < 0:
+            parts.append(binascii.unhexlify('%010x' % acc))
+            acc = 0
+            shift = 35
+    # Process the last, partial quanta
+    last = binascii.unhexlify('%010x' % acc)
+    if padchars == 0:
+        last = ''                       # No characters
+    elif padchars == 1:
+        last = last[:-1]
+    elif padchars == 3:
+        last = last[:-2]
+    elif padchars == 4:
+        last = last[:-3]
+    elif padchars == 6:
+        last = last[:-4]
+    else:
+        raise TypeError('Incorrect padding')
+    parts.append(last)
+    return EMPTYSTRING.join(parts)
+
+
+
+# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
+# lowercase. The RFC also recommends against accepting input case
+# insensitively.
+def b16encode(s):
+    """Encode a string using Base16.
+
+    s is the string to encode. The encoded string is returned.
+    """
+    return binascii.hexlify(s).upper()
+
+
+def b16decode(s, casefold=False):
+    """Decode a Base16 encoded string.
+
+    s is the string to decode. Optional casefold is a flag specifying whether
+    a lowercase alphabet is acceptable as input. For security purposes, the
+    default is False.
+
+    The decoded string is returned. A TypeError is raised if s were
+    incorrectly padded or if there are non-alphabet characters present in the
+    string.
+    """
+    if casefold:
+        s = s.upper()
+    if re.search('[^0-9A-F]', s):
+        raise TypeError('Non-base16 digit found')
+    return binascii.unhexlify(s)
+
+
+
+# Legacy interface. This code could be cleaned up since I don't believe
+# binascii has any line length limitations. It just doesn't seem worth it
+# though.
+
+MAXLINESIZE = 76 # Excluding the CRLF
+MAXBINSIZE = (MAXLINESIZE//4)*3
+
+def encode(input, output):
+    """Encode a file."""
+    while True:
+        s = input.read(MAXBINSIZE)
+        if not s:
+            break
+        while len(s) < MAXBINSIZE:
+            ns = input.read(MAXBINSIZE-len(s))
+            if not ns:
+                break
+            s += ns
+        line = binascii.b2a_base64(s)
+        output.write(line)
+
+
+def decode(input, output):
+    """Decode a file."""
+    while True:
+        line = input.readline()
+        if not line:
+            break
+        s = binascii.a2b_base64(line)
+        output.write(s)
+
+
+def encodestring(s):
+    """Encode a string into multiple lines of base-64 data."""
+    pieces = []
+    for i in range(0, len(s), MAXBINSIZE):
+        chunk = s[i : i + MAXBINSIZE]
+        pieces.append(binascii.b2a_base64(chunk))
+    return "".join(pieces)
+
+
+def decodestring(s):
+    """Decode a string."""
+    return binascii.a2b_base64(s)
+
+
+
+# Useable as a script...
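+# A quick illustrative round trip with the string helpers above (the sample
+# string is the classic RFC 2617 example; values are for demonstration only):
+#
+#   >>> encodestring('Aladdin:open sesame')
+#   'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'
+#   >>> decodestring('QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n')
+#   'Aladdin:open sesame'
+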
+def test(): + """Small test program""" + import sys, getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'deut') + except getopt.error, msg: + sys.stdout = sys.stderr + print msg + print """usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0] + sys.exit(2) + func = encode + for o, a in opts: + if o == '-e': func = encode + if o == '-d': func = decode + if o == '-u': func = decode + if o == '-t': test1(); return + if args and args[0] != '-': + with open(args[0], 'rb') as f: + func(f, sys.stdout) + else: + func(sys.stdin, sys.stdout) + + +def test1(): + s0 = "Aladdin:open sesame" + s1 = encodestring(s0) + s2 = decodestring(s1) + print s0, repr(s1), s2 + + +if __name__ == '__main__': + test() diff --git a/CVIssueCount/bisect.py b/CVIssueCount/bisect.py new file mode 100644 index 0000000..4a4d052 --- /dev/null +++ b/CVIssueCount/bisect.py @@ -0,0 +1,92 @@ +"""Bisection algorithms.""" + +def insort_right(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + a.insert(lo, x) + +insort = insort_right # backward compatibility + +def bisect_right(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + return lo + +bisect = bisect_right # backward compatibility + +def insort_left(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + a.insert(lo, x) + + +def bisect_left(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. 
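+
+    A couple of illustrative calls (values are only examples):
+
+    >>> bisect_left([10, 20, 20, 40], 20)
+    1
+    >>> bisect_left([10, 20, 20, 40], 25)
+    3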
+ """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + return lo + +# Overwrite above definitions with a fast C implementation +try: + from _bisect import * +except ImportError: + pass diff --git a/CVIssueCount/collections.py b/CVIssueCount/collections.py new file mode 100644 index 0000000..1dcd233 --- /dev/null +++ b/CVIssueCount/collections.py @@ -0,0 +1,730 @@ +__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict'] +# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py. +# They should however be considered an integral part of collections.py. +from _abcoll import * +import _abcoll +__all__ += _abcoll.__all__ + +from _collections import deque, defaultdict +from operator import itemgetter as _itemgetter, eq as _eq +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from itertools import imap as _imap + +try: + from thread import get_ident as _get_ident +except ImportError: + from dummy_thread import get_ident as _get_ident + + +################################################################################ +### OrderedDict +################################################################################ + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(*args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if not args: + raise TypeError("descriptor '__init__' of 'OrderedDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. 
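+        # (i.e. given PREV <-> link <-> NEXT in the circular list, the two
+        # neighbours are re-pointed at each other so the link drops out.)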
+ dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + link_prev[1] = link_next # update link_prev[NEXT] + link_next[0] = link_prev # update link_next[PREV] + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root[1] # start at the first node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root[0] # start at the last node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __repr__(self, _repr_running={}): + 'od.__repr__() <==> repr(od)' + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. 
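+
+        An illustrative call:
+
+        >>> OrderedDict.fromkeys('abc', 0).items()
+        [('a', 0), ('b', 0), ('c', 0)]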
+ + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(_imap(_eq, self, other)) + return dict.__eq__(self, other) + + def __ne__(self, other): + 'od.__ne__(y) <==> od!=y' + return not self == other + + # -- the following methods support python 3.x style dictionary views -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + + +################################################################################ +### namedtuple +################################################################################ + +_class_template = '''\ +class {typename}(tuple): + '{typename}({arg_list})' + + __slots__ = () + + _fields = {field_names!r} + + def __new__(_cls, {arg_list}): + 'Create new instance of {typename}({arg_list})' + return _tuple.__new__(_cls, ({arg_list})) + + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new {typename} object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != {num_fields:d}: + raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) + return result + + def __repr__(self): + 'Return a nicely formatted representation string' + return '{typename}({repr_fmt})' % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values' + return OrderedDict(zip(self._fields, self)) + + def _replace(_self, **kwds): + 'Return a new {typename} object replacing specified fields with new values' + result = _self._make(map(kwds.pop, {field_names!r}, _self)) + if kwds: + raise ValueError('Got unexpected field names: %r' % kwds.keys()) + return result + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + __dict__ = _property(_asdict) + + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + pass + +{field_defs} +''' + +_repr_template = '{name}=%r' + +_field_template = '''\ + {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') +''' + +def namedtuple(typename, field_names, verbose=False, rename=False): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Validate the field names. At the user's option, either generate an error + # message or automatically replace the field name with a valid name. 
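+    # For instance (illustrative only): namedtuple('T', 'abc def abc',
+    # rename=True) ends up with fields ('abc', '_1', '_2') -- 'def' is a
+    # keyword and the second 'abc' is a duplicate, so both are replaced
+    # by positional names below.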
+ if isinstance(field_names, basestring): + field_names = field_names.replace(',', ' ').split() + field_names = map(str, field_names) + typename = str(typename) + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not all(c.isalnum() or c=='_' for c in name) + or _iskeyword(name) + or not name + or name[0].isdigit() + or name.startswith('_') + or name in seen): + field_names[index] = '_%d' % index + seen.add(name) + for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') + if not all(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain ' + 'alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a ' + 'keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with ' + 'a number: %r' % name) + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: ' + '%r' % name) + if name in seen: + raise ValueError('Encountered duplicate field name: %r' % name) + seen.add(name) + + # Fill-in the class template + class_definition = _class_template.format( + typename = typename, + field_names = tuple(field_names), + num_fields = len(field_names), + arg_list = repr(tuple(field_names)).replace("'", "")[1:-1], + repr_fmt = ', '.join(_repr_template.format(name=name) + for name in field_names), + field_defs = '\n'.join(_field_template.format(index=index, name=name) + for index, name in enumerate(field_names)) + ) + if verbose: + print class_definition + + # Execute the template string in a temporary namespace and support + # tracing utilities by setting a value for frame.f_globals['__name__'] + namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, + OrderedDict=OrderedDict, _property=property, _tuple=tuple) + try: + exec class_definition in namespace + except SyntaxError as e: + raise SyntaxError(e.message + ':\n' + class_definition) + result = namespace[typename] + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return result + + +######################################################################## +### Counter +######################################################################## + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... 
c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(*args, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + if not args: + raise TypeError("descriptor '__init__' of 'Counter' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + super(Counter, self).__init__() + self.update(*args, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.iteritems(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.iteritems())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because setting v=1 + # means that no element can have a count greater than one. + raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(*args, **kwds): + '''Like dict.update() but add counts instead of replacing them. 
+ + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if not args: + raise TypeError("descriptor 'update' of 'Counter' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + iterable = args[0] if args else None + if iterable is not None: + if isinstance(iterable, Mapping): + if self: + self_get = self.get + for elem, count in iterable.iteritems(): + self[elem] = self_get(elem, 0) + count + else: + super(Counter, self).update(iterable) # fast path when counter is empty + else: + self_get = self.get + for elem in iterable: + self[elem] = self_get(elem, 0) + 1 + if kwds: + self.update(kwds) + + def subtract(*args, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if not args: + raise TypeError("descriptor 'subtract' of 'Counter' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + iterable = args[0] if args else None + if iterable is not None: + self_get = self.get + if isinstance(iterable, Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super(Counter, self).__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + + def __add__(self, other): + '''Add counts from two counters. 
+ + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + +if __name__ == '__main__': + # verify that instances can be pickled + from cPickle import loads, dumps + Point = namedtuple('Point', 'x, y', True) + p = Point(x=10, y=20) + assert p == loads(dumps(p)) + + # test and demonstrate ability to override methods + class Point(namedtuple('Point', 'x y')): + __slots__ = () + @property + def hypot(self): + return (self.x ** 2 + self.y ** 2) ** 0.5 + def __str__(self): + return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) + + for p in Point(3, 4), Point(14, 5/7.): + print p + + class Point(namedtuple('Point', 'x y')): + 'Point class with optimized _make() and _replace() without error-checking' + __slots__ = () + _make = classmethod(tuple.__new__) + def _replace(self, _map=map, **kwds): + return self._make(_map(kwds.get, ('x', 'y'), self)) + + print Point(11, 22)._replace(x=100) + + Point3D = namedtuple('Point3D', Point._fields + ('z',)) + print Point3D.__doc__ + + import doctest + TestResults = namedtuple('TestResults', 'failed attempted') + print TestResults(*doctest.testmod()) diff --git a/CVIssueCount/contextlib.py b/CVIssueCount/contextlib.py new file mode 100644 index 0000000..f05205b --- /dev/null +++ b/CVIssueCount/contextlib.py @@ -0,0 +1,154 @@ +"""Utilities for with-statement contexts. 
See PEP 343."""
+
+import sys
+from functools import wraps
+from warnings import warn
+
+__all__ = ["contextmanager", "nested", "closing"]
+
+class GeneratorContextManager(object):
+    """Helper for @contextmanager decorator."""
+
+    def __init__(self, gen):
+        self.gen = gen
+
+    def __enter__(self):
+        try:
+            return self.gen.next()
+        except StopIteration:
+            raise RuntimeError("generator didn't yield")
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            try:
+                self.gen.next()
+            except StopIteration:
+                return
+            else:
+                raise RuntimeError("generator didn't stop")
+        else:
+            if value is None:
+                # Need to force instantiation so we can reliably
+                # tell if we get the same exception back
+                value = type()
+            try:
+                self.gen.throw(type, value, traceback)
+                raise RuntimeError("generator didn't stop after throw()")
+            except StopIteration, exc:
+                # Suppress the exception *unless* it's the same exception that
+                # was passed to throw(). This prevents a StopIteration
+                # raised inside the "with" statement from being suppressed
+                return exc is not value
+            except:
+                # only re-raise if it's *not* the exception that was
+                # passed to throw(), because __exit__() must not raise
+                # an exception unless __exit__() itself failed. But throw()
+                # has to raise the exception to signal propagation, so this
+                # fixes the impedance mismatch between the throw() protocol
+                # and the __exit__() protocol.
+                #
+                if sys.exc_info()[1] is not value:
+                    raise
+
+
+def contextmanager(func):
+    """@contextmanager decorator.
+
+    Typical usage:
+
+        @contextmanager
+        def some_generator(<arguments>):
+            <setup>
+            try:
+                yield <value>
+            finally:
+                <cleanup>
+
+    This makes this:
+
+        with some_generator(<arguments>) as <variable>:
+            <body>
+
+    equivalent to this:
+
+        <setup>
+        try:
+            <variable> = <value>
+            <body>
+        finally:
+            <cleanup>
+
+    """
+    @wraps(func)
+    def helper(*args, **kwds):
+        return GeneratorContextManager(func(*args, **kwds))
+    return helper
+
+
+@contextmanager
+def nested(*managers):
+    """Combine multiple context managers into a single nested context manager.
+
+    This function has been deprecated in favour of the multiple manager form
+    of the with statement.
+
+    The one advantage of this function over the multiple manager form of the
+    with statement is that argument unpacking allows it to be
+    used with a variable number of context managers as follows:
+
+        with nested(*managers):
+            do_something()
+
+    """
+    warn("With-statements now directly support multiple context managers",
+         DeprecationWarning, 3)
+    exits = []
+    vars = []
+    exc = (None, None, None)
+    try:
+        for mgr in managers:
+            exit = mgr.__exit__
+            enter = mgr.__enter__
+            vars.append(enter())
+            exits.append(exit)
+        yield vars
+    except:
+        exc = sys.exc_info()
+    finally:
+        while exits:
+            exit = exits.pop()
+            try:
+                if exit(*exc):
+                    exc = (None, None, None)
+            except:
+                exc = sys.exc_info()
+        if exc != (None, None, None):
+            # Don't rely on sys.exc_info() still containing
+            # the right information. Another exception may
+            # have been raised and caught by an exit method
+            raise exc[0], exc[1], exc[2]
+
+
+class closing(object):
+    """Context to automatically close something at the end of a block.
+
+    Code like this:
+
+        with closing(<module>.open(<arguments>)) as f:
+            <block>
+
+    is equivalent to this:
+
+        f = <module>.open(<arguments>)
+        try:
+            <block>
+        finally:
+            f.close()
+
+    """
+    def __init__(self, thing):
+        self.thing = thing
+    def __enter__(self):
+        return self.thing
+    def __exit__(self, *exc_info):
+        self.thing.close()
diff --git a/CVIssueCount/count.py b/CVIssueCount/count.py
new file mode 100644
index 0000000..c2ba08a
--- /dev/null
+++ b/CVIssueCount/count.py
@@ -0,0 +1,19 @@
+import xml.etree.ElementTree as ET
+
+t = """
+OK
+1
+0
+1
+1
+1
+
+403
+
+1.0
+"""
+
+tree = ET.parse(t)
+root = tree.getroot()
+for data in root[1]:
+    print data.text
diff --git a/CVIssueCount/functools.py b/CVIssueCount/functools.py
new file mode 100644
index 0000000..53680b8
--- /dev/null
+++ b/CVIssueCount/functools.py
@@ -0,0 +1,100 @@
+"""functools.py - Tools for working with functions and callable objects
+"""
+# Python module wrapper for _functools C module
+# to allow utilities written in Python to be added
+# to the functools module.
+# Written by Nick Coghlan <ncoghlan at gmail.com>
+# Copyright (C) 2006 Python Software Foundation.
+# See C source code for _functools credits/copyright
+
+from _functools import partial, reduce
+
+# update_wrapper() and wraps() are tools to help write
+# wrapper functions that can handle naive introspection
+
+WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
+WRAPPER_UPDATES = ('__dict__',)
+def update_wrapper(wrapper,
+                   wrapped,
+                   assigned = WRAPPER_ASSIGNMENTS,
+                   updated = WRAPPER_UPDATES):
+    """Update a wrapper function to look like the wrapped function
+
+    wrapper is the function to be updated
+    wrapped is the original function
+    assigned is a tuple naming the attributes assigned directly
+    from the wrapped function to the wrapper function (defaults to
+    functools.WRAPPER_ASSIGNMENTS)
+    updated is a tuple naming the attributes of the wrapper that
+    are updated with the corresponding attribute from the wrapped
+    function (defaults to functools.WRAPPER_UPDATES)
+    """
+    for attr in assigned:
+        setattr(wrapper, attr, getattr(wrapped, attr))
+    for attr in updated:
+        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+    # Return the wrapper so this can be used as a decorator via partial()
+    return wrapper
+
+def wraps(wrapped,
+          assigned = WRAPPER_ASSIGNMENTS,
+          updated = WRAPPER_UPDATES):
+    """Decorator factory to apply update_wrapper() to a wrapper function
+
+    Returns a decorator that invokes update_wrapper() with the decorated
+    function as the wrapper argument and the arguments to wraps() as the
+    remaining arguments. Default arguments are as for update_wrapper().
+    This is a convenience function to simplify applying partial() to
+    update_wrapper().
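+
+    A minimal illustrative use (f here is a hypothetical function):
+
+        @wraps(f)
+        def wrapper(*args, **kwds):
+            return f(*args, **kwds)
+
+    Afterwards wrapper.__name__, wrapper.__doc__ and wrapper.__dict__
+    mirror those of f.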
+ """ + return partial(update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + +def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + convert = { + '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), + ('__le__', lambda self, other: self < other or self == other), + ('__ge__', lambda self, other: not self < other)], + '__le__': [('__ge__', lambda self, other: not self <= other or self == other), + ('__lt__', lambda self, other: self <= other and not self == other), + ('__gt__', lambda self, other: not self <= other)], + '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), + ('__ge__', lambda self, other: self > other or self == other), + ('__le__', lambda self, other: not self > other)], + '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), + ('__gt__', lambda self, other: self >= other and not self == other), + ('__lt__', lambda self, other: not self >= other)] + } + roots = set(dir(cls)) & set(convert) + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in convert[root]: + if opname not in roots: + opfunc.__name__ = opname + opfunc.__doc__ = getattr(int, opname).__doc__ + setattr(cls, opname, opfunc) + return cls + +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj, *args): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + def __ne__(self, other): + return mycmp(self.obj, other.obj) != 0 + def __hash__(self): + raise TypeError('hash not implemented') + return K diff --git a/CVIssueCount/genericpath.py b/CVIssueCount/genericpath.py new file mode 100644 index 0000000..2648e54 --- /dev/null +++ b/CVIssueCount/genericpath.py @@ -0,0 +1,113 @@ +""" +Path operations common to more than one OS +Do not use directly. The OS specific modules import the appropriate +functions from this module themselves. +""" +import os +import stat + +__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', + 'getsize', 'isdir', 'isfile'] + + +try: + _unicode = unicode +except NameError: + # If Python is built without Unicode support, the unicode type + # will not exist. Fake one. + class _unicode(object): + pass + +# Does a path exist? +# This is false for dangling symbolic links on systems that support them. +def exists(path): + """Test whether a path exists. Returns False for broken symbolic links""" + try: + os.stat(path) + except os.error: + return False + return True + + +# This follows symbolic links, so both islink() and isdir() can be true +# for the same path on systems that support symlinks +def isfile(path): + """Test whether a path is a regular file""" + try: + st = os.stat(path) + except os.error: + return False + return stat.S_ISREG(st.st_mode) + + +# Is a path a directory? 
+# This follows symbolic links, so both islink() and isdir() +# can be true for the same path on systems that support symlinks +def isdir(s): + """Return true if the pathname refers to an existing directory.""" + try: + st = os.stat(s) + except os.error: + return False + return stat.S_ISDIR(st.st_mode) + + +def getsize(filename): + """Return the size of a file, reported by os.stat().""" + return os.stat(filename).st_size + + +def getmtime(filename): + """Return the last modification time of a file, reported by os.stat().""" + return os.stat(filename).st_mtime + + +def getatime(filename): + """Return the last access time of a file, reported by os.stat().""" + return os.stat(filename).st_atime + + +def getctime(filename): + """Return the metadata change time of a file, reported by os.stat().""" + return os.stat(filename).st_ctime + + +# Return the longest prefix of all list elements. +def commonprefix(m): + "Given a list of pathnames, returns the longest common leading component" + if not m: return '' + s1 = min(m) + s2 = max(m) + for i, c in enumerate(s1): + if c != s2[i]: + return s1[:i] + return s1 + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +# Generic implementation of splitext, to be parametrized with +# the separators +def _splitext(p, sep, altsep, extsep): + """Split the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.""" + + sepIndex = p.rfind(sep) + if altsep: + altsepIndex = p.rfind(altsep) + sepIndex = max(sepIndex, altsepIndex) + + dotIndex = p.rfind(extsep) + if dotIndex > sepIndex: + # skip all leading dots + filenameIndex = sepIndex + 1 + while filenameIndex < dotIndex: + if p[filenameIndex] != extsep: + return p[:dotIndex], p[dotIndex:] + filenameIndex += 1 + + return p, '' diff --git a/CVIssueCount/hashlib.py b/CVIssueCount/hashlib.py new file mode 100644 index 0000000..bbd06b9 --- /dev/null +++ b/CVIssueCount/hashlib.py @@ -0,0 +1,221 @@ +# $Id$ +# +# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org) +# Licensed to PSF under a Contributor Agreement. +# + +__doc__ = """hashlib module - A common interface to many hash functions. + +new(name, string='') - returns a new hash object implementing the + given hash function; initializing the hash + using the given string data. + +Named constructor functions are also available, these are much faster +than using new(): + +md5(), sha1(), sha224(), sha256(), sha384(), and sha512() + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(arg): Update the hash object with the string arg. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the strings passed to the update() method + so far. This may contain non-ASCII characters, including + NUL bytes. 
+ - hexdigest(): Like digest() except the digest is returned as a string of + double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of strings that share a common + initial substring. + +For example, to obtain the digest of the string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update("Nobody inspects") + >>> m.update(" the spammish repetition") + >>> m.digest() + '\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' + +More condensed: + + >>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +""" + +# This tuple and __get_builtin_constructor() must be modified if a new +# always available algorithm is added. +__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') + +algorithms_guaranteed = set(__always_supported) +algorithms_available = set(__always_supported) + +algorithms = __always_supported + +__all__ = __always_supported + ('new', 'algorithms_guaranteed', + 'algorithms_available', 'algorithms', + 'pbkdf2_hmac') + + +def __get_builtin_constructor(name): + try: + if name in ('SHA1', 'sha1'): + import _sha + return _sha.new + elif name in ('MD5', 'md5'): + import _md5 + return _md5.new + elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): + import _sha256 + bs = name[3:] + if bs == '256': + return _sha256.sha256 + elif bs == '224': + return _sha256.sha224 + elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): + import _sha512 + bs = name[3:] + if bs == '512': + return _sha512.sha512 + elif bs == '384': + return _sha512.sha384 + except ImportError: + pass # no extension module, this hash is unsupported. + + raise ValueError('unsupported hash type ' + name) + + +def __get_openssl_constructor(name): + try: + f = getattr(_hashlib, 'openssl_' + name) + # Allow the C module to raise ValueError. The function will be + # defined but the hash not actually available thanks to OpenSSL. + f() + # Use the C function directly (very fast) + return f + except (AttributeError, ValueError): + return __get_builtin_constructor(name) + + +def __py_new(name, string=''): + """new(name, string='') - Return a new hashing object using the named algorithm; + optionally initialized with a string. + """ + return __get_builtin_constructor(name)(string) + + +def __hash_new(name, string=''): + """new(name, string='') - Return a new hashing object using the named algorithm; + optionally initialized with a string. + """ + try: + return _hashlib.new(name, string) + except ValueError: + # If the _hashlib module (OpenSSL) doesn't support the named + # hash, try using our builtin implementations. + # This allows for SHA224/256 and SHA384/512 support even though + # the OpenSSL library prior to 0.9.8 doesn't provide them. + return __get_builtin_constructor(name)(string) + + +try: + import _hashlib + new = __hash_new + __get_hash = __get_openssl_constructor + algorithms_available = algorithms_available.union( + _hashlib.openssl_md_meth_names) +except ImportError: + new = __py_new + __get_hash = __get_builtin_constructor + +for __func_name in __always_supported: + # try them all, some may not work due to the OpenSSL + # version not supporting that algorithm. 
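+    # (After this loop the guaranteed names -- md5, sha1, sha224, sha256,
+    # sha384, sha512 -- are bound as module-level constructors, each using
+    # the fastest implementation that could be located.)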
+ try: + globals()[__func_name] = __get_hash(__func_name) + except ValueError: + import logging + logging.exception('code for hash %s was not found.', __func_name) + + +try: + # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA + from _hashlib import pbkdf2_hmac +except ImportError: + import binascii + import struct + + _trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256)) + _trans_36 = b"".join(chr(x ^ 0x36) for x in range(256)) + + def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): + """Password based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + """ + if not isinstance(hash_name, str): + raise TypeError(hash_name) + + if not isinstance(password, (bytes, bytearray)): + password = bytes(buffer(password)) + if not isinstance(salt, (bytes, bytearray)): + salt = bytes(buffer(salt)) + + # Fast inline HMAC implementation + inner = new(hash_name) + outer = new(hash_name) + blocksize = getattr(inner, 'block_size', 64) + if len(password) > blocksize: + password = new(hash_name, password).digest() + password = password + b'\x00' * (blocksize - len(password)) + inner.update(password.translate(_trans_36)) + outer.update(password.translate(_trans_5C)) + + def prf(msg, inner=inner, outer=outer): + # PBKDF2_HMAC uses the password as key. We can re-use the same + # digest objects and just update copies to skip initialization. + icpy = inner.copy() + ocpy = outer.copy() + icpy.update(msg) + ocpy.update(icpy.digest()) + return ocpy.digest() + + if iterations < 1: + raise ValueError(iterations) + if dklen is None: + dklen = outer.digest_size + if dklen < 1: + raise ValueError(dklen) + + hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2) + + dkey = b'' + loop = 1 + while len(dkey) < dklen: + prev = prf(salt + struct.pack(b'>I', loop)) + rkey = int(binascii.hexlify(prev), 16) + for i in xrange(iterations - 1): + prev = prf(prev) + rkey ^= int(binascii.hexlify(prev), 16) + loop += 1 + dkey += binascii.unhexlify(hex_format_string % rkey) + + return dkey[:dklen] + +# Cleanup locals() +del __always_supported, __func_name, __get_hash +del __py_new, __hash_new, __get_openssl_constructor diff --git a/CVIssueCount/heapq.py b/CVIssueCount/heapq.py new file mode 100644 index 0000000..4b2c0c4 --- /dev/null +++ b/CVIssueCount/heapq.py @@ -0,0 +1,485 @@ +# -*- coding: latin-1 -*- + +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. 
+ +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +an usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. 
If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +from itertools import islice, count, imap, izip, tee, chain +from operator import itemgetter + +def cmp_lt(x, y): + # Use __lt__ if available; otherwise, try __le__. + # In Py3.x, only __lt__ will be called. + return (x < y) if hasattr(x, '__lt__') else (not y <= x) + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + else: + returnitem = lastelt + return returnitem + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and cmp_lt(heap[0], item): + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(x)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. 
If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(xrange(n//2)): + _siftup(x, i) + +def _heappushpop_max(heap, item): + """Maxheap version of a heappush followed by a heappop.""" + if heap and cmp_lt(item, heap[0]): + item, heap[0] = heap[0], item + _siftup_max(heap, 0) + return item + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + +def nlargest(n, iterable): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, reverse=True)[:n] + """ + if n < 0: + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + heapify(result) + _heappushpop = heappushpop + for elem in it: + _heappushpop(result, elem) + result.sort(reverse=True) + return result + +def nsmallest(n, iterable): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable)[:n] + """ + if n < 0: + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + _heapify_max(result) + _heappushpop = _heappushpop_max + for elem in it: + _heappushpop(result, elem) + result.sort() + return result + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if cmp_lt(newitem, parent): + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom __cmp__ methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. 
Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not cmp_lt(heap[childpos], heap[rightpos]): + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if cmp_lt(parent, newitem): + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not cmp_lt(heap[rightpos], heap[childpos]): + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass + +def merge(*iterables): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). 
+ + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + ''' + _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + _len = len + + h = [] + h_append = h.append + for itnum, it in enumerate(map(iter, iterables)): + try: + next = it.next + h_append([next(), itnum, next]) + except _StopIteration: + pass + heapify(h) + + while _len(h) > 1: + try: + while 1: + v, itnum, next = s = h[0] + yield v + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except _StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + v, itnum, next = h[0] + yield v + for v in next.__self__: + yield v + +# Extend the implementations of nsmallest and nlargest to use a key= argument +_nsmallest = nsmallest +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + """ + # Short-cut for n==1 is to use min() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [min(chain(head, it))] + return [min(chain(head, it), key=key)] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count()) # decorate + result = _nsmallest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(), in2) # decorate + result = _nsmallest(n, it) + return map(itemgetter(2), result) # undecorate + +_nlargest = nlargest +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [max(chain(head, it))] + return [max(chain(head, it), key=key)] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = izip(iterable, count(0,-1)) # decorate + result = _nlargest(n, it) + return map(itemgetter(0), result) # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = izip(imap(key, in1), count(0,-1), in2) # decorate + result = _nlargest(n, it) + return map(itemgetter(2), result) # undecorate + +if __name__ == "__main__": + # Simple sanity test + heap = [] + data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + for item in data: + heappush(heap, item) + sort = [] + while heap: + sort.append(heappop(heap)) + print sort + + import doctest + doctest.testmod() diff --git a/CVIssueCount/httplib.py b/CVIssueCount/httplib.py new file mode 100644 index 0000000..7223ba1 --- /dev/null +++ b/CVIssueCount/httplib.py @@ -0,0 +1,1445 @@ +r"""HTTP/1.1 client library + + + + +HTTPConnection goes through a number of "states", which define when a client +may legally make another request or fetch the response for a particular +request. 
This diagram details these state transitions: + + (null) + | + | HTTPConnection() + v + Idle + | + | putrequest() + v + Request-started + | + | ( putheader() )* endheaders() + v + Request-sent + | + | response = getresponse() + v + Unread-response [Response-headers-read] + |\____________________ + | | + | response.read() | putrequest() + v v + Idle Req-started-unread-response + ______/| + / | + response.read() | | ( putheader() )* endheaders() + v v + Request-started Req-sent-unread-response + | + | response.read() + v + Request-sent + +This diagram presents the following rules: + -- a second request may not be started until {response-headers-read} + -- a response [object] cannot be retrieved until {request-sent} + -- there is no differentiation between an unread response body and a + partially read response body + +Note: this enforcement is applied by the HTTPConnection class. The + HTTPResponse class does not enforce this state machine, which + implies sophisticated clients may accelerate the request/response + pipeline. Caution should be taken, though: accelerating the states + beyond the above pattern may imply knowledge of the server's + connection-close behavior for certain requests. For example, it + is impossible to tell whether the server will close the connection + UNTIL the response headers have been read; this means that further + requests cannot be placed into the pipeline until it is known that + the server will NOT be closing the connection. + +Logical State __state __response +------------- ------- ---------- +Idle _CS_IDLE None +Request-started _CS_REQ_STARTED None +Request-sent _CS_REQ_SENT None +Unread-response _CS_IDLE +Req-started-unread-response _CS_REQ_STARTED +Req-sent-unread-response _CS_REQ_SENT +""" + +from array import array +import os +import re +import socket +from sys import py3kwarning +from urlparse import urlsplit +import warnings +with warnings.catch_warnings(): + if py3kwarning: + warnings.filterwarnings("ignore", ".*mimetools has been removed", + DeprecationWarning) + import mimetools + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +__all__ = ["HTTP", "HTTPResponse", "HTTPConnection", + "HTTPException", "NotConnected", "UnknownProtocol", + "UnknownTransferEncoding", "UnimplementedFileMode", + "IncompleteRead", "InvalidURL", "ImproperConnectionState", + "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", + "BadStatusLine", "error", "responses"] + +HTTP_PORT = 80 +HTTPS_PORT = 443 + +_UNKNOWN = 'UNKNOWN' + +# connection states +_CS_IDLE = 'Idle' +_CS_REQ_STARTED = 'Request-started' +_CS_REQ_SENT = 'Request-sent' + +# status codes +# informational +CONTINUE = 100 +SWITCHING_PROTOCOLS = 101 +PROCESSING = 102 + +# successful +OK = 200 +CREATED = 201 +ACCEPTED = 202 +NON_AUTHORITATIVE_INFORMATION = 203 +NO_CONTENT = 204 +RESET_CONTENT = 205 +PARTIAL_CONTENT = 206 +MULTI_STATUS = 207 +IM_USED = 226 + +# redirection +MULTIPLE_CHOICES = 300 +MOVED_PERMANENTLY = 301 +FOUND = 302 +SEE_OTHER = 303 +NOT_MODIFIED = 304 +USE_PROXY = 305 +TEMPORARY_REDIRECT = 307 + +# client error +BAD_REQUEST = 400 +UNAUTHORIZED = 401 +PAYMENT_REQUIRED = 402 +FORBIDDEN = 403 +NOT_FOUND = 404 +METHOD_NOT_ALLOWED = 405 +NOT_ACCEPTABLE = 406 +PROXY_AUTHENTICATION_REQUIRED = 407 +REQUEST_TIMEOUT = 408 +CONFLICT = 409 +GONE = 410 +LENGTH_REQUIRED = 411 +PRECONDITION_FAILED = 412 +REQUEST_ENTITY_TOO_LARGE = 413 +REQUEST_URI_TOO_LONG = 414 +UNSUPPORTED_MEDIA_TYPE = 415 +REQUESTED_RANGE_NOT_SATISFIABLE = 416 +EXPECTATION_FAILED = 417 
+UNPROCESSABLE_ENTITY = 422 +LOCKED = 423 +FAILED_DEPENDENCY = 424 +UPGRADE_REQUIRED = 426 + +# server error +INTERNAL_SERVER_ERROR = 500 +NOT_IMPLEMENTED = 501 +BAD_GATEWAY = 502 +SERVICE_UNAVAILABLE = 503 +GATEWAY_TIMEOUT = 504 +HTTP_VERSION_NOT_SUPPORTED = 505 +INSUFFICIENT_STORAGE = 507 +NOT_EXTENDED = 510 + +# Mapping status codes to official W3C names +responses = { + 100: 'Continue', + 101: 'Switching Protocols', + + 200: 'OK', + 201: 'Created', + 202: 'Accepted', + 203: 'Non-Authoritative Information', + 204: 'No Content', + 205: 'Reset Content', + 206: 'Partial Content', + + 300: 'Multiple Choices', + 301: 'Moved Permanently', + 302: 'Found', + 303: 'See Other', + 304: 'Not Modified', + 305: 'Use Proxy', + 306: '(Unused)', + 307: 'Temporary Redirect', + + 400: 'Bad Request', + 401: 'Unauthorized', + 402: 'Payment Required', + 403: 'Forbidden', + 404: 'Not Found', + 405: 'Method Not Allowed', + 406: 'Not Acceptable', + 407: 'Proxy Authentication Required', + 408: 'Request Timeout', + 409: 'Conflict', + 410: 'Gone', + 411: 'Length Required', + 412: 'Precondition Failed', + 413: 'Request Entity Too Large', + 414: 'Request-URI Too Long', + 415: 'Unsupported Media Type', + 416: 'Requested Range Not Satisfiable', + 417: 'Expectation Failed', + + 500: 'Internal Server Error', + 501: 'Not Implemented', + 502: 'Bad Gateway', + 503: 'Service Unavailable', + 504: 'Gateway Timeout', + 505: 'HTTP Version Not Supported', +} + +# maximal amount of data to read at one time in _safe_read +MAXAMOUNT = 1048576 + +# maximal line length when calling readline(). +_MAXLINE = 65536 + +# maximum amount of headers accepted +_MAXHEADERS = 100 + +# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2) +# +# VCHAR = %x21-7E +# obs-text = %x80-FF +# header-field = field-name ":" OWS field-value OWS +# field-name = token +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# +# obs-fold = CRLF 1*( SP / HTAB ) +# ; obsolete line folding +# ; see Section 3.2.4 + +# token = 1*tchar +# +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# ; any VCHAR, except delimiters +# +# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 + +# the patterns for both name and value are more leniant than RFC +# definitions to allow for backwards compatibility +_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match +_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search + +# We always set the Content-Length header for these methods because some +# servers will otherwise respond with a 411 +_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'} + + +class HTTPMessage(mimetools.Message): + + def addheader(self, key, value): + """Add header for field key handling repeats.""" + prev = self.dict.get(key) + if prev is None: + self.dict[key] = value + else: + combined = ", ".join((prev, value)) + self.dict[key] = combined + + def addcontinue(self, key, more): + """Add more field data from a continuation line.""" + prev = self.dict[key] + self.dict[key] = prev + "\n " + more + + def readheaders(self): + """Read header lines. + + Read header lines up to the entirely blank line that terminates them. + The (normally blank) line that ends the headers is skipped, but not + included in the returned list. 
If a non-header line ends the headers, + (which is an error), an attempt is made to backspace over it; it is + never included in the returned list. + + The variable self.status is set to the empty string if all went well, + otherwise it is an error message. The variable self.headers is a + completely uninterpreted list of lines contained in the header (so + printing them will reproduce the header exactly as it appears in the + file). + + If multiple header fields with the same name occur, they are combined + according to the rules in RFC 2616 sec 4.2: + + Appending each subsequent field-value to the first, each separated + by a comma. The order in which header fields with the same field-name + are received is significant to the interpretation of the combined + field value. + """ + # XXX The implementation overrides the readheaders() method of + # rfc822.Message. The base class design isn't amenable to + # customized behavior here so the method here is a copy of the + # base class code with a few small changes. + + self.dict = {} + self.unixfrom = '' + self.headers = hlist = [] + self.status = '' + headerseen = "" + firstline = 1 + startofline = unread = tell = None + if hasattr(self.fp, 'unread'): + unread = self.fp.unread + elif self.seekable: + tell = self.fp.tell + while True: + if len(hlist) > _MAXHEADERS: + raise HTTPException("got more than %d headers" % _MAXHEADERS) + if tell: + try: + startofline = tell() + except IOError: + startofline = tell = None + self.seekable = 0 + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") + if not line: + self.status = 'EOF in headers' + break + # Skip unix From name time lines + if firstline and line.startswith('From '): + self.unixfrom = self.unixfrom + line + continue + firstline = 0 + if headerseen and line[0] in ' \t': + # XXX Not sure if continuation lines are handled properly + # for http and/or for repeating headers + # It's a continuation line. + hlist.append(line) + self.addcontinue(headerseen, line.strip()) + continue + elif self.iscomment(line): + # It's a comment. Ignore it. + continue + elif self.islast(line): + # Note! No pushback here! The delimiter line gets eaten. + break + headerseen = self.isheader(line) + if headerseen: + # It's a legal header line, save it. + hlist.append(line) + self.addheader(headerseen, line[len(headerseen)+1:].strip()) + continue + elif headerseen is not None: + # An empty header name. These aren't allowed in HTTP, but it's + # probably a benign mistake. Don't add the header, just keep + # going. + continue + else: + # It's not a header line; throw it back and stop here. + if not self.dict: + self.status = 'No headers' + else: + self.status = 'Non-header line where header expected' + # Try to undo the read. + if unread: + unread(line) + elif tell: + self.fp.seek(startofline) + else: + self.status = self.status + '; bad seek' + break + +class HTTPResponse: + + # strict: If true, raise BadStatusLine if the status line can't be + # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is + # false because it prevents clients from talking to HTTP/0.9 + # servers. Note that a response with a sufficiently corrupted + # status line will look like an HTTP/0.9 response. + + # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. + + def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False): + if buffering: + # The caller won't be using any sock.recv() calls, so buffering + # is fine and recommended for performance. 
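+            # makefile('rb') without an explicit size uses the default buffer,
+            # which is safe here because this response object performs every
+            # remaining read on the socket.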
+ self.fp = sock.makefile('rb') + else: + # The buffer size is specified as zero, because the headers of + # the response are read with readline(). If the reads were + # buffered the readline() calls could consume some of the + # response, which make be read via a recv() on the underlying + # socket. + self.fp = sock.makefile('rb', 0) + self.debuglevel = debuglevel + self.strict = strict + self._method = method + + self.msg = None + + # from the Status-Line of the response + self.version = _UNKNOWN # HTTP-Version + self.status = _UNKNOWN # Status-Code + self.reason = _UNKNOWN # Reason-Phrase + + self.chunked = _UNKNOWN # is "chunked" being used? + self.chunk_left = _UNKNOWN # bytes left to read in current chunk + self.length = _UNKNOWN # number of bytes left in response + self.will_close = _UNKNOWN # conn will close at end of response + + def _read_status(self): + # Initialize with Simple-Response defaults + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") + if self.debuglevel > 0: + print "reply:", repr(line) + if not line: + # Presumably, the server closed the connection before + # sending a valid response. + raise BadStatusLine(line) + try: + [version, status, reason] = line.split(None, 2) + except ValueError: + try: + [version, status] = line.split(None, 1) + reason = "" + except ValueError: + # empty version will cause next test to fail and status + # will be treated as 0.9 response. + version = "" + if not version.startswith('HTTP/'): + if self.strict: + self.close() + raise BadStatusLine(line) + else: + # assume it's a Simple-Response from an 0.9 server + self.fp = LineAndFileWrapper(line, self.fp) + return "HTTP/0.9", 200, "" + + # The status code is a three-digit number + try: + status = int(status) + if status < 100 or status > 999: + raise BadStatusLine(line) + except ValueError: + raise BadStatusLine(line) + return version, status, reason + + def begin(self): + if self.msg is not None: + # we've already started reading the response + return + + # read until we get a non-100 response + while True: + version, status, reason = self._read_status() + if status != CONTINUE: + break + # skip the header from the 100 response + while True: + skip = self.fp.readline(_MAXLINE + 1) + if len(skip) > _MAXLINE: + raise LineTooLong("header line") + skip = skip.strip() + if not skip: + break + if self.debuglevel > 0: + print "header:", skip + + self.status = status + self.reason = reason.strip() + if version == 'HTTP/1.0': + self.version = 10 + elif version.startswith('HTTP/1.'): + self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 + elif version == 'HTTP/0.9': + self.version = 9 + else: + raise UnknownProtocol(version) + + if self.version == 9: + self.length = None + self.chunked = 0 + self.will_close = 1 + self.msg = HTTPMessage(StringIO()) + return + + self.msg = HTTPMessage(self.fp, 0) + if self.debuglevel > 0: + for hdr in self.msg.headers: + print "header:", hdr, + + # don't let the msg keep an fp + self.msg.fp = None + + # are we using the chunked-style of transfer encoding? + tr_enc = self.msg.getheader('transfer-encoding') + if tr_enc and tr_enc.lower() == "chunked": + self.chunked = 1 + self.chunk_left = None + else: + self.chunked = 0 + + # will the connection close at the end of the response? + self.will_close = self._check_close() + + # do we have a Content-Length? 
+ # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" + length = self.msg.getheader('content-length') + if length and not self.chunked: + try: + self.length = int(length) + except ValueError: + self.length = None + else: + if self.length < 0: # ignore nonsensical negative lengths + self.length = None + else: + self.length = None + + # does the body have a fixed length? (of zero) + if (status == NO_CONTENT or status == NOT_MODIFIED or + 100 <= status < 200 or # 1xx codes + self._method == 'HEAD'): + self.length = 0 + + # if the connection remains open, and we aren't using chunked, and + # a content-length was not provided, then assume that the connection + # WILL close. + if not self.will_close and \ + not self.chunked and \ + self.length is None: + self.will_close = 1 + + def _check_close(self): + conn = self.msg.getheader('connection') + if self.version == 11: + # An HTTP/1.1 proxy is assumed to stay open unless + # explicitly closed. + conn = self.msg.getheader('connection') + if conn and "close" in conn.lower(): + return True + return False + + # Some HTTP/1.0 implementations have support for persistent + # connections, using rules different than HTTP/1.1. + + # For older HTTP, Keep-Alive indicates persistent connection. + if self.msg.getheader('keep-alive'): + return False + + # At least Akamai returns a "Connection: Keep-Alive" header, + # which was supposed to be sent by the client. + if conn and "keep-alive" in conn.lower(): + return False + + # Proxy-Connection is a netscape hack. + pconn = self.msg.getheader('proxy-connection') + if pconn and "keep-alive" in pconn.lower(): + return False + + # otherwise, assume it will close + return True + + def close(self): + fp = self.fp + if fp: + self.fp = None + fp.close() + + def isclosed(self): + # NOTE: it is possible that we will not ever call self.close(). This + # case occurs when will_close is TRUE, length is None, and we + # read up to the last byte, but NOT past it. + # + # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be + # called, meaning self.isclosed() is meaningful. + return self.fp is None + + # XXX It would be nice to have readline and __iter__ for this, too. + + def read(self, amt=None): + if self.fp is None: + return '' + + if self._method == 'HEAD': + self.close() + return '' + + if self.chunked: + return self._read_chunked(amt) + + if amt is None: + # unbounded read + if self.length is None: + s = self.fp.read() + else: + try: + s = self._safe_read(self.length) + except IncompleteRead: + self.close() + raise + self.length = 0 + self.close() # we read everything + return s + + if self.length is not None: + if amt > self.length: + # clip the read to the "end of response" + amt = self.length + + # we do not use _safe_read() here because this may be a .will_close + # connection, and the user is reading more bytes than will be provided + # (for example, reading in 1k chunks) + s = self.fp.read(amt) + if not s and amt: + # Ideally, we would raise IncompleteRead if the content-length + # wasn't satisfied, but it might break compatibility. 
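+            # An empty string for a non-zero amt means the server closed the
+            # connection early; close our side so isclosed() reports it.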
+ self.close() + if self.length is not None: + self.length -= len(s) + if not self.length: + self.close() + + return s + + def _read_chunked(self, amt): + assert self.chunked != _UNKNOWN + chunk_left = self.chunk_left + value = [] + while True: + if chunk_left is None: + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("chunk size") + i = line.find(';') + if i >= 0: + line = line[:i] # strip chunk-extensions + try: + chunk_left = int(line, 16) + except ValueError: + # close the connection as protocol synchronisation is + # probably lost + self.close() + raise IncompleteRead(''.join(value)) + if chunk_left == 0: + break + if amt is None: + value.append(self._safe_read(chunk_left)) + elif amt < chunk_left: + value.append(self._safe_read(amt)) + self.chunk_left = chunk_left - amt + return ''.join(value) + elif amt == chunk_left: + value.append(self._safe_read(amt)) + self._safe_read(2) # toss the CRLF at the end of the chunk + self.chunk_left = None + return ''.join(value) + else: + value.append(self._safe_read(chunk_left)) + amt -= chunk_left + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + # read and discard trailer up to the CRLF terminator + ### note: we shouldn't have any trailers! + while True: + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("trailer line") + if not line: + # a vanishingly small number of sites EOF without + # sending the trailer + break + if line == '\r\n': + break + + # we read everything; close the "file" + self.close() + + return ''.join(value) + + def _safe_read(self, amt): + """Read the number of bytes requested, compensating for partial reads. + + Normally, we have a blocking socket, but a read() can be interrupted + by a signal (resulting in a partial read). + + Note that we cannot distinguish between EOF and an interrupt when zero + bytes have been read. IncompleteRead() will be raised in this + situation. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + """ + # NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never + # return less than x bytes unless EOF is encountered. It now handles + # signal interruptions (socket.error EINTR) internally. This code + # never caught that exception anyways. It seems largely pointless. + # self.fp.read(amt) will work fine. 
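+        # Collect the data in MAXAMOUNT-sized pieces until amt bytes have
+        # arrived; hitting EOF first raises IncompleteRead with the partial
+        # data gathered so far.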
+ s = [] + while amt > 0: + chunk = self.fp.read(min(amt, MAXAMOUNT)) + if not chunk: + raise IncompleteRead(''.join(s), amt) + s.append(chunk) + amt -= len(chunk) + return ''.join(s) + + def fileno(self): + return self.fp.fileno() + + def getheader(self, name, default=None): + if self.msg is None: + raise ResponseNotReady() + return self.msg.getheader(name, default) + + def getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise ResponseNotReady() + return self.msg.items() + + +class HTTPConnection: + + _http_vsn = 11 + _http_vsn_str = 'HTTP/1.1' + + response_class = HTTPResponse + default_port = HTTP_PORT + auto_open = 1 + debuglevel = 0 + strict = 0 + + def __init__(self, host, port=None, strict=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): + self.timeout = timeout + self.source_address = source_address + self.sock = None + self._buffer = [] + self.__response = None + self.__state = _CS_IDLE + self._method = None + self._tunnel_host = None + self._tunnel_port = None + self._tunnel_headers = {} + if strict is not None: + self.strict = strict + + (self.host, self.port) = self._get_hostport(host, port) + + # This is stored as an instance variable to allow unittests + # to replace with a suitable mock + self._create_connection = socket.create_connection + + def set_tunnel(self, host, port=None, headers=None): + """ Set up host and port for HTTP CONNECT tunnelling. + + In a connection that uses HTTP Connect tunneling, the host passed to the + constructor is used as proxy server that relays all communication to the + endpoint passed to set_tunnel. This is done by sending a HTTP CONNECT + request to the proxy server when the connection is established. + + This method must be called before the HTTP connection has been + established. + + The headers argument should be a mapping of extra HTTP headers + to send with the CONNECT request. + """ + # Verify if this is required. + if self.sock: + raise RuntimeError("Can't setup tunnel for established connection.") + + self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) + if headers: + self._tunnel_headers = headers + else: + self._tunnel_headers.clear() + + def _get_hostport(self, host, port): + if port is None: + i = host.rfind(':') + j = host.rfind(']') # ipv6 addresses have [...] + if i > j: + try: + port = int(host[i+1:]) + except ValueError: + if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ + port = self.default_port + else: + raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) + host = host[:i] + else: + port = self.default_port + if host and host[0] == '[' and host[-1] == ']': + host = host[1:-1] + return (host, port) + + def set_debuglevel(self, level): + self.debuglevel = level + + def _tunnel(self): + self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host, + self._tunnel_port)) + for header, value in self._tunnel_headers.iteritems(): + self.send("%s: %s\r\n" % (header, value)) + self.send("\r\n") + response = self.response_class(self.sock, strict = self.strict, + method = self._method) + (version, code, message) = response._read_status() + + if version == "HTTP/0.9": + # HTTP/0.9 doesn't support the CONNECT verb, so if httplib has + # concluded HTTP/0.9 is being used something has gone wrong. 
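+            # Abandon the tunnel: drop the socket and surface the malformed
+            # proxy response to the caller.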
+ self.close() + raise socket.error("Invalid response from tunnel request") + if code != 200: + self.close() + raise socket.error("Tunnel connection failed: %d %s" % (code, + message.strip())) + while True: + line = response.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") + if not line: + # for sites which EOF without sending trailer + break + if line == '\r\n': + break + + + def connect(self): + """Connect to the host and port specified in __init__.""" + self.sock = self._create_connection((self.host,self.port), + self.timeout, self.source_address) + + if self._tunnel_host: + self._tunnel() + + def close(self): + """Close the connection to the HTTP server.""" + self.__state = _CS_IDLE + try: + sock = self.sock + if sock: + self.sock = None + sock.close() # close it manually... there may be other refs + finally: + response = self.__response + if response: + self.__response = None + response.close() + + def send(self, data): + """Send `data' to the server.""" + if self.sock is None: + if self.auto_open: + self.connect() + else: + raise NotConnected() + + if self.debuglevel > 0: + print "send:", repr(data) + blocksize = 8192 + if hasattr(data,'read') and not isinstance(data, array): + if self.debuglevel > 0: print "sendIng a read()able" + datablock = data.read(blocksize) + while datablock: + self.sock.sendall(datablock) + datablock = data.read(blocksize) + else: + self.sock.sendall(data) + + def _output(self, s): + """Add a line of output to the current request buffer. + + Assumes that the line does *not* end with \\r\\n. + """ + self._buffer.append(s) + + def _send_output(self, message_body=None): + """Send the currently buffered request and clear the buffer. + + Appends an extra \\r\\n to the buffer. + A message_body may be specified, to be appended to the request. + """ + self._buffer.extend(("", "")) + msg = "\r\n".join(self._buffer) + del self._buffer[:] + # If msg and message_body are sent in a single send() call, + # it will avoid performance problems caused by the interaction + # between delayed ack and the Nagle algorithm. + if isinstance(message_body, str): + msg += message_body + message_body = None + self.send(msg) + if message_body is not None: + #message_body was not a string (i.e. it is a file) and + #we must run the risk of Nagle + self.send(message_body) + + def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): + """Send a request to the server. + + `method' specifies an HTTP request method, e.g. 'GET'. + `url' specifies the object being requested, e.g. '/index.html'. + `skip_host' if True does not add automatically a 'Host:' header + `skip_accept_encoding' if True does not add automatically an + 'Accept-Encoding:' header + """ + + # if a prior response has been completed, then forget about it. + if self.__response and self.__response.isclosed(): + self.__response = None + + + # in certain cases, we cannot issue another request on this connection. + # this occurs when: + # 1) we are in the process of sending a request. (_CS_REQ_STARTED) + # 2) a response to a previous request has signalled that it is going + # to close the connection upon completion. + # 3) the headers for the previous response have not been read, thus + # we cannot determine whether point (2) is true. (_CS_REQ_SENT) + # + # if there is no prior response, then we can request at will. 
+ # + # if point (2) is true, then we will have passed the socket to the + # response (effectively meaning, "there is no prior response"), and + # will open a new one when a new request is made. + # + # Note: if a prior response exists, then we *can* start a new request. + # We are not allowed to begin fetching the response to this new + # request, however, until that prior response is complete. + # + if self.__state == _CS_IDLE: + self.__state = _CS_REQ_STARTED + else: + raise CannotSendRequest() + + # Save the method we use, we need it later in the response phase + self._method = method + if not url: + url = '/' + hdr = '%s %s %s' % (method, url, self._http_vsn_str) + + self._output(hdr) + + if self._http_vsn == 11: + # Issue some standard headers for better HTTP/1.1 compliance + + if not skip_host: + # this header is issued *only* for HTTP/1.1 + # connections. more specifically, this means it is + # only issued when the client uses the new + # HTTPConnection() class. backwards-compat clients + # will be using HTTP/1.0 and those clients may be + # issuing this header themselves. we should NOT issue + # it twice; some web servers (such as Apache) barf + # when they see two Host: headers + + # If we need a non-standard port,include it in the + # header. If the request is going through a proxy, + # but the host of the actual URL, not the host of the + # proxy. + + netloc = '' + if url.startswith('http'): + nil, netloc, nil, nil, nil = urlsplit(url) + + if netloc: + try: + netloc_enc = netloc.encode("ascii") + except UnicodeEncodeError: + netloc_enc = netloc.encode("idna") + self.putheader('Host', netloc_enc) + else: + if self._tunnel_host: + host = self._tunnel_host + port = self._tunnel_port + else: + host = self.host + port = self.port + + try: + host_enc = host.encode("ascii") + except UnicodeEncodeError: + host_enc = host.encode("idna") + # Wrap the IPv6 Host Header with [] (RFC 2732) + if host_enc.find(':') >= 0: + host_enc = "[" + host_enc + "]" + if port == self.default_port: + self.putheader('Host', host_enc) + else: + self.putheader('Host', "%s:%s" % (host_enc, port)) + + # note: we are assuming that clients will not attempt to set these + # headers since *this* library must deal with the + # consequences. this also means that when the supporting + # libraries are updated to recognize other forms, then this + # code should be changed (removed or updated). + + # we only want a Content-Encoding of "identity" since we don't + # support encodings such as x-gzip or x-deflate. + if not skip_accept_encoding: + self.putheader('Accept-Encoding', 'identity') + + # we can accept "chunked" Transfer-Encodings, but no others + # NOTE: no TE header implies *only* "chunked" + #self.putheader('TE', 'chunked') + + # if TE is supplied in the header, then it must appear in a + # Connection header. + #self.putheader('Connection', 'TE') + + else: + # For HTTP/1.0, the server will assume "not chunked" + pass + + def putheader(self, header, *values): + """Send a request header line to the server. 
+ + For example: h.putheader('Accept', 'text/html') + """ + if self.__state != _CS_REQ_STARTED: + raise CannotSendHeader() + + header = '%s' % header + if not _is_legal_header_name(header): + raise ValueError('Invalid header name %r' % (header,)) + + values = [str(v) for v in values] + for one_value in values: + if _is_illegal_header_value(one_value): + raise ValueError('Invalid header value %r' % (one_value,)) + + hdr = '%s: %s' % (header, '\r\n\t'.join(values)) + self._output(hdr) + + def endheaders(self, message_body=None): + """Indicate that the last header line has been sent to the server. + + This method sends the request to the server. The optional + message_body argument can be used to pass a message body + associated with the request. The message body will be sent in + the same packet as the message headers if it is string, otherwise it is + sent as a separate packet. + """ + if self.__state == _CS_REQ_STARTED: + self.__state = _CS_REQ_SENT + else: + raise CannotSendHeader() + self._send_output(message_body) + + def request(self, method, url, body=None, headers={}): + """Send a complete request to the server.""" + self._send_request(method, url, body, headers) + + def _set_content_length(self, body, method): + # Set the content-length based on the body. If the body is "empty", we + # set Content-Length: 0 for methods that expect a body (RFC 7230, + # Section 3.3.2). If the body is set for other methods, we set the + # header provided we can figure out what the length is. + thelen = None + if body is None and method.upper() in _METHODS_EXPECTING_BODY: + thelen = '0' + elif body is not None: + try: + thelen = str(len(body)) + except (TypeError, AttributeError): + # If this is a file-like object, try to + # fstat its file descriptor + try: + thelen = str(os.fstat(body.fileno()).st_size) + except (AttributeError, OSError): + # Don't send a length if this failed + if self.debuglevel > 0: print "Cannot stat!!" + + if thelen is not None: + self.putheader('Content-Length', thelen) + + def _send_request(self, method, url, body, headers): + # Honor explicitly requested Host: and Accept-Encoding: headers. + header_names = dict.fromkeys([k.lower() for k in headers]) + skips = {} + if 'host' in header_names: + skips['skip_host'] = 1 + if 'accept-encoding' in header_names: + skips['skip_accept_encoding'] = 1 + + self.putrequest(method, url, **skips) + + if 'content-length' not in header_names: + self._set_content_length(body, method) + for hdr, value in headers.iteritems(): + self.putheader(hdr, value) + self.endheaders(body) + + def getresponse(self, buffering=False): + "Get the response from the server." + + # if a prior response has been completed, then forget about it. + if self.__response and self.__response.isclosed(): + self.__response = None + + # + # if a prior response exists, then it must be completed (otherwise, we + # cannot read this response's header to determine the connection-close + # behavior) + # + # note: if a prior response existed, but was connection-close, then the + # socket and response were made independent of this HTTPConnection + # object since a new request requires that we open a whole new + # connection + # + # this means the prior response had one of two states: + # 1) will_close: this connection was reset and the prior socket and + # response operate independently + # 2) persistent: the response was retained and we await its + # isclosed() status to become true. 
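+        # Only case (2) leaves self.__response set, so the check below
+        # refuses to hand out a new response while a previous persistent
+        # response is still being read.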
+ # + if self.__state != _CS_REQ_SENT or self.__response: + raise ResponseNotReady() + + args = (self.sock,) + kwds = {"strict":self.strict, "method":self._method} + if self.debuglevel > 0: + args += (self.debuglevel,) + if buffering: + #only add this keyword if non-default, for compatibility with + #other response_classes. + kwds["buffering"] = True; + response = self.response_class(*args, **kwds) + + try: + response.begin() + assert response.will_close != _UNKNOWN + self.__state = _CS_IDLE + + if response.will_close: + # this effectively passes the connection to the response + self.close() + else: + # remember this, so we can tell when it is complete + self.__response = response + + return response + except: + response.close() + raise + + +class HTTP: + "Compatibility class with httplib.py from 1.5." + + _http_vsn = 10 + _http_vsn_str = 'HTTP/1.0' + + debuglevel = 0 + + _connection_class = HTTPConnection + + def __init__(self, host='', port=None, strict=None): + "Provide a default host, since the superclass requires one." + + # some joker passed 0 explicitly, meaning default port + if port == 0: + port = None + + # Note that we may pass an empty string as the host; this will raise + # an error when we attempt to connect. Presumably, the client code + # will call connect before then, with a proper host. + self._setup(self._connection_class(host, port, strict)) + + def _setup(self, conn): + self._conn = conn + + # set up delegation to flesh out interface + self.send = conn.send + self.putrequest = conn.putrequest + self.putheader = conn.putheader + self.endheaders = conn.endheaders + self.set_debuglevel = conn.set_debuglevel + + conn._http_vsn = self._http_vsn + conn._http_vsn_str = self._http_vsn_str + + self.file = None + + def connect(self, host=None, port=None): + "Accept arguments to set the host/port, since the superclass doesn't." + + if host is not None: + (self._conn.host, self._conn.port) = self._conn._get_hostport(host, port) + self._conn.connect() + + def getfile(self): + "Provide a getfile, since the superclass' does not use this concept." + return self.file + + def getreply(self, buffering=False): + """Compat definition since superclass does not define it. + + Returns a tuple consisting of: + - server status code (e.g. '200' if all goes well) + - server "reason" corresponding to status code + - any RFC822 headers in the response from the server + """ + try: + if not buffering: + response = self._conn.getresponse() + else: + #only add this keyword if non-default for compatibility + #with other connection classes + response = self._conn.getresponse(buffering) + except BadStatusLine, e: + ### hmm. if getresponse() ever closes the socket on a bad request, + ### then we are going to have problems with self.sock + + ### should we keep this behavior? do people use it? + # keep the socket open (as a file), and return it + self.file = self._conn.sock.makefile('rb', 0) + + # close our socket -- we want to restart after any protocol error + self.close() + + self.headers = None + return -1, e.line, None + + self.headers = response.msg + self.file = response.fp + return response.status, response.reason, response.msg + + def close(self): + self._conn.close() + + # note that self.file == response.fp, which gets closed by the + # superclass. just clear the object ref here. + ### hmm. messy. if status==-1, then self.file is owned by us. + ### well... 
we aren't explicitly closing, but losing this ref will + ### do it + self.file = None + +try: + import ssl +except ImportError: + pass +else: + class HTTPSConnection(HTTPConnection): + "This class allows communication via SSL." + + default_port = HTTPS_PORT + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, context=None): + HTTPConnection.__init__(self, host, port, strict, timeout, + source_address) + self.key_file = key_file + self.cert_file = cert_file + if context is None: + context = ssl._create_default_https_context() + if key_file or cert_file: + context.load_cert_chain(cert_file, key_file) + self._context = context + + def connect(self): + "Connect to a host on a given (SSL) port." + + HTTPConnection.connect(self) + + if self._tunnel_host: + server_hostname = self._tunnel_host + else: + server_hostname = self.host + + self.sock = self._context.wrap_socket(self.sock, + server_hostname=server_hostname) + + __all__.append("HTTPSConnection") + + class HTTPS(HTTP): + """Compatibility with 1.5 httplib interface + + Python 1.5.2 did not have an HTTPS class, but it defined an + interface for sending http requests that is also useful for + https. + """ + + _connection_class = HTTPSConnection + + def __init__(self, host='', port=None, key_file=None, cert_file=None, + strict=None, context=None): + # provide a default host, pass the X509 cert info + + # urf. compensate for bad input. + if port == 0: + port = None + self._setup(self._connection_class(host, port, key_file, + cert_file, strict, + context=context)) + + # we never actually use these for anything, but we keep them + # here for compatibility with post-1.5.2 CVS. + self.key_file = key_file + self.cert_file = cert_file + + + def FakeSocket (sock, sslobj): + warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " + + "Use the result of ssl.wrap_socket() directly instead.", + DeprecationWarning, stacklevel=2) + return sslobj + + +class HTTPException(Exception): + # Subclasses that define an __init__ must call Exception.__init__ + # or define self.args. Otherwise, str() will fail. 
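+    # UnknownProtocol, IncompleteRead and BadStatusLine below are examples
+    # of subclasses that define __init__ and therefore set self.args.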
+ pass + +class NotConnected(HTTPException): + pass + +class InvalidURL(HTTPException): + pass + +class UnknownProtocol(HTTPException): + def __init__(self, version): + self.args = version, + self.version = version + +class UnknownTransferEncoding(HTTPException): + pass + +class UnimplementedFileMode(HTTPException): + pass + +class IncompleteRead(HTTPException): + def __init__(self, partial, expected=None): + self.args = partial, + self.partial = partial + self.expected = expected + def __repr__(self): + if self.expected is not None: + e = ', %i more expected' % self.expected + else: + e = '' + return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e) + def __str__(self): + return repr(self) + +class ImproperConnectionState(HTTPException): + pass + +class CannotSendRequest(ImproperConnectionState): + pass + +class CannotSendHeader(ImproperConnectionState): + pass + +class ResponseNotReady(ImproperConnectionState): + pass + +class BadStatusLine(HTTPException): + def __init__(self, line): + if not line: + line = repr(line) + self.args = line, + self.line = line + +class LineTooLong(HTTPException): + def __init__(self, line_type): + HTTPException.__init__(self, "got more than %d bytes when reading %s" + % (_MAXLINE, line_type)) + +# for backwards compatibility +error = HTTPException + +class LineAndFileWrapper: + """A limited file-like object for HTTP/0.9 responses.""" + + # The status-line parsing code calls readline(), which normally + # get the HTTP status line. For a 0.9 response, however, this is + # actually the first line of the body! Clients need to get a + # readable file object that contains that line. + + def __init__(self, line, file): + self._line = line + self._file = file + self._line_consumed = 0 + self._line_offset = 0 + self._line_left = len(line) + + def __getattr__(self, attr): + return getattr(self._file, attr) + + def _done(self): + # called when the last byte is read from the line. After the + # call, all read methods are delegated to the underlying file + # object. + self._line_consumed = 1 + self.read = self._file.read + self.readline = self._file.readline + self.readlines = self._file.readlines + + def read(self, amt=None): + if self._line_consumed: + return self._file.read(amt) + assert self._line_left + if amt is None or amt > self._line_left: + s = self._line[self._line_offset:] + self._done() + if amt is None: + return s + self._file.read() + else: + return s + self._file.read(amt - len(s)) + else: + assert amt <= self._line_left + i = self._line_offset + j = i + amt + s = self._line[i:j] + self._line_offset = j + self._line_left -= amt + if self._line_left == 0: + self._done() + return s + + def readline(self): + if self._line_consumed: + return self._file.readline() + assert self._line_left + s = self._line[self._line_offset:] + self._done() + return s + + def readlines(self, size=None): + if self._line_consumed: + return self._file.readlines(size) + assert self._line_left + L = [self._line[self._line_offset:]] + self._done() + if size is None: + return L + self._file.readlines() + else: + return L + self._file.readlines(size) diff --git a/CVIssueCount/io.py b/CVIssueCount/io.py new file mode 100644 index 0000000..1438493 --- /dev/null +++ b/CVIssueCount/io.py @@ -0,0 +1,90 @@ +"""The io module provides the Python interfaces to stream handling. The +builtin open function is defined in this module. + +At the top of the I/O hierarchy is the abstract base class IOBase. It +defines the basic interface to a stream. 
Note, however, that there is no +separation between reading and writing to streams; implementations are +allowed to raise an IOError if they do not support a given operation. + +Extending IOBase is RawIOBase which deals simply with the reading and +writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide +an interface to OS files. + +BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its +subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer +streams that are readable, writable, and both respectively. +BufferedRandom provides a buffered interface to random access +streams. BytesIO is a simple stream of in-memory bytes. + +Another IOBase subclass, TextIOBase, deals with the encoding and decoding +of streams into text. TextIOWrapper, which extends it, is a buffered text +interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO +is a in-memory stream for text. + +Argument names are not part of the specification, and only the arguments +of open() are intended to be used as keyword arguments. + +data: + +DEFAULT_BUFFER_SIZE + + An int containing the default buffer size used by the module's buffered + I/O classes. open() uses the file's blksize (as obtained by os.stat) if + possible. +""" +# New I/O library conforming to PEP 3116. + +__author__ = ("Guido van Rossum , " + "Mike Verdone , " + "Mark Russell , " + "Antoine Pitrou , " + "Amaury Forgeot d'Arc , " + "Benjamin Peterson ") + +__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO", + "BytesIO", "StringIO", "BufferedIOBase", + "BufferedReader", "BufferedWriter", "BufferedRWPair", + "BufferedRandom", "TextIOBase", "TextIOWrapper", + "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] + + +import _io +import abc + +from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, + open, FileIO, BytesIO, StringIO, BufferedReader, + BufferedWriter, BufferedRWPair, BufferedRandom, + IncrementalNewlineDecoder, TextIOWrapper) + +OpenWrapper = _io.open # for compatibility with _pyio + +# for seek() +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Declaring ABCs in C is tricky so we do it here. +# Method descriptions and default implementations are inherited from the C +# version however. +class IOBase(_io._IOBase): + __metaclass__ = abc.ABCMeta + __doc__ = _io._IOBase.__doc__ + +class RawIOBase(_io._RawIOBase, IOBase): + __doc__ = _io._RawIOBase.__doc__ + +class BufferedIOBase(_io._BufferedIOBase, IOBase): + __doc__ = _io._BufferedIOBase.__doc__ + +class TextIOBase(_io._TextIOBase, IOBase): + __doc__ = _io._TextIOBase.__doc__ + +RawIOBase.register(FileIO) + +for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, + BufferedRWPair): + BufferedIOBase.register(klass) + +for klass in (StringIO, TextIOWrapper): + TextIOBase.register(klass) +del klass diff --git a/CVIssueCount/ipypulldom.py b/CVIssueCount/ipypulldom.py new file mode 100644 index 0000000..4aba1b5 --- /dev/null +++ b/CVIssueCount/ipypulldom.py @@ -0,0 +1,66 @@ +##################################################################################### +# +# Copyright (c) Harry Pierson. All rights reserved. +# +# This source code is subject to terms and conditions of the Microsoft Public License. +# A copy of the license can be found at http://opensource.org/licenses/ms-pl.html +# By using this source code in any fashion, you are agreeing to be bound +# by the terms of the Microsoft Public License. +# +# You must not remove this notice, or any other, from this software. 
+# +##################################################################################### + +import clr +clr.AddReference('System.Xml') + +from System.String import IsNullOrEmpty +from System.Xml import XmlReader, XmlNodeType, XmlReaderSettings, DtdProcessing +from System.IO import StringReader + +class XmlNode(object): + def __init__(self, xr): + self.name = xr.LocalName + self.namespace = xr.NamespaceURI + self.prefix = xr.Prefix + self.value = xr.Value + self.nodeType = xr.NodeType + + if xr.NodeType == XmlNodeType.Element: + self.attributes = [] + while xr.MoveToNextAttribute(): + if xr.NamespaceURI == 'http://www.w3.org/2000/xmlns/': + continue + self.attributes.append(XmlNode(xr)) + xr.MoveToElement() + + @property + def xname(self): + if IsNullOrEmpty(self.namespace): + return self.name + return "{%(namespace)s}%(name)s" % self.__dict__ + + +def parse(xml): + # see issue 379, and https://stackoverflow.com/questions/215854/ + settings = XmlReaderSettings(); + settings.XmlResolver = None; + settings.DtdProcessing = DtdProcessing.Ignore; + settings.ProhibitDtd = False; + + with XmlReader.Create(xml, settings) as xr: + while xr.Read(): + xr.MoveToContent() + node = XmlNode(xr) + yield node + if xr.IsEmptyElement: + node.nodeType = XmlNodeType.EndElement + del node.attributes + yield node + +def parseString(xml): + return parse(StringReader(xml)) + +if __name__ == "__main__": + nodes = parse('http://feeds.feedburner.com/Devhawk') + \ No newline at end of file diff --git a/CVIssueCount/keyword.py b/CVIssueCount/keyword.py new file mode 100644 index 0000000..69794bd --- /dev/null +++ b/CVIssueCount/keyword.py @@ -0,0 +1,93 @@ +#! /usr/bin/env python + +"""Keywords (from "graminit.c") + +This file is automatically generated; please don't muck it up! 
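+
+For example, iskeyword('print') is true against the Python 2 keyword list
+below, while names such as 'True' and 'None' are not keywords here.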
+ +To update the symbols in this file, 'cd' to the top directory of +the python source tree after building the interpreter and run: + + ./python Lib/keyword.py +""" + +__all__ = ["iskeyword", "kwlist"] + +kwlist = [ +#--start keywords-- + 'and', + 'as', + 'assert', + 'break', + 'class', + 'continue', + 'def', + 'del', + 'elif', + 'else', + 'except', + 'exec', + 'finally', + 'for', + 'from', + 'global', + 'if', + 'import', + 'in', + 'is', + 'lambda', + 'not', + 'or', + 'pass', + 'print', + 'raise', + 'return', + 'try', + 'while', + 'with', + 'yield', +#--end keywords-- + ] + +iskeyword = frozenset(kwlist).__contains__ + +def main(): + import sys, re + + args = sys.argv[1:] + iptfile = args and args[0] or "Python/graminit.c" + if len(args) > 1: optfile = args[1] + else: optfile = "Lib/keyword.py" + + # scan the source file for keywords + fp = open(iptfile) + strprog = re.compile('"([^"]+)"') + lines = [] + for line in fp: + if '{1, "' in line: + match = strprog.search(line) + if match: + lines.append(" '" + match.group(1) + "',\n") + fp.close() + lines.sort() + + # load the output skeleton from the target + fp = open(optfile) + format = fp.readlines() + fp.close() + + # insert the lines of keywords + try: + start = format.index("#--start keywords--\n") + 1 + end = format.index("#--end keywords--\n") + format[start:end] = lines + except ValueError: + sys.stderr.write("target does not contain format markers\n") + sys.exit(1) + + # write the output file + fp = open(optfile, 'w') + fp.write(''.join(format)) + fp.close() + +if __name__ == "__main__": + main() diff --git a/CVIssueCount/linecache.py b/CVIssueCount/linecache.py new file mode 100644 index 0000000..4b97be3 --- /dev/null +++ b/CVIssueCount/linecache.py @@ -0,0 +1,139 @@ +"""Cache lines from files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +""" + +import sys +import os + +__all__ = ["getline", "clearcache", "checkcache"] + +def getline(filename, lineno, module_globals=None): + lines = getlines(filename, module_globals) + if 1 <= lineno <= len(lines): + return lines[lineno-1] + else: + return '' + + +# The cache + +cache = {} # The cache + + +def clearcache(): + """Clear the cache entirely.""" + + global cache + cache = {} + + +def getlines(filename, module_globals=None): + """Get the lines for a file from the cache. + Update the cache if it doesn't contain an entry for this file already.""" + + if filename in cache: + return cache[filename][2] + + try: + return updatecache(filename, module_globals) + except MemoryError: + clearcache() + return [] + + +def checkcache(filename=None): + """Discard cache entries that are out of date. + (This is not checked upon each call!)""" + + if filename is None: + filenames = cache.keys() + else: + if filename in cache: + filenames = [filename] + else: + return + + for filename in filenames: + size, mtime, lines, fullname = cache[filename] + if mtime is None: + continue # no-op for files loaded via a __loader__ + try: + stat = os.stat(fullname) + except os.error: + del cache[filename] + continue + if size != stat.st_size or mtime != stat.st_mtime: + del cache[filename] + + +def updatecache(filename, module_globals=None): + """Update a cache entry and return its list of lines. 
+ If something's wrong, print a message, discard the cache entry, + and return an empty list.""" + + if filename in cache: + del cache[filename] + if not filename or (filename.startswith('<') and filename.endswith('>')): + return [] + + fullname = filename + try: + stat = os.stat(fullname) + except OSError: + basename = filename + + # Try for a __loader__, if available + if module_globals and '__loader__' in module_globals: + name = module_globals.get('__name__') + loader = module_globals['__loader__'] + get_source = getattr(loader, 'get_source', None) + + if name and get_source: + try: + data = get_source(name) + except (ImportError, IOError): + pass + else: + if data is None: + # No luck, the PEP302 loader cannot find the source + # for this module. + return [] + cache[filename] = ( + len(data), None, + [line+'\n' for line in data.splitlines()], fullname + ) + return cache[filename][2] + + # Try looking through the module search path, which is only useful + # when handling a relative filename. + if os.path.isabs(filename): + return [] + + for dirname in sys.path: + # When using imputil, sys.path may contain things other than + # strings; ignore them when it happens. + try: + fullname = os.path.join(dirname, basename) + except (TypeError, AttributeError): + # Not sufficiently string-like to do anything useful with. + continue + try: + stat = os.stat(fullname) + break + except os.error: + pass + else: + return [] + try: + with open(fullname, 'rU') as fp: + lines = fp.readlines() + except IOError: + return [] + if lines and not lines[-1].endswith('\n'): + lines[-1] += '\n' + size, mtime = stat.st_size, stat.st_mtime + cache[filename] = size, mtime, lines, fullname + return lines diff --git a/CVIssueCount/mimetools.py b/CVIssueCount/mimetools.py new file mode 100644 index 0000000..71ca8f8 --- /dev/null +++ b/CVIssueCount/mimetools.py @@ -0,0 +1,250 @@ +"""Various tools used by MIME-reading or MIME-writing programs.""" + + +import os +import sys +import tempfile +from warnings import filterwarnings, catch_warnings +with catch_warnings(): + if sys.py3kwarning: + filterwarnings("ignore", ".*rfc822 has been removed", DeprecationWarning) + import rfc822 + +from warnings import warnpy3k +warnpy3k("in 3.x, mimetools has been removed in favor of the email package", + stacklevel=2) + +__all__ = ["Message","choose_boundary","encode","decode","copyliteral", + "copybinary"] + +class Message(rfc822.Message): + """A derived class of rfc822.Message that knows about MIME headers and + contains some hooks for decoding encoded and multipart messages.""" + + def __init__(self, fp, seekable = 1): + rfc822.Message.__init__(self, fp, seekable) + self.encodingheader = \ + self.getheader('content-transfer-encoding') + self.typeheader = \ + self.getheader('content-type') + self.parsetype() + self.parseplist() + + def parsetype(self): + str = self.typeheader + if str is None: + str = 'text/plain' + if ';' in str: + i = str.index(';') + self.plisttext = str[i:] + str = str[:i] + else: + self.plisttext = '' + fields = str.split('/') + for i in range(len(fields)): + fields[i] = fields[i].strip().lower() + self.type = '/'.join(fields) + self.maintype = fields[0] + self.subtype = '/'.join(fields[1:]) + + def parseplist(self): + str = self.plisttext + self.plist = [] + while str[:1] == ';': + str = str[1:] + if ';' in str: + # XXX Should parse quotes! 
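+                # (e.g. a hypothetical header like
+                #      Content-Type: multipart/mixed; boundary="a;b"
+                #  would be split at the ';' inside the quoted boundary,
+                #  which is the limitation the XXX above refers to)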
+ end = str.index(';') + else: + end = len(str) + f = str[:end] + if '=' in f: + i = f.index('=') + f = f[:i].strip().lower() + \ + '=' + f[i+1:].strip() + self.plist.append(f.strip()) + str = str[end:] + + def getplist(self): + return self.plist + + def getparam(self, name): + name = name.lower() + '=' + n = len(name) + for p in self.plist: + if p[:n] == name: + return rfc822.unquote(p[n:]) + return None + + def getparamnames(self): + result = [] + for p in self.plist: + i = p.find('=') + if i >= 0: + result.append(p[:i].lower()) + return result + + def getencoding(self): + if self.encodingheader is None: + return '7bit' + return self.encodingheader.lower() + + def gettype(self): + return self.type + + def getmaintype(self): + return self.maintype + + def getsubtype(self): + return self.subtype + + + + +# Utility functions +# ----------------- + +try: + import thread +except ImportError: + import dummy_thread as thread +_counter_lock = thread.allocate_lock() +del thread + +_counter = 0 +def _get_next_counter(): + global _counter + _counter_lock.acquire() + _counter += 1 + result = _counter + _counter_lock.release() + return result + +_prefix = None + +def choose_boundary(): + """Return a string usable as a multipart boundary. + + The string chosen is unique within a single program run, and + incorporates the user id (if available), process id (if available), + and current time. So it's very unlikely the returned string appears + in message text, but there's no guarantee. + + The boundary contains dots so you have to quote it in the header.""" + + global _prefix + import time + if _prefix is None: + import socket + try: + hostid = socket.gethostbyname(socket.gethostname()) + except socket.gaierror: + hostid = '127.0.0.1' + try: + uid = repr(os.getuid()) + except AttributeError: + uid = '1' + try: + pid = repr(os.getpid()) + except AttributeError: + pid = '1' + _prefix = hostid + '.' + uid + '.' 
+ pid + return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter()) + + +# Subroutines for decoding some common content-transfer-types + +def decode(input, output, encoding): + """Decode common content-transfer-encodings (base64, quopri, uuencode).""" + if encoding == 'base64': + import base64 + return base64.decode(input, output) + if encoding == 'quoted-printable': + import quopri + return quopri.decode(input, output) + if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'): + import uu + return uu.decode(input, output) + if encoding in ('7bit', '8bit'): + return output.write(input.read()) + if encoding in decodetab: + pipethrough(input, decodetab[encoding], output) + else: + raise ValueError, \ + 'unknown Content-Transfer-Encoding: %s' % encoding + +def encode(input, output, encoding): + """Encode common content-transfer-encodings (base64, quopri, uuencode).""" + if encoding == 'base64': + import base64 + return base64.encode(input, output) + if encoding == 'quoted-printable': + import quopri + return quopri.encode(input, output, 0) + if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'): + import uu + return uu.encode(input, output) + if encoding in ('7bit', '8bit'): + return output.write(input.read()) + if encoding in encodetab: + pipethrough(input, encodetab[encoding], output) + else: + raise ValueError, \ + 'unknown Content-Transfer-Encoding: %s' % encoding + +# The following is no longer used for standard encodings + +# XXX This requires that uudecode and mmencode are in $PATH + +uudecode_pipe = '''( +TEMP=/tmp/@uu.$$ +sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode +cat $TEMP +rm $TEMP +)''' + +decodetab = { + 'uuencode': uudecode_pipe, + 'x-uuencode': uudecode_pipe, + 'uue': uudecode_pipe, + 'x-uue': uudecode_pipe, + 'quoted-printable': 'mmencode -u -q', + 'base64': 'mmencode -u -b', +} + +encodetab = { + 'x-uuencode': 'uuencode tempfile', + 'uuencode': 'uuencode tempfile', + 'x-uue': 'uuencode tempfile', + 'uue': 'uuencode tempfile', + 'quoted-printable': 'mmencode -q', + 'base64': 'mmencode -b', +} + +def pipeto(input, command): + pipe = os.popen(command, 'w') + copyliteral(input, pipe) + pipe.close() + +def pipethrough(input, command, output): + (fd, tempname) = tempfile.mkstemp() + temp = os.fdopen(fd, 'w') + copyliteral(input, temp) + temp.close() + pipe = os.popen(command + ' <' + tempname, 'r') + copybinary(pipe, output) + pipe.close() + os.unlink(tempname) + +def copyliteral(input, output): + while 1: + line = input.readline() + if not line: break + output.write(line) + +def copybinary(input, output): + BUFSIZE = 8192 + while 1: + line = input.read(BUFSIZE) + if not line: break + output.write(line) diff --git a/CVIssueCount/ntpath.py b/CVIssueCount/ntpath.py new file mode 100644 index 0000000..58951b9 --- /dev/null +++ b/CVIssueCount/ntpath.py @@ -0,0 +1,550 @@ +# Module 'ntpath' -- common operations on WinNT/Win95 pathnames +"""Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. 
+""" + +import os +import sys +import stat +import genericpath +import warnings + +from genericpath import * +from genericpath import _unicode + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime", "islink","exists","lexists","isdir","isfile", + "ismount","walk","expanduser","expandvars","normpath","abspath", + "splitunc","curdir","pardir","sep","pathsep","defpath","altsep", + "extsep","devnull","realpath","supports_unicode_filenames","relpath"] + +# strings representing various path-related bits and pieces +curdir = '.' +pardir = '..' +extsep = '.' +sep = '\\' +pathsep = ';' +altsep = '/' +defpath = '.;C:\\bin' +if 'ce' in sys.builtin_module_names: + defpath = '\\Windows' +elif 'os2' in sys.builtin_module_names: + # OS/2 w/ VACPP + altsep = '/' +devnull = 'nul' + +# Normalize the case of a pathname and map slashes to backslashes. +# Other normalizations (such as optimizing '../' away) are not done +# (this is done by normpath). + +def normcase(s): + """Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes.""" + return s.replace("/", "\\").lower() + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. +# For DOS it is absolute if it starts with a slash or backslash (current +# volume), or if a pathname after the volume letter and colon / UNC resource +# starts with a slash or backslash. + +def isabs(s): + """Test whether a path is absolute""" + s = splitdrive(s)[1] + return s != '' and s[:1] in '/\\' + + +# Join two (or more) paths. +def join(path, *paths): + """Join two or more pathname components, inserting "\\" as needed.""" + result_drive, result_path = splitdrive(path) + for p in paths: + p_drive, p_path = splitdrive(p) + if p_path and p_path[0] in '\\/': + # Second path is absolute + if p_drive or not result_drive: + result_drive = p_drive + result_path = p_path + continue + elif p_drive and p_drive != result_drive: + if p_drive.lower() != result_drive.lower(): + # Different drives => ignore the first path entirely + result_drive = p_drive + result_path = p_path + continue + # Same drive in different case + result_drive = p_drive + # Second path is relative to the first + if result_path and result_path[-1] not in '\\/': + result_path = result_path + '\\' + result_path = result_path + p_path + ## add separator between UNC and non-absolute path + if (result_path and result_path[0] not in '\\/' and + result_drive and result_drive[-1:] != ':'): + return result_drive + sep + result_path + return result_drive + result_path + + +# Split a path in a drive specification (a drive letter followed by a +# colon) and the path specification. +# It is always true that drivespec + pathspec == p +def splitdrive(p): + """Split a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. 
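+
+    If the path contains neither a drive letter nor a UNC path, the first
+    element is empty, e.g. splitdrive("dir/file") returns ("", "dir/file").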
+ + """ + if len(p) > 1: + normp = p.replace(altsep, sep) + if (normp[0:2] == sep*2) and (normp[2:3] != sep): + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path + # \\machine\mountpoint\directory\etc\... + # directory ^^^^^^^^^^^^^^^ + index = normp.find(sep, 2) + if index == -1: + return '', p + index2 = normp.find(sep, index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 == index + 1: + return '', p + if index2 == -1: + index2 = len(p) + return p[:index2], p[index2:] + if normp[1] == ':': + return p[:2], p[2:] + return '', p + +# Parse UNC paths +def splitunc(p): + """Split a pathname into UNC mount point and relative path specifiers. + + Return a 2-tuple (unc, rest); either part may be empty. + If unc is not empty, it has the form '//host/mount' (or similar + using backslashes). unc+rest is always the input path. + Paths containing drive letters never have an UNC part. + """ + if p[1:2] == ':': + return '', p # Drive letter present + firstTwo = p[0:2] + if firstTwo == '//' or firstTwo == '\\\\': + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter + # \\machine\mountpoint\directories... + # directory ^^^^^^^^^^^^^^^ + normp = p.replace('\\', '/') + index = normp.find('/', 2) + if index <= 2: + return '', p + index2 = normp.find('/', index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 == index + 1: + return '', p + if index2 == -1: + index2 = len(p) + return p[:index2], p[index2:] + return '', p + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). After the trailing '/' is stripped, the invariant +# join(head, tail) == p holds. +# The resulting head won't end in '/' unless it is the root. + +def split(p): + """Split a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.""" + + d, p = splitdrive(p) + # set i to index beyond p's last slash + i = len(p) + while i and p[i-1] not in '/\\': + i = i - 1 + head, tail = p[:i], p[i:] # now tail has no slashes + # remove trailing slashes from head, unless it's all slashes + head2 = head + while head2 and head2[-1] in '/\\': + head2 = head2[:-1] + head = head2 or head + return d + head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + return genericpath._splitext(p, sep, altsep, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + + +# Return the tail (basename) part of a path. + +def basename(p): + """Returns the final component of a pathname""" + return split(p)[1] + + +# Return the head (dirname) part of a path. + +def dirname(p): + """Returns the directory component of a pathname""" + return split(p)[0] + +# Is a path a symbolic link? +# This will always return false on systems where posix.lstat doesn't exist. + +def islink(path): + """Test for symbolic link. + On WindowsNT/95 and OS/2 always returns false + """ + return False + +# alias exists to lexists +lexists = exists + +# Is a path a mount point? Either a root (with or without drive letter) +# or an UNC path with at most a / or \ after the mount point. 
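+# A rough illustration of that rule (these examples are illustrative and
+# not part of the upstream module):
+#   ismount('C:\\')           -> True    (root of a drive)
+#   ismount(r'\\host\share')  -> True    (bare UNC share)
+#   ismount('C:\\Windows')    -> False   (ordinary directory)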
+ +def ismount(path): + """Test whether a path is a mount point (defined as root of drive)""" + unc, rest = splitunc(path) + if unc: + return rest in ("", "/", "\\") + p = splitdrive(path)[1] + return len(p) == 1 and p[0] in '/\\' + + +# Directory tree walk. +# For each directory under top (including top itself, but excluding +# '.' and '..'), func(arg, dirname, filenames) is called, where +# dirname is the name of the directory and filenames is the list +# of files (and subdirectories etc.) in the directory. +# The func may modify the filenames list, to implement a filter, +# or to impose a different order of visiting. + +def walk(top, func, arg): + """Directory tree walk with callback function. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), call func(arg, dirname, fnames). + dirname is the name of the directory, and fnames a list of the names of + the files and subdirectories in dirname (excluding '.' and '..'). func + may modify the fnames list in-place (e.g. via del or slice assignment), + and walk will only recurse into the subdirectories whose names remain in + fnames; this can be used to implement a filter, or to impose a specific + order of visiting. No semantics are defined for, or required of, arg, + beyond that arg is always passed to func. It can be used, e.g., to pass + a filename pattern, or a mutable object designed to accumulate + statistics. Passing None for arg is common.""" + warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.", + stacklevel=2) + try: + names = os.listdir(top) + except os.error: + return + func(arg, top, names) + for name in names: + name = join(top, name) + if isdir(name): + walk(name, func, arg) + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructs. + + If user or $HOME is unknown, do nothing.""" + if path[:1] != '~': + return path + i, n = 1, len(path) + while i < n and path[i] not in '/\\': + i = i + 1 + + if 'HOME' in os.environ: + userhome = os.environ['HOME'] + elif 'USERPROFILE' in os.environ: + userhome = os.environ['USERPROFILE'] + elif not 'HOMEPATH' in os.environ: + return path + else: + try: + drive = os.environ['HOMEDRIVE'] + except KeyError: + drive = '' + userhome = join(drive, os.environ['HOMEPATH']) + + if i != 1: #~user + userhome = join(dirname(userhome), path[1:i]) + + return userhome + path[i:] + + +# Expand paths containing shell variable substitutions. +# The following rules apply: +# - no expansion within single quotes +# - '$$' is translated into '$' +# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2% +# - ${varname} is accepted. +# - $varname is accepted. +# - %varname% is accepted. +# - varnames can be made out of letters, digits and the characters '_-' +# (though is not verified in the ${varname} and %varname% cases) +# XXX With COMMAND.COM you can use any characters in a variable name, +# XXX except '^|<>='. + +def expandvars(path): + """Expand shell variables of the forms $var, ${var} and %var%. 
+ + Unknown variables are left unchanged.""" + if '$' not in path and '%' not in path: + return path + import string + varchars = string.ascii_letters + string.digits + '_-' + if isinstance(path, _unicode): + encoding = sys.getfilesystemencoding() + def getenv(var): + return os.environ[var.encode(encoding)].decode(encoding) + else: + def getenv(var): + return os.environ[var] + res = '' + index = 0 + pathlen = len(path) + while index < pathlen: + c = path[index] + if c == '\'': # no expansion within single quotes + path = path[index + 1:] + pathlen = len(path) + try: + index = path.index('\'') + res = res + '\'' + path[:index + 1] + except ValueError: + res = res + c + path + index = pathlen - 1 + elif c == '%': # variable or '%' + if path[index + 1:index + 2] == '%': + res = res + c + index = index + 1 + else: + path = path[index+1:] + pathlen = len(path) + try: + index = path.index('%') + except ValueError: + res = res + '%' + path + index = pathlen - 1 + else: + var = path[:index] + try: + res = res + getenv(var) + except KeyError: + res = res + '%' + var + '%' + elif c == '$': # variable or '$$' + if path[index + 1:index + 2] == '$': + res = res + c + index = index + 1 + elif path[index + 1:index + 2] == '{': + path = path[index+2:] + pathlen = len(path) + try: + index = path.index('}') + var = path[:index] + try: + res = res + getenv(var) + except KeyError: + res = res + '${' + var + '}' + except ValueError: + res = res + '${' + path + index = pathlen - 1 + else: + var = '' + index = index + 1 + c = path[index:index + 1] + while c != '' and c in varchars: + var = var + c + index = index + 1 + c = path[index:index + 1] + try: + res = res + getenv(var) + except KeyError: + res = res + '$' + var + if c != '': + index = index - 1 + else: + res = res + c + index = index + 1 + return res + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. +# Previously, this function also truncated pathnames to 8+3 format, +# but as this module is called "ntpath", that's obviously wrong! + +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + # Preserve unicode (if path is unicode) + backslash, dot = (u'\\', u'.') if isinstance(path, _unicode) else ('\\', '.') + if path.startswith(('\\\\.\\', '\\\\?\\')): + # in the case of paths with these prefixes: + # \\.\ -> device names + # \\?\ -> literal paths + # do not do any normalization, but return the path unchanged + return path + path = path.replace("/", "\\") + prefix, path = splitdrive(path) + # We need to be careful here. If the prefix is empty, and the path starts + # with a backslash, it could either be an absolute path on the current + # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It + # is therefore imperative NOT to collapse multiple backslashes blindly in + # that case. + # The code below preserves multiple backslashes when there is no drive + # letter. This means that the invalid filename \\\a\b is preserved + # unchanged, where a\\\b is normalised to a\b. It's not clear that there + # is any better behaviour for such edge cases. 
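+    # Roughly, in terms of the rule above (illustrative expectations only,
+    # not taken from the upstream source):
+    #   normpath(r'\\\a\b')   -> r'\\\a\b'   (no drive letter: kept as-is)
+    #   normpath(r'C:\\a\b')  -> r'C:\a\b'   (drive letter: collapsed)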
+ if prefix == '': + # No drive letter - preserve initial backslashes + while path[:1] == "\\": + prefix = prefix + backslash + path = path[1:] + else: + # We have a drive letter - collapse initial backslashes + if path.startswith("\\"): + prefix = prefix + backslash + path = path.lstrip("\\") + comps = path.split("\\") + i = 0 + while i < len(comps): + if comps[i] in ('.', ''): + del comps[i] + elif comps[i] == '..': + if i > 0 and comps[i-1] != '..': + del comps[i-1:i+1] + i -= 1 + elif i == 0 and prefix.endswith("\\"): + del comps[i] + else: + i += 1 + else: + i += 1 + # If the path is now empty, substitute '.' + if not prefix and not comps: + comps.append(dot) + return prefix + backslash.join(comps) + + +# Return an absolute path. +try: + from nt import _getfullpathname + +except ImportError: # not running on Windows - mock up something sensible + def abspath(path): + """Return the absolute version of a path.""" + if not isabs(path): + if isinstance(path, _unicode): + cwd = os.getcwdu() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + +else: # use native Windows method on Windows + def abspath(path): + """Return the absolute version of a path.""" + + if path: # Empty path must return current working directory. + try: + path = _getfullpathname(path) + except WindowsError: + pass # Bad path - return unchanged. + elif isinstance(path, _unicode): + path = os.getcwdu() + else: + path = os.getcwd() + return normpath(path) + +# realpath is a no-op on systems without islink support +realpath = abspath +# Win9x family and earlier have no Unicode filename support. +supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and + sys.getwindowsversion()[3] >= 2) + +def _abspath_split(path): + abs = abspath(normpath(path)) + prefix, rest = splitunc(abs) + is_unc = bool(prefix) + if not is_unc: + prefix, rest = splitdrive(abs) + return is_unc, prefix, [x for x in rest.split(sep) if x] + +def relpath(path, start=curdir): + """Return a relative version of a path""" + + if not path: + raise ValueError("no path specified") + + start_is_unc, start_prefix, start_list = _abspath_split(start) + path_is_unc, path_prefix, path_list = _abspath_split(path) + + if path_is_unc ^ start_is_unc: + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + if path_prefix.lower() != start_prefix.lower(): + if path_is_unc: + raise ValueError("path is on UNC root %s, start on UNC root %s" + % (path_prefix, start_prefix)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_prefix, start_prefix)) + # Work out how much of the filepath is shared by start and path. + i = 0 + for e1, e2 in zip(start_list, path_list): + if e1.lower() != e2.lower(): + break + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + +try: + # The genericpath.isdir implementation uses os.stat and checks the mode + # attribute to tell whether or not the path is a directory. + # This is overkill on Windows - just pass the path to GetFileAttributes + # and check the attribute from there. + from nt import _isdir as isdir +except ImportError: + # Use genericpath.isdir as imported above. 
+ pass diff --git a/CVIssueCount/nturl2path.py b/CVIssueCount/nturl2path.py new file mode 100644 index 0000000..9e6eb0d --- /dev/null +++ b/CVIssueCount/nturl2path.py @@ -0,0 +1,68 @@ +"""Convert a NT pathname to a file URL and vice versa.""" + +def url2pathname(url): + """OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.""" + # e.g. + # ///C|/foo/bar/spam.foo + # and + # ///C:/foo/bar/spam.foo + # become + # C:\foo\bar\spam.foo + import string, urllib + # Windows itself uses ":" even in URLs. + url = url.replace(':', '|') + if not '|' in url: + # No drive specifier, just convert slashes + if url[:4] == '////': + # path is something like ////host/path/on/remote/host + # convert this to \\host\path\on\remote\host + # (notice halving of slashes at the start of the path) + url = url[2:] + components = url.split('/') + # make sure not to convert quoted slashes :-) + return urllib.unquote('\\'.join(components)) + comp = url.split('|') + if len(comp) != 2 or comp[0][-1] not in string.ascii_letters: + error = 'Bad URL: ' + url + raise IOError, error + drive = comp[0][-1].upper() + path = drive + ':' + components = comp[1].split('/') + for comp in components: + if comp: + path = path + '\\' + urllib.unquote(comp) + # Issue #11474: url like '/C|/' should convert into 'C:\\' + if path.endswith(':') and url.endswith('/'): + path += '\\' + return path + +def pathname2url(p): + """OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.""" + # e.g. + # C:\foo\bar\spam.foo + # becomes + # ///C:/foo/bar/spam.foo + import urllib + if not ':' in p: + # No drive specifier, just convert slashes and quote the name + if p[:2] == '\\\\': + # path is something like \\host\path\on\remote\host + # convert this to ////host/path/on/remote/host + # (notice doubling of slashes at the start of the path) + p = '\\\\' + p + components = p.split('\\') + return urllib.quote('/'.join(components)) + comp = p.split(':') + if len(comp) != 2 or len(comp[0]) > 1: + error = 'Bad path: ' + p + raise IOError, error + + drive = urllib.quote(comp[0].upper()) + components = comp[1].split('\\') + path = '///' + drive + ':' + for comp in components: + if comp: + path = path + '/' + urllib.quote(comp) + return path diff --git a/CVIssueCount/os.py b/CVIssueCount/os.py new file mode 100644 index 0000000..cfea71b --- /dev/null +++ b/CVIssueCount/os.py @@ -0,0 +1,742 @@ +r"""OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc. + - os.path is one of the modules posixpath, or ntpath + - os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos' + - os.curdir is a string representing the current directory ('.' or ':') + - os.pardir is a string representing the parent directory ('..' or '::') + - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') + - os.extsep is the extension separator ('.' or '/') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. 
Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +""" + +#' + +import sys, errno + +_names = sys.builtin_module_names + +# Note: more names are added to __all__ later. +__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep", + "defpath", "name", "path", "devnull", + "SEEK_SET", "SEEK_CUR", "SEEK_END"] + +def _get_exports_list(module): + try: + return list(module.__all__) + except AttributeError: + return [n for n in dir(module) if n[0] != '_'] + +if 'posix' in _names: + name = 'posix' + linesep = '\n' + from posix import * + try: + from posix import _exit + except ImportError: + pass + import posixpath as path + + import posix + __all__.extend(_get_exports_list(posix)) + del posix + +elif 'nt' in _names: + name = 'nt' + linesep = '\r\n' + from nt import * + try: + from nt import _exit + except ImportError: + pass + import ntpath as path + + import nt + __all__.extend(_get_exports_list(nt)) + del nt + +elif 'os2' in _names: + name = 'os2' + linesep = '\r\n' + from os2 import * + try: + from os2 import _exit + except ImportError: + pass + if sys.version.find('EMX GCC') == -1: + import ntpath as path + else: + import os2emxpath as path + from _emx_link import link + + import os2 + __all__.extend(_get_exports_list(os2)) + del os2 + +elif 'ce' in _names: + name = 'ce' + linesep = '\r\n' + from ce import * + try: + from ce import _exit + except ImportError: + pass + # We can use the standard Windows path. + import ntpath as path + + import ce + __all__.extend(_get_exports_list(ce)) + del ce + +elif 'riscos' in _names: + name = 'riscos' + linesep = '\n' + from riscos import * + try: + from riscos import _exit + except ImportError: + pass + import riscospath as path + + import riscos + __all__.extend(_get_exports_list(riscos)) + del riscos + +else: + raise ImportError, 'no os specific module found' + +sys.modules['os.path'] = path +from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, + devnull) + +del _names + +# Python uses fixed values for the SEEK_ constants; they are mapped +# to native constants if necessary in posixmodule.c +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +#' + +# Super directory utilities. +# (Inspired by Eric Raymond; the doc strings are mostly his) + +def makedirs(name, mode=0777): + """makedirs(path [, mode=0777]) + + Super-mkdir; create a leaf directory and all intermediate ones. + Works like mkdir, except that any intermediate path segment (not + just the rightmost) will be created if it does not exist. This is + recursive. + + """ + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + if head and tail and not path.exists(head): + try: + makedirs(head, mode) + except OSError, e: + # be happy if someone already created the path + if e.errno != errno.EEXIST: + raise + if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists + return + mkdir(name, mode) + +def removedirs(name): + """removedirs(path) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. 
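+
+    For example, removedirs('foo/bar/baz') (a hypothetical tree) first
+    removes 'foo/bar/baz', then tries 'foo/bar' and 'foo' in turn,
+    stopping quietly at the first directory that is not empty.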
+ + """ + rmdir(name) + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + while head and tail: + try: + rmdir(head) + except error: + break + head, tail = path.split(head) + +def renames(old, new): + """renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + """ + head, tail = path.split(new) + if head and tail and not path.exists(head): + makedirs(head) + rename(old, new) + head, tail = path.split(old) + if head and tail: + try: + removedirs(head) + except error: + pass + +__all__.extend(["makedirs", "removedirs", "renames"]) + +def walk(top, topdown=True, onerror=None, followlinks=False): + """Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false is ineffective, since the directories in dirnames have + already been generated by the time dirnames itself is generated. No matter + the value of topdown, the list of subdirectories is retrieved before the + tuples for the directory and its subdirectories are generated. + + By default errors from the os.listdir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an os.error instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. 
+ + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print root, "consumes", + print sum([getsize(join(root, name)) for name in files]), + print "bytes in", len(files), "non-directory files" + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + """ + + islink, join, isdir = path.islink, path.join, path.isdir + + # We may not have read permission for top, in which case we can't + # get a list of the files the directory contains. os.path.walk + # always suppressed the exception then, rather than blow up for a + # minor reason when (say) a thousand readable directories are still + # left to visit. That logic is copied here. + try: + # Note that listdir and error are globals in this module due + # to earlier import-*. + names = listdir(top) + except error, err: + if onerror is not None: + onerror(err) + return + + dirs, nondirs = [], [] + for name in names: + if isdir(join(top, name)): + dirs.append(name) + else: + nondirs.append(name) + + if topdown: + yield top, dirs, nondirs + for name in dirs: + new_path = join(top, name) + if followlinks or not islink(new_path): + for x in walk(new_path, topdown, onerror, followlinks): + yield x + if not topdown: + yield top, dirs, nondirs + +__all__.append("walk") + +# Make sure os.environ exists, at least +try: + environ +except NameError: + environ = {} + +def execl(file, *args): + """execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. """ + execv(file, args) + +def execle(file, *args): + """execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. """ + env = args[-1] + execve(file, args[:-1], env) + +def execlp(file, *args): + """execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. """ + execvp(file, args) + +def execlpe(file, *args): + """execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. """ + env = args[-1] + execvpe(file, args[:-1], env) + +def execvp(file, args): + """execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. """ + _execvpe(file, args) + +def execvpe(file, args, env): + """execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env , replacing the + current process. + args may be a list or tuple of strings. 
""" + _execvpe(file, args, env) + +__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) + +def _execvpe(file, args, env=None): + if env is not None: + func = execve + argrest = (args, env) + else: + func = execv + argrest = (args,) + env = environ + + head, tail = path.split(file) + if head: + func(file, *argrest) + return + if 'PATH' in env: + envpath = env['PATH'] + else: + envpath = defpath + PATH = envpath.split(pathsep) + saved_exc = None + saved_tb = None + for dir in PATH: + fullname = path.join(dir, file) + try: + func(fullname, *argrest) + except error, e: + tb = sys.exc_info()[2] + if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR + and saved_exc is None): + saved_exc = e + saved_tb = tb + if saved_exc: + raise error, saved_exc, saved_tb + raise error, e, tb + +# Change environ to automatically call putenv() if it exists +try: + # This will fail if there's no putenv + putenv +except NameError: + pass +else: + import UserDict + + # Fake unsetenv() for Windows + # not sure about os2 here but + # I'm guessing they are the same. + + if name in ('os2', 'nt'): + def unsetenv(key): + putenv(key, "") + + if name == "riscos": + # On RISC OS, all env access goes through getenv and putenv + from riscosenviron import _Environ + elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE + # But we store them as upper case + class _Environ(UserDict.IterableUserDict): + def __init__(self, environ): + UserDict.UserDict.__init__(self) + data = self.data + for k, v in environ.items(): + data[k.upper()] = v + def __setitem__(self, key, item): + putenv(key, item) + self.data[key.upper()] = item + def __getitem__(self, key): + return self.data[key.upper()] + try: + unsetenv + except NameError: + def __delitem__(self, key): + del self.data[key.upper()] + else: + def __delitem__(self, key): + unsetenv(key) + del self.data[key.upper()] + def clear(self): + for key in self.data.keys(): + unsetenv(key) + del self.data[key] + def pop(self, key, *args): + unsetenv(key) + return self.data.pop(key.upper(), *args) + def has_key(self, key): + return key.upper() in self.data + def __contains__(self, key): + return key.upper() in self.data + def get(self, key, failobj=None): + return self.data.get(key.upper(), failobj) + def update(self, dict=None, **kwargs): + if dict: + try: + keys = dict.keys() + except AttributeError: + # List of (key, value) + for k, v in dict: + self[k] = v + else: + # got keys + # cannot use items(), since mappings + # may not have them. + for k in keys: + self[k] = dict[k] + if kwargs: + self.update(kwargs) + def copy(self): + return dict(self) + + else: # Where Env Var Names Can Be Mixed Case + class _Environ(UserDict.IterableUserDict): + def __init__(self, environ): + UserDict.UserDict.__init__(self) + self.data = environ + def __setitem__(self, key, item): + putenv(key, item) + self.data[key] = item + def update(self, dict=None, **kwargs): + if dict: + try: + keys = dict.keys() + except AttributeError: + # List of (key, value) + for k, v in dict: + self[k] = v + else: + # got keys + # cannot use items(), since mappings + # may not have them. 
+ for k in keys: + self[k] = dict[k] + if kwargs: + self.update(kwargs) + try: + unsetenv + except NameError: + pass + else: + def __delitem__(self, key): + unsetenv(key) + del self.data[key] + def clear(self): + for key in self.data.keys(): + unsetenv(key) + del self.data[key] + def pop(self, key, *args): + unsetenv(key) + return self.data.pop(key, *args) + def copy(self): + return dict(self) + + + environ = _Environ(environ) + +def getenv(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default.""" + return environ.get(key, default) +__all__.append("getenv") + +def _exists(name): + return name in globals() + +# Supply spawn*() (probably only for Unix) +if _exists("fork") and not _exists("spawnv") and _exists("execv"): + + P_WAIT = 0 + P_NOWAIT = P_NOWAITO = 1 + + # XXX Should we support P_DETACH? I suppose it could fork()**2 + # and close the std I/O streams. Also, P_OVERLAY is the same + # as execv*()? + + def _spawnvef(mode, file, args, env, func): + # Internal helper; func is the exec*() function to use + pid = fork() + if not pid: + # Child + try: + if env is None: + func(file, args) + else: + func(file, args, env) + except: + _exit(127) + else: + # Parent + if mode == P_NOWAIT: + return pid # Caller is responsible for waiting! + while 1: + wpid, sts = waitpid(pid, 0) + if WIFSTOPPED(sts): + continue + elif WIFSIGNALED(sts): + return -WTERMSIG(sts) + elif WIFEXITED(sts): + return WEXITSTATUS(sts) + else: + raise error, "Not stopped, signaled or exited???" + + def spawnv(mode, file, args): + """spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execv) + + def spawnve(mode, file, args, env): + """spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execve) + + # Note: spawnvp[e] is't currently supported on Windows + + def spawnvp(mode, file, args): + """spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execvp) + + def spawnvpe(mode, file, args, env): + """spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
""" + return _spawnvef(mode, file, args, env, execvpe) + +if _exists("spawnv"): + # These aren't supplied by the basic Windows code + # but can be easily implemented in Python + + def spawnl(mode, file, *args): + """spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnv(mode, file, args) + + def spawnle(mode, file, *args): + """spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnve(mode, file, args[:-1], env) + + + __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",]) + + +if _exists("spawnvp"): + # At the moment, Windows doesn't implement spawnvp[e], + # so it won't have spawnlp[e] either. + def spawnlp(mode, file, *args): + """spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnvp(mode, file, args) + + def spawnlpe(mode, file, *args): + """spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnvpe(mode, file, args[:-1], env) + + + __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",]) + + +# Supply popen2 etc. (for Unix) +if _exists("fork"): + if not _exists("popen2"): + def popen2(cmd, mode="t", bufsize=-1): + """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' + may be a sequence, in which case arguments will be passed directly to + the program without shell intervention (as with os.spawnv()). If 'cmd' + is a string it will be passed to the shell (as with os.system()). If + 'bufsize' is specified, it sets the buffer size for the I/O pipes. The + file objects (child_stdin, child_stdout) are returned.""" + import warnings + msg = "os.popen2 is deprecated. Use the subprocess module." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + import subprocess + PIPE = subprocess.PIPE + p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring), + bufsize=bufsize, stdin=PIPE, stdout=PIPE, + close_fds=True) + return p.stdin, p.stdout + __all__.append("popen2") + + if not _exists("popen3"): + def popen3(cmd, mode="t", bufsize=-1): + """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' + may be a sequence, in which case arguments will be passed directly to + the program without shell intervention (as with os.spawnv()). If 'cmd' + is a string it will be passed to the shell (as with os.system()). If + 'bufsize' is specified, it sets the buffer size for the I/O pipes. 
The + file objects (child_stdin, child_stdout, child_stderr) are returned.""" + import warnings + msg = "os.popen3 is deprecated. Use the subprocess module." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + import subprocess + PIPE = subprocess.PIPE + p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring), + bufsize=bufsize, stdin=PIPE, stdout=PIPE, + stderr=PIPE, close_fds=True) + return p.stdin, p.stdout, p.stderr + __all__.append("popen3") + + if not _exists("popen4"): + def popen4(cmd, mode="t", bufsize=-1): + """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' + may be a sequence, in which case arguments will be passed directly to + the program without shell intervention (as with os.spawnv()). If 'cmd' + is a string it will be passed to the shell (as with os.system()). If + 'bufsize' is specified, it sets the buffer size for the I/O pipes. The + file objects (child_stdin, child_stdout_stderr) are returned.""" + import warnings + msg = "os.popen4 is deprecated. Use the subprocess module." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + import subprocess + PIPE = subprocess.PIPE + p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring), + bufsize=bufsize, stdin=PIPE, stdout=PIPE, + stderr=subprocess.STDOUT, close_fds=True) + return p.stdin, p.stdout + __all__.append("popen4") + +import copy_reg as _copy_reg + +def _make_stat_result(tup, dict): + return stat_result(tup, dict) + +def _pickle_stat_result(sr): + (type, args) = sr.__reduce__() + return (_make_stat_result, args) + +try: + _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result) +except NameError: # stat_result may not exist + pass + +def _make_statvfs_result(tup, dict): + return statvfs_result(tup, dict) + +def _pickle_statvfs_result(sr): + (type, args) = sr.__reduce__() + return (_make_statvfs_result, args) + +try: + _copy_reg.pickle(statvfs_result, _pickle_statvfs_result, + _make_statvfs_result) +except NameError: # statvfs_result may not exist + pass diff --git a/CVIssueCount/posixpath.py b/CVIssueCount/posixpath.py new file mode 100644 index 0000000..6578481 --- /dev/null +++ b/CVIssueCount/posixpath.py @@ -0,0 +1,439 @@ +"""Common operations on Posix pathnames. + +Instead of importing this module directly, import os and refer to +this module as os.path. The "os.path" name is an alias for this +module on Posix systems; on other systems (e.g. Mac, Windows), +os.path provides the same operations in a manner specific to that +platform, and is an alias to another module (e.g. macpath, ntpath). + +Some of this can actually be useful on non-Posix systems too, e.g. +for manipulation of the pathname component of URLs. +""" + +import os +import sys +import stat +import genericpath +import warnings +from genericpath import * +from genericpath import _unicode + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime","islink","exists","lexists","isdir","isfile", + "ismount","walk","expanduser","expandvars","normpath","abspath", + "samefile","sameopenfile","samestat", + "curdir","pardir","sep","pathsep","defpath","altsep","extsep", + "devnull","realpath","supports_unicode_filenames","relpath"] + +# strings representing various path-related bits and pieces +curdir = '.' +pardir = '..' +extsep = '.' +sep = '/' +pathsep = ':' +defpath = ':/bin:/usr/bin' +altsep = None +devnull = '/dev/null' + +# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. 
+# On MS-DOS this may also turn slashes into backslashes; however, other +# normalizations (such as optimizing '../' away) are not allowed +# (another function should be defined to do that). + +def normcase(s): + """Normalize case of pathname. Has no effect under Posix""" + return s + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. + +def isabs(s): + """Test whether a path is absolute""" + return s.startswith('/') + + +# Join pathnames. +# Ignore the previous parts if a part is absolute. +# Insert a '/' unless the first part is empty or already ends in '/'. + +def join(a, *p): + """Join two or more pathname components, inserting '/' as needed. + If any component is an absolute path, all previous path components + will be discarded. An empty last part will result in a path that + ends with a separator.""" + path = a + for b in p: + if b.startswith('/'): + path = b + elif path == '' or path.endswith('/'): + path += b + else: + path += '/' + b + return path + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). If the path ends in '/', tail will be empty. If there is no +# '/' in the path, head will be empty. +# Trailing '/'es are stripped from head unless it is the root. + +def split(p): + """Split a pathname. Returns tuple "(head, tail)" where "tail" is + everything after the final slash. Either part may be empty.""" + i = p.rfind('/') + 1 + head, tail = p[:i], p[i:] + if head and head != '/'*len(head): + head = head.rstrip('/') + return head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + return genericpath._splitext(p, sep, altsep, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + +# Split a pathname into a drive specification and the rest of the +# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. + +def splitdrive(p): + """Split a pathname into drive and path. On Posix, drive is always + empty.""" + return '', p + + +# Return the tail (basename) part of a path, same as split(path)[1]. + +def basename(p): + """Returns the final component of a pathname""" + i = p.rfind('/') + 1 + return p[i:] + + +# Return the head (dirname) part of a path, same as split(path)[0]. + +def dirname(p): + """Returns the directory component of a pathname""" + i = p.rfind('/') + 1 + head = p[:i] + if head and head != '/'*len(head): + head = head.rstrip('/') + return head + + +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link""" + try: + st = os.lstat(path) + except (os.error, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + +# Being true for dangling symbolic links is also useful. + +def lexists(path): + """Test whether a path exists. Returns True for broken symbolic links""" + try: + os.lstat(path) + except os.error: + return False + return True + + +# Are two filenames really pointing to the same file? + +def samefile(f1, f2): + """Test whether two pathnames reference the same actual file""" + s1 = os.stat(f1) + s2 = os.stat(f2) + return samestat(s1, s2) + + +# Are two open files really referencing the same file? +# (Not necessarily the same file descriptor!) 
+ +def sameopenfile(fp1, fp2): + """Test whether two open file objects reference the same file""" + s1 = os.fstat(fp1) + s2 = os.fstat(fp2) + return samestat(s1, s2) + + +# Are two stat buffers (obtained from stat, fstat or lstat) +# describing the same file? + +def samestat(s1, s2): + """Test whether two stat buffers reference the same file""" + return s1.st_ino == s2.st_ino and \ + s1.st_dev == s2.st_dev + + +# Is a path a mount point? +# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) + +def ismount(path): + """Test whether a path is a mount point""" + if islink(path): + # A symlink can never be a mount point + return False + try: + s1 = os.lstat(path) + s2 = os.lstat(join(path, '..')) + except os.error: + return False # It doesn't exist -- so not a mount point :-) + dev1 = s1.st_dev + dev2 = s2.st_dev + if dev1 != dev2: + return True # path/.. on a different device as path + ino1 = s1.st_ino + ino2 = s2.st_ino + if ino1 == ino2: + return True # path/.. is the same i-node as path + return False + + +# Directory tree walk. +# For each directory under top (including top itself, but excluding +# '.' and '..'), func(arg, dirname, filenames) is called, where +# dirname is the name of the directory and filenames is the list +# of files (and subdirectories etc.) in the directory. +# The func may modify the filenames list, to implement a filter, +# or to impose a different order of visiting. + +def walk(top, func, arg): + """Directory tree walk with callback function. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), call func(arg, dirname, fnames). + dirname is the name of the directory, and fnames a list of the names of + the files and subdirectories in dirname (excluding '.' and '..'). func + may modify the fnames list in-place (e.g. via del or slice assignment), + and walk will only recurse into the subdirectories whose names remain in + fnames; this can be used to implement a filter, or to impose a specific + order of visiting. No semantics are defined for, or required of, arg, + beyond that arg is always passed to func. It can be used, e.g., to pass + a filename pattern, or a mutable object designed to accumulate + statistics. Passing None for arg is common.""" + warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.", + stacklevel=2) + try: + names = os.listdir(top) + except os.error: + return + func(arg, top, names) + for name in names: + name = join(top, name) + try: + st = os.lstat(name) + except os.error: + continue + if stat.S_ISDIR(st.st_mode): + walk(name, func, arg) + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructions. 
If user or $HOME is unknown, + do nothing.""" + if not path.startswith('~'): + return path + i = path.find('/', 1) + if i < 0: + i = len(path) + if i == 1: + if 'HOME' not in os.environ: + import pwd + userhome = pwd.getpwuid(os.getuid()).pw_dir + else: + userhome = os.environ['HOME'] + else: + import pwd + try: + pwent = pwd.getpwnam(path[1:i]) + except KeyError: + return path + userhome = pwent.pw_dir + userhome = userhome.rstrip('/') + return (userhome + path[i:]) or '/' + + +# Expand paths containing shell variable substitutions. +# This expands the forms $variable and ${variable} only. +# Non-existent variables are left unchanged. + +_varprog = None +_uvarprog = None + +def expandvars(path): + """Expand shell variables of form $var and ${var}. Unknown variables + are left unchanged.""" + global _varprog, _uvarprog + if '$' not in path: + return path + if isinstance(path, _unicode): + if not _uvarprog: + import re + _uvarprog = re.compile(ur'\$(\w+|\{[^}]*\})', re.UNICODE) + varprog = _uvarprog + encoding = sys.getfilesystemencoding() + else: + if not _varprog: + import re + _varprog = re.compile(r'\$(\w+|\{[^}]*\})') + varprog = _varprog + encoding = None + i = 0 + while True: + m = varprog.search(path, i) + if not m: + break + i, j = m.span(0) + name = m.group(1) + if name.startswith('{') and name.endswith('}'): + name = name[1:-1] + if encoding: + name = name.encode(encoding) + if name in os.environ: + tail = path[j:] + value = os.environ[name] + if encoding: + value = value.decode(encoding) + path = path[:i] + value + i = len(path) + path += tail + else: + i = j + return path + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. +# It should be understood that this may change the meaning of the path +# if it contains symbolic links! + +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + # Preserve unicode (if path is unicode) + slash, dot = (u'/', u'.') if isinstance(path, _unicode) else ('/', '.') + if path == '': + return dot + initial_slashes = path.startswith('/') + # POSIX allows one or two initial slashes, but treats three or more + # as single slash. + if (initial_slashes and + path.startswith('//') and not path.startswith('///')): + initial_slashes = 2 + comps = path.split('/') + new_comps = [] + for comp in comps: + if comp in ('', '.'): + continue + if (comp != '..' or (not initial_slashes and not new_comps) or + (new_comps and new_comps[-1] == '..')): + new_comps.append(comp) + elif new_comps: + new_comps.pop() + comps = new_comps + path = slash.join(comps) + if initial_slashes: + path = slash*initial_slashes + path + return path or dot + + +def abspath(path): + """Return an absolute path.""" + if not isabs(path): + if isinstance(path, _unicode): + cwd = os.getcwdu() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + + +# Return a canonical path (i.e. the absolute location of a file on the +# filesystem). + +def realpath(filename): + """Return the canonical path of the specified filename, eliminating any +symbolic links encountered in the path.""" + path, ok = _joinrealpath('', filename, {}) + return abspath(path) + +# Join two paths, normalizing ang eliminating any symbolic links +# encountered in the second path. 
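The path helpers above (join, normpath, expanduser, expandvars) are pure string manipulation, so their behaviour is easy to check interactively. A small sketch using the standard posixpath module, which this vendored copy mirrors; the PREFIX variable and the paths are arbitrary examples:

    import os, posixpath
    print posixpath.join('/usr', 'local', '/opt', 'bin')   # '/opt/bin' (absolute part wins)
    print posixpath.normpath('/usr//local/./lib/../bin')   # '/usr/local/bin'
    os.environ['PREFIX'] = '/opt/cv'
    print posixpath.expandvars('$PREFIX/plugins')          # '/opt/cv/plugins'
    print posixpath.expanduser('~/comics')                 # e.g. '/home/user/comics'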
+def _joinrealpath(path, rest, seen): + if isabs(rest): + rest = rest[1:] + path = sep + + while rest: + name, _, rest = rest.partition(sep) + if not name or name == curdir: + # current dir + continue + if name == pardir: + # parent dir + if path: + path, name = split(path) + if name == pardir: + path = join(path, pardir, pardir) + else: + path = pardir + continue + newpath = join(path, name) + if not islink(newpath): + path = newpath + continue + # Resolve the symbolic link + if newpath in seen: + # Already seen this path + path = seen[newpath] + if path is not None: + # use cached value + continue + # The symlink is not resolved, so we must have a symlink loop. + # Return already resolved part + rest of the path unchanged. + return join(newpath, rest), False + seen[newpath] = None # not resolved symlink + path, ok = _joinrealpath(path, os.readlink(newpath), seen) + if not ok: + return join(path, rest), False + seen[newpath] = path # resolved symlink + + return path, True + + +supports_unicode_filenames = (sys.platform == 'darwin') + +def relpath(path, start=curdir): + """Return a relative version of a path""" + + if not path: + raise ValueError("no path specified") + + start_list = [x for x in abspath(start).split(sep) if x] + path_list = [x for x in abspath(path).split(sep) if x] + + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) diff --git a/CVIssueCount/random.py b/CVIssueCount/random.py new file mode 100644 index 0000000..3f96a37 --- /dev/null +++ b/CVIssueCount/random.py @@ -0,0 +1,910 @@ +"""Random variable generators. + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* Without a direct way to compute N steps forward, the semantics of + jumpahead(n) are weakened to simply jump to another distant state and rely + on the large period to avoid overlapping sequences. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. 
+ +""" + +from __future__ import division +from warnings import warn as _warn +from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType +from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil +from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin +from os import urandom as _urandom +from binascii import hexlify as _hexlify +import hashlib as _hashlib + +__all__ = ["Random","seed","random","uniform","randint","choice","sample", + "randrange","shuffle","normalvariate","lognormvariate", + "expovariate","vonmisesvariate","gammavariate","triangular", + "gauss","betavariate","paretovariate","weibullvariate", + "getstate","setstate","jumpahead", "WichmannHill", "getrandbits", + "SystemRandom"] + +NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) +TWOPI = 2.0*_pi +LOG4 = _log(4.0) +SG_MAGICCONST = 1.0 + _log(4.5) +BPF = 53 # Number of bits in a float +RECIP_BPF = 2**-BPF + + +# Translated by Guido van Rossum from C source provided by +# Adrian Baddeley. Adapted by Raymond Hettinger for use with +# the Mersenne Twister and os.urandom() core generators. + +import _random + +class Random(_random.Random): + """Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. Especially useful for multi-threaded programs, creating + a different instance of Random for each thread, and using the jumpahead() + method to ensure that the generated sequences seen by each thread don't + overlap. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), setstate() and jumpahead(). + Optionally, implement a getrandbits() method so that randrange() can cover + arbitrarily large ranges. + + """ + + VERSION = 3 # used by getstate/setstate + + def __init__(self, x=None): + """Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + """ + + self.seed(x) + self.gauss_next = None + + def seed(self, a=None): + """Initialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If a is not None or an int or long, hash(a) is used instead. + """ + + if a is None: + try: + # Seed with enough bytes to span the 19937 bit + # state space for the Mersenne Twister + a = long(_hexlify(_urandom(2500)), 16) + except NotImplementedError: + import time + a = long(time.time() * 256) # use fractional seconds + + super(Random, self).seed(a) + self.gauss_next = None + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, super(Random, self).getstate(), self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 3: + version, internalstate, self.gauss_next = state + super(Random, self).setstate(internalstate) + elif version == 2: + version, internalstate, self.gauss_next = state + # In version 2, the state was saved as signed ints, which causes + # inconsistencies between 32/64-bit systems. The state is + # really unsigned 32-bit ints, so we convert negative ints from + # version 2 to positive longs for version 3. 
+ try: + internalstate = tuple( long(x) % (2**32) for x in internalstate ) + except ValueError, e: + raise TypeError, e + super(Random, self).setstate(internalstate) + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + + def jumpahead(self, n): + """Change the internal state to one that is likely far away + from the current state. This method will not be in Py3.x, + so it is better to simply reseed. + """ + # The super.jumpahead() method uses shuffling to change state, + # so it needs a large and "interesting" n to work with. Here, + # we use hashing to create a large n for the shuffle. + s = repr(n) + repr(self.getstate()) + n = int(_hashlib.new('sha512', s).hexdigest(), 16) + super(Random, self).jumpahead(n) + +## ---- Methods below this point do not need to be overridden when +## ---- subclassing for the purpose of using a different core generator. + +## -------------------- pickle support ------------------- + + def __getstate__(self): # for pickle + return self.getstate() + + def __setstate__(self, state): # for pickle + self.setstate(state) + + def __reduce__(self): + return self.__class__, (), self.getstate() + +## -------------------- integer methods ------------------- + + def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=1L<<BPF): + """Choose a random item from range(start, stop[, step]). + + This fixes the problem with randint() which includes the + endpoint; in Python this is usually not what you want. + + """ + + # This code is a bit messy to make it fast for the + # common case while still doing adequate error checking. + istart = _int(start) + if istart != start: + raise ValueError, "non-integer arg 1 for randrange()" + if stop is None: + if istart > 0: + if istart >= _maxwidth: + return self._randbelow(istart) + return _int(self.random() * istart) + raise ValueError, "empty range for randrange()" + + # stop argument supplied. + istop = _int(stop) + if istop != stop: + raise ValueError, "non-integer stop for randrange()" + width = istop - istart + if step == 1 and width > 0: + # Note that + # int(istart + self.random()*width) + # instead would be incorrect. For example, consider istart + # = -2 and istop = 0. Then the guts would be in + # -2.0 to 0.0 exclusive on both ends (ignoring that random() + # might return 0.0), and because int() truncates toward 0, the + # final result would be -1 or 0 (instead of -2 or -1). + # istart + int(self.random()*width) + # would also be incorrect, for a subtler reason: the RHS + # can return a long, and then randrange() would also return + # a long, but we're supposed to return an int (for backward + # compatibility). + + if width >= _maxwidth: + return _int(istart + self._randbelow(width)) + return _int(istart + _int(self.random()*width)) + if step == 1: + raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width) + + # Non-unit step argument supplied. + istep = _int(step) + if istep != step: + raise ValueError, "non-integer step for randrange()" + if istep > 0: + n = (width + istep - 1) // istep + elif istep < 0: + n = (width + istep + 1) // istep + else: + raise ValueError, "zero step for randrange()" + + if n <= 0: + raise ValueError, "empty range for randrange()" + + if n >= _maxwidth: + return istart + istep*self._randbelow(n) + return istart + istep*_int(self.random() * n) + + def randint(self, a, b): + """Return random integer in range [a, b], including both end points.
+ """ + + return self.randrange(a, b+1) + + def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L< n-1 > 2**(k-2) + r = getrandbits(k) + while r >= n: + r = getrandbits(k) + return r + if n >= _maxwidth: + _warn("Underlying random() generator does not supply \n" + "enough bits to choose from a population range this large") + return _int(self.random() * n) + +## -------------------- sequence methods ------------------- + + def choice(self, seq): + """Choose a random element from a non-empty sequence.""" + return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty + + def shuffle(self, x, random=None): + """x, random=random.random -> shuffle list x in place; return None. + + Optional arg random is a 0-argument function returning a random + float in [0.0, 1.0); by default, the standard random.random. + + """ + + if random is None: + random = self.random + _int = int + for i in reversed(xrange(1, len(x))): + # pick an element in x[:i+1] with which to exchange x[i] + j = _int(random() * (i+1)) + x[i], x[j] = x[j], x[i] + + def sample(self, population, k): + """Chooses k unique random elements from a population sequence. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + To choose a sample in a range of integers, use xrange as an argument. + This is especially fast and space efficient for sampling from a + large population: sample(xrange(10000000), 60) + """ + + # Sampling without replacement entails tracking either potential + # selections (the pool) in a list or previous selections in a set. + + # When the number of selections is small compared to the + # population, then tracking selections is efficient, requiring + # only a small set and an occasional reselection. For + # a larger number of selections, the pool tracking method is + # preferred since the list takes less space than the + # set and it doesn't suffer from frequent reselections. + + n = len(population) + if not 0 <= k <= n: + raise ValueError("sample larger than population") + random = self.random + _int = int + result = [None] * k + setsize = 21 # size of a small set minus size of an empty list + if k > 5: + setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets + if n <= setsize or hasattr(population, "keys"): + # An n-length list is smaller than a k-length set, or this is a + # mapping type so the other algorithm wouldn't work. 
+ pool = list(population) + for i in xrange(k): # invariant: non-selected at [0,n-i) + j = _int(random() * (n-i)) + result[i] = pool[j] + pool[j] = pool[n-i-1] # move non-selected item into vacancy + else: + try: + selected = set() + selected_add = selected.add + for i in xrange(k): + j = _int(random() * n) + while j in selected: + j = _int(random() * n) + selected_add(j) + result[i] = population[j] + except (TypeError, KeyError): # handle (at least) sets + if isinstance(population, list): + raise + return self.sample(tuple(population), k) + return result + +## -------------------- real-valued distributions ------------------- + +## -------------------- uniform distribution ------------------- + + def uniform(self, a, b): + "Get a random number in the range [a, b) or [a, b] depending on rounding." + return a + (b-a) * self.random() + +## -------------------- triangular -------------------- + + def triangular(self, low=0.0, high=1.0, mode=None): + """Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + """ + u = self.random() + try: + c = 0.5 if mode is None else (mode - low) / (high - low) + except ZeroDivisionError: + return low + if u > c: + u = 1.0 - u + c = 1.0 - c + low, high = high, low + return low + (high - low) * (u * c) ** 0.5 + +## -------------------- normal distribution -------------------- + + def normalvariate(self, mu, sigma): + """Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + """ + # mu = mean, sigma = standard deviation + + # Uses Kinderman and Monahan method. Reference: Kinderman, + # A.J. and Monahan, J.F., "Computer generation of random + # variables using the ratio of uniform deviates", ACM Trans + # Math Software, 3, (1977), pp257-260. + + random = self.random + while 1: + u1 = random() + u2 = 1.0 - random() + z = NV_MAGICCONST*(u1-0.5)/u2 + zz = z*z/4.0 + if zz <= -_log(u2): + break + return mu + z*sigma + +## -------------------- lognormal distribution -------------------- + + def lognormvariate(self, mu, sigma): + """Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + """ + return _exp(self.normalvariate(mu, sigma)) + +## -------------------- exponential distribution -------------------- + + def expovariate(self, lambd): + """Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + """ + # lambd: rate lambd = 1/mean + # ('lambda' is a Python reserved word) + + # we use 1-random() instead of random() to preclude the + # possibility of taking the log of zero. + return -_log(1.0 - self.random())/lambd + +## -------------------- von Mises distribution -------------------- + + def vonmisesvariate(self, mu, kappa): + """Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. 
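The state-management and sequence methods above (getstate, setstate, shuffle, sample) together give reproducible draws from a private generator. A brief sketch using the standard random module, which this vendored copy tracks; the seed value is arbitrary:

    import random
    r = random.Random(42)              # a private generator; seed value is arbitrary
    state = r.getstate()
    deck = range(10)                   # Python 2: range() returns a list, so it can be shuffled
    r.shuffle(deck)                    # in-place shuffle
    hand = r.sample(deck, 3)           # three distinct elements; deck itself is unchanged
    delay = r.expovariate(1.0 / 5.0)   # exponentially distributed, mean 5.0
    r.setstate(state)                  # rewind the generator
    deck2 = range(10)
    r.shuffle(deck2)
    assert deck2 == deck               # same state -> same shuffle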
+ + """ + # mu: mean angle (in radians between 0 and 2*pi) + # kappa: concentration parameter kappa (>= 0) + # if kappa = 0 generate uniform random angle + + # Based upon an algorithm published in: Fisher, N.I., + # "Statistical Analysis of Circular Data", Cambridge + # University Press, 1993. + + # Thanks to Magnus Kessler for a correction to the + # implementation of step 4. + + random = self.random + if kappa <= 1e-6: + return TWOPI * random() + + s = 0.5 / kappa + r = s + _sqrt(1.0 + s * s) + + while 1: + u1 = random() + z = _cos(_pi * u1) + + d = z / (r + z) + u2 = random() + if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): + break + + q = 1.0 / r + f = (q + z) / (1.0 + q * z) + u3 = random() + if u3 > 0.5: + theta = (mu + _acos(f)) % TWOPI + else: + theta = (mu - _acos(f)) % TWOPI + + return theta + +## -------------------- gamma distribution -------------------- + + def gammavariate(self, alpha, beta): + """Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + """ + + # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 + + # Warning: a few older sources define the gamma distribution in terms + # of alpha > -1.0 + if alpha <= 0.0 or beta <= 0.0: + raise ValueError, 'gammavariate: alpha and beta must be > 0.0' + + random = self.random + if alpha > 1.0: + + # Uses R.C.H. Cheng, "The generation of Gamma + # variables with non-integral shape parameters", + # Applied Statistics, (1977), 26, No. 1, p71-74 + + ainv = _sqrt(2.0 * alpha - 1.0) + bbb = alpha - LOG4 + ccc = alpha + ainv + + while 1: + u1 = random() + if not 1e-7 < u1 < .9999999: + continue + u2 = 1.0 - random() + v = _log(u1/(1.0-u1))/ainv + x = alpha*_exp(v) + z = u1*u1*u2 + r = bbb+ccc*v-x + if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): + return x * beta + + elif alpha == 1.0: + # expovariate(1) + u = random() + while u <= 1e-7: + u = random() + return -_log(u) * beta + + else: # alpha is between 0 and 1 (exclusive) + + # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle + + while 1: + u = random() + b = (_e + alpha)/_e + p = b*u + if p <= 1.0: + x = p ** (1.0/alpha) + else: + x = -_log((b-p)/alpha) + u1 = random() + if p > 1.0: + if u1 <= x ** (alpha - 1.0): + break + elif u1 <= _exp(-x): + break + return x * beta + +## -------------------- Gauss (faster alternative) -------------------- + + def gauss(self, mu, sigma): + """Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. + + """ + + # When x and y are two variables from [0, 1), uniformly + # distributed, then + # + # cos(2*pi*x)*sqrt(-2*log(1-y)) + # sin(2*pi*x)*sqrt(-2*log(1-y)) + # + # are two *independent* variables with normal distribution + # (mu = 0, sigma = 1). + # (Lambert Meertens) + # (corrected version; bug discovered by Mike Miller, fixed by LM) + + # Multithreading note: When two threads call this function + # simultaneously, it is possible that they will receive the + # same return value. The window is very small though. To + # avoid this, you have to use a lock around all calls. (I + # didn't want to slow this down in the serial case by using a + # lock here.) 
+ + random = self.random + z = self.gauss_next + self.gauss_next = None + if z is None: + x2pi = random() * TWOPI + g2rad = _sqrt(-2.0 * _log(1.0 - random())) + z = _cos(x2pi) * g2rad + self.gauss_next = _sin(x2pi) * g2rad + + return mu + z*sigma + +## -------------------- beta -------------------- +## See +## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html +## for Ivan Frohne's insightful analysis of why the original implementation: +## +## def betavariate(self, alpha, beta): +## # Discrete Event Simulation in C, pp 87-88. +## +## y = self.expovariate(alpha) +## z = self.expovariate(1.0/beta) +## return z/(y+z) +## +## was dead wrong, and how it probably got that way. + + def betavariate(self, alpha, beta): + """Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + """ + + # This version due to Janne Sinkkonen, and matches all the std + # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). + y = self.gammavariate(alpha, 1.) + if y == 0: + return 0.0 + else: + return y / (y + self.gammavariate(beta, 1.)) + +## -------------------- Pareto -------------------- + + def paretovariate(self, alpha): + """Pareto distribution. alpha is the shape parameter.""" + # Jain, pg. 495 + + u = 1.0 - self.random() + return 1.0 / pow(u, 1.0/alpha) + +## -------------------- Weibull -------------------- + + def weibullvariate(self, alpha, beta): + """Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + """ + # Jain, pg. 499; bug fix courtesy Bill Arms + + u = 1.0 - self.random() + return alpha * pow(-_log(u), 1.0/beta) + +## -------------------- Wichmann-Hill ------------------- + +class WichmannHill(Random): + + VERSION = 1 # used by getstate/setstate + + def seed(self, a=None): + """Initialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If a is not None or an int or long, hash(a) is used instead. + + If a is an int or long, a is used directly. Distinct values between + 0 and 27814431486575L inclusive are guaranteed to yield distinct + internal states (this guarantee is specific to the default + Wichmann-Hill generator). + """ + + if a is None: + try: + a = long(_hexlify(_urandom(16)), 16) + except NotImplementedError: + import time + a = long(time.time() * 256) # use fractional seconds + + if not isinstance(a, (int, long)): + a = hash(a) + + a, x = divmod(a, 30268) + a, y = divmod(a, 30306) + a, z = divmod(a, 30322) + self._seed = int(x)+1, int(y)+1, int(z)+1 + + self.gauss_next = None + + def random(self): + """Get the next random number in the range [0.0, 1.0).""" + + # Wichman-Hill random number generator. + # + # Wichmann, B. A. & Hill, I. D. (1982) + # Algorithm AS 183: + # An efficient and portable pseudo-random number generator + # Applied Statistics 31 (1982) 188-190 + # + # see also: + # Correction to Algorithm AS 183 + # Applied Statistics 33 (1984) 123 + # + # McLeod, A. I. (1985) + # A remark on Algorithm AS 183 + # Applied Statistics 34 (1985),198-200 + + # This part is thread-unsafe: + # BEGIN CRITICAL SECTION + x, y, z = self._seed + x = (171 * x) % 30269 + y = (172 * y) % 30307 + z = (170 * z) % 30323 + self._seed = x, y, z + # END CRITICAL SECTION + + # Note: on a platform using IEEE-754 double arithmetic, this can + # never return 0.0 (asserted by Tim; proof too long for a comment). 
+ return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0 + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, self._seed, self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 1: + version, self._seed, self.gauss_next = state + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + + def jumpahead(self, n): + """Act as if n calls to random() were made, but quickly. + + n is an int, greater than or equal to 0. + + Example use: If you have 2 threads and know that each will + consume no more than a million random numbers, create two Random + objects r1 and r2, then do + r2.setstate(r1.getstate()) + r2.jumpahead(1000000) + Then r1 and r2 will use guaranteed-disjoint segments of the full + period. + """ + + if not n >= 0: + raise ValueError("n must be >= 0") + x, y, z = self._seed + x = int(x * pow(171, n, 30269)) % 30269 + y = int(y * pow(172, n, 30307)) % 30307 + z = int(z * pow(170, n, 30323)) % 30323 + self._seed = x, y, z + + def __whseed(self, x=0, y=0, z=0): + """Set the Wichmann-Hill seed from (x, y, z). + + These must be integers in the range [0, 256). + """ + + if not type(x) == type(y) == type(z) == int: + raise TypeError('seeds must be integers') + if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256): + raise ValueError('seeds must be in range(0, 256)') + if 0 == x == y == z: + # Initialize from current time + import time + t = long(time.time() * 256) + t = int((t&0xffffff) ^ (t>>24)) + t, x = divmod(t, 256) + t, y = divmod(t, 256) + t, z = divmod(t, 256) + # Zero is a poor seed, so substitute 1 + self._seed = (x or 1, y or 1, z or 1) + + self.gauss_next = None + + def whseed(self, a=None): + """Seed from hashable object's hash code. + + None or no argument seeds from current time. It is not guaranteed + that objects with distinct hash codes lead to distinct internal + states. + + This is obsolete, provided for compatibility with the seed routine + used prior to Python 2.1. Use the .seed() method instead. + """ + + if a is None: + self.__whseed() + return + a = hash(a) + a, x = divmod(a, 256) + a, y = divmod(a, 256) + a, z = divmod(a, 256) + x = (x + a) % 256 or 1 + y = (y + a) % 256 or 1 + z = (z + a) % 256 or 1 + self.__whseed(x, y, z) + +## --------------- Operating System Random Source ------------------ + +class SystemRandom(Random): + """Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + """ + + def random(self): + """Get the next random number in the range [0.0, 1.0).""" + return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF + + def getrandbits(self, k): + """getrandbits(k) -> x. Generates a long int with k random bits.""" + if k <= 0: + raise ValueError('number of bits must be greater than zero') + if k != int(k): + raise TypeError('number of bits should be an integer') + bytes = (k + 7) // 8 # bits / 8 and rounded up + x = long(_hexlify(_urandom(bytes)), 16) + return x >> (bytes * 8 - k) # trim excess bits + + def _stub(self, *args, **kwds): + "Stub method. Not used for a system random number generator." + return None + seed = jumpahead = _stub + + def _notimplemented(self, *args, **kwds): + "Method should not be called for a system random number generator." 
+ raise NotImplementedError('System entropy source does not have state.') + getstate = setstate = _notimplemented + +## -------------------- test program -------------------- + +def _test_generator(n, func, args): + import time + print n, 'times', func.__name__ + total = 0.0 + sqsum = 0.0 + smallest = 1e10 + largest = -1e10 + t0 = time.time() + for i in range(n): + x = func(*args) + total += x + sqsum = sqsum + x*x + smallest = min(x, smallest) + largest = max(x, largest) + t1 = time.time() + print round(t1-t0, 3), 'sec,', + avg = total/n + stddev = _sqrt(sqsum/n - avg*avg) + print 'avg %g, stddev %g, min %g, max %g' % \ + (avg, stddev, smallest, largest) + + +def _test(N=2000): + _test_generator(N, random, ()) + _test_generator(N, normalvariate, (0.0, 1.0)) + _test_generator(N, lognormvariate, (0.0, 1.0)) + _test_generator(N, vonmisesvariate, (0.0, 1.0)) + _test_generator(N, gammavariate, (0.01, 1.0)) + _test_generator(N, gammavariate, (0.1, 1.0)) + _test_generator(N, gammavariate, (0.1, 2.0)) + _test_generator(N, gammavariate, (0.5, 1.0)) + _test_generator(N, gammavariate, (0.9, 1.0)) + _test_generator(N, gammavariate, (1.0, 1.0)) + _test_generator(N, gammavariate, (2.0, 1.0)) + _test_generator(N, gammavariate, (20.0, 1.0)) + _test_generator(N, gammavariate, (200.0, 1.0)) + _test_generator(N, gauss, (0.0, 1.0)) + _test_generator(N, betavariate, (3.0, 3.0)) + _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) + +# Create one instance, seeded from current time, and export its methods +# as module-level functions. The functions share state across all uses +#(both in the user's code and in the Python libraries), but that's fine +# for most programs and is easier for the casual user than making them +# instantiate their own Random() instance. + +_inst = Random() +seed = _inst.seed +random = _inst.random +uniform = _inst.uniform +triangular = _inst.triangular +randint = _inst.randint +choice = _inst.choice +randrange = _inst.randrange +sample = _inst.sample +shuffle = _inst.shuffle +normalvariate = _inst.normalvariate +lognormvariate = _inst.lognormvariate +expovariate = _inst.expovariate +vonmisesvariate = _inst.vonmisesvariate +gammavariate = _inst.gammavariate +gauss = _inst.gauss +betavariate = _inst.betavariate +paretovariate = _inst.paretovariate +weibullvariate = _inst.weibullvariate +getstate = _inst.getstate +setstate = _inst.setstate +jumpahead = _inst.jumpahead +getrandbits = _inst.getrandbits + +if __name__ == '__main__': + _test() diff --git a/CVIssueCount/rfc822.py b/CVIssueCount/rfc822.py new file mode 100644 index 0000000..c1d0865 --- /dev/null +++ b/CVIssueCount/rfc822.py @@ -0,0 +1,1016 @@ +"""RFC 2822 message manipulation. + +Note: This is only a very rough sketch of a full RFC-822 parser; in particular +the tokenizing of addresses does not adhere to all the quoting rules. + +Note: RFC 2822 is a long awaited update to RFC 822. This module should +conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some +effort at RFC 2822 updates have been made, but a thorough audit has not been +performed. Consider any RFC 2822 non-conformance to be a bug. + + RFC 2822: http://www.faqs.org/rfcs/rfc2822.html + RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete) + +Directions for use: + +To create a Message object: first open a file, e.g.: + + fp = open(file, 'r') + +You can use any other legal way of getting an open file object, e.g. use +sys.stdin or call os.popen(). 
Then pass the open file object to the Message() +constructor: + + m = Message(fp) + +This class can work with any input object that supports a readline method. If +the input object has seek and tell capability, the rewindbody method will +work; also illegal lines will be pushed back onto the input stream. If the +input object lacks seek but has an `unread' method that can push back a line +of input, Message will use that to push back illegal lines. Thus this class +can be used to parse messages coming from a buffered stream. + +The optional `seekable' argument is provided as a workaround for certain stdio +libraries in which tell() discards buffered data before discovering that the +lseek() system call doesn't work. For maximum portability, you should set the +seekable argument to zero to prevent that initial \code{tell} when passing in +an unseekable object such as a file object created from a socket object. If +it is 1 on entry -- which it is by default -- the tell() method of the open +file object is called once; if this raises an exception, seekable is reset to +0. For other nonzero values of seekable, this test is not made. + +To get the text of a particular header there are several methods: + + str = m.getheader(name) + str = m.getrawheader(name) + +where name is the name of the header, e.g. 'Subject'. The difference is that +getheader() strips the leading and trailing whitespace, while getrawheader() +doesn't. Both functions retain embedded whitespace (including newlines) +exactly as they are specified in the header, and leave the case of the text +unchanged. + +For addresses and address lists there are functions + + realname, mailaddress = m.getaddr(name) + list = m.getaddrlist(name) + +where the latter returns a list of (realname, mailaddr) tuples. + +There is also a method + + time = m.getdate(name) + +which parses a Date-like field and returns a time-compatible tuple, +i.e. a tuple such as returned by time.localtime() or accepted by +time.mktime(). + +See the class definition for lower level access methods. + +There are also some utility functions here. +""" +# Cleanup and extensions by Eric S. Raymond + +import time + +from warnings import warnpy3k +warnpy3k("in 3.x, rfc822 has been removed in favor of the email package", + stacklevel=2) + +__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"] + +_blanklines = ('\r\n', '\n') # Optimization for islast() + + +class Message: + """Represents a single RFC 2822-compliant message.""" + + def __init__(self, fp, seekable = 1): + """Initialize the class instance and read the headers.""" + if seekable == 1: + # Exercise tell() to make sure it works + # (and then assume seek() works, too) + try: + fp.tell() + except (AttributeError, IOError): + seekable = 0 + self.fp = fp + self.seekable = seekable + self.startofheaders = None + self.startofbody = None + # + if self.seekable: + try: + self.startofheaders = self.fp.tell() + except IOError: + self.seekable = 0 + # + self.readheaders() + # + if self.seekable: + try: + self.startofbody = self.fp.tell() + except IOError: + self.seekable = 0 + + def rewindbody(self): + """Rewind the file to the start of the body (if seekable).""" + if not self.seekable: + raise IOError, "unseekable file" + self.fp.seek(self.startofbody) + + def readheaders(self): + """Read header lines. + + Read header lines up to the entirely blank line that terminates them. + The (normally blank) line that ends the headers is skipped, but not + included in the returned list. 
If a non-header line ends the headers, + (which is an error), an attempt is made to backspace over it; it is + never included in the returned list. + + The variable self.status is set to the empty string if all went well, + otherwise it is an error message. The variable self.headers is a + completely uninterpreted list of lines contained in the header (so + printing them will reproduce the header exactly as it appears in the + file). + """ + self.dict = {} + self.unixfrom = '' + self.headers = lst = [] + self.status = '' + headerseen = "" + firstline = 1 + startofline = unread = tell = None + if hasattr(self.fp, 'unread'): + unread = self.fp.unread + elif self.seekable: + tell = self.fp.tell + while 1: + if tell: + try: + startofline = tell() + except IOError: + startofline = tell = None + self.seekable = 0 + line = self.fp.readline() + if not line: + self.status = 'EOF in headers' + break + # Skip unix From name time lines + if firstline and line.startswith('From '): + self.unixfrom = self.unixfrom + line + continue + firstline = 0 + if headerseen and line[0] in ' \t': + # It's a continuation line. + lst.append(line) + x = (self.dict[headerseen] + "\n " + line.strip()) + self.dict[headerseen] = x.strip() + continue + elif self.iscomment(line): + # It's a comment. Ignore it. + continue + elif self.islast(line): + # Note! No pushback here! The delimiter line gets eaten. + break + headerseen = self.isheader(line) + if headerseen: + # It's a legal header line, save it. + lst.append(line) + self.dict[headerseen] = line[len(headerseen)+1:].strip() + continue + elif headerseen is not None: + # An empty header name. These aren't allowed in HTTP, but it's + # probably a benign mistake. Don't add the header, just keep + # going. + continue + else: + # It's not a header line; throw it back and stop here. + if not self.dict: + self.status = 'No headers' + else: + self.status = 'Non-header line where header expected' + # Try to undo the read. + if unread: + unread(line) + elif tell: + self.fp.seek(startofline) + else: + self.status = self.status + '; bad seek' + break + + def isheader(self, line): + """Determine whether a given line is a legal header. + + This method should return the header name, suitably canonicalized. + You may override this method in order to use Message parsing on tagged + data in RFC 2822-like formats with special header formats. + """ + i = line.find(':') + if i > -1: + return line[:i].lower() + return None + + def islast(self, line): + """Determine whether a line is a legal end of RFC 2822 headers. + + You may override this method if your application wants to bend the + rules, e.g. to strip trailing whitespace, or to recognize MH template + separators ('--------'). For convenience (e.g. for code reading from + sockets) a line consisting of \\r\\n also matches. + """ + return line in _blanklines + + def iscomment(self, line): + """Determine whether a line should be skipped entirely. + + You may override this method in order to use Message parsing on tagged + data in RFC 2822-like formats that support embedded comments or + free-text data. + """ + return False + + def getallmatchingheaders(self, name): + """Find all header lines matching a given header name. + + Look through the list of headers and find all lines matching a given + header name (and their continuation lines). A list of the lines is + returned, without interpretation. If the header does not occur, an + empty list is returned. If the header occurs multiple times, all + occurrences are returned. 
Case is not important in the header name. + """ + name = name.lower() + ':' + n = len(name) + lst = [] + hit = 0 + for line in self.headers: + if line[:n].lower() == name: + hit = 1 + elif not line[:1].isspace(): + hit = 0 + if hit: + lst.append(line) + return lst + + def getfirstmatchingheader(self, name): + """Get the first header line matching name. + + This is similar to getallmatchingheaders, but it returns only the + first matching header (and its continuation lines). + """ + name = name.lower() + ':' + n = len(name) + lst = [] + hit = 0 + for line in self.headers: + if hit: + if not line[:1].isspace(): + break + elif line[:n].lower() == name: + hit = 1 + if hit: + lst.append(line) + return lst + + def getrawheader(self, name): + """A higher-level interface to getfirstmatchingheader(). + + Return a string containing the literal text of the header but with the + keyword stripped. All leading, trailing and embedded whitespace is + kept in the string, however. Return None if the header does not + occur. + """ + + lst = self.getfirstmatchingheader(name) + if not lst: + return None + lst[0] = lst[0][len(name) + 1:] + return ''.join(lst) + + def getheader(self, name, default=None): + """Get the header value for a name. + + This is the normal interface: it returns a stripped version of the + header value for a given header name, or None if it doesn't exist. + This uses the dictionary version which finds the *last* such header. + """ + return self.dict.get(name.lower(), default) + get = getheader + + def getheaders(self, name): + """Get all values for a header. + + This returns a list of values for headers given more than once; each + value in the result list is stripped in the same way as the result of + getheader(). If the header is not given, return an empty list. + """ + result = [] + current = '' + have_header = 0 + for s in self.getallmatchingheaders(name): + if s[0].isspace(): + if current: + current = "%s\n %s" % (current, s.strip()) + else: + current = s.strip() + else: + if have_header: + result.append(current) + current = s[s.find(":") + 1:].strip() + have_header = 1 + if have_header: + result.append(current) + return result + + def getaddr(self, name): + """Get a single address from a header, as a tuple. + + An example return value: + ('Guido van Rossum', 'guido@cwi.nl') + """ + # New, by Ben Escoto + alist = self.getaddrlist(name) + if alist: + return alist[0] + else: + return (None, None) + + def getaddrlist(self, name): + """Get a list of addresses from a header. + + Retrieves a list of addresses from a header, where each address is a + tuple as returned by getaddr(). Scans all named headers, so it works + properly with multiple To: or Cc: headers for example. + """ + raw = [] + for h in self.getallmatchingheaders(name): + if h[0] in ' \t': + raw.append(h) + else: + if raw: + raw.append(', ') + i = h.find(':') + if i > 0: + addr = h[i+1:] + raw.append(addr) + alladdrs = ''.join(raw) + a = AddressList(alladdrs) + return a.addresslist + + def getdate(self, name): + """Retrieve a date field from a header. + + Retrieves a date field from the named header, returning a tuple + compatible with time.mktime(). + """ + try: + data = self[name] + except KeyError: + return None + return parsedate(data) + + def getdate_tz(self, name): + """Retrieve a date field from a header as a 10-tuple. + + The first 9 elements make up a tuple compatible with time.mktime(), + and the 10th is the offset of the poster's time zone from GMT/UTC. 
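Header access is dictionary-like, and the list-returning methods above handle repeated fields. A short sketch, assuming a message with two Cc lines (addresses are arbitrary examples):

    import rfc822, StringIO
    raw = ("To: a@example.org\r\n"
           "Cc: b@example.org\r\n"
           "Cc: Carol <c@example.org>\r\n"
           "\r\n")
    m = rfc822.Message(StringIO.StringIO(raw))
    print m['to']                # dictionary-style access; for repeats the last value wins
    print m.getheaders('cc')     # ['b@example.org', 'Carol <c@example.org>']
    print m.getaddrlist('cc')    # [('', 'b@example.org'), ('Carol', 'c@example.org')]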
+ """ + try: + data = self[name] + except KeyError: + return None + return parsedate_tz(data) + + + # Access as a dictionary (only finds *last* header of each type): + + def __len__(self): + """Get the number of headers in a message.""" + return len(self.dict) + + def __getitem__(self, name): + """Get a specific header, as from a dictionary.""" + return self.dict[name.lower()] + + def __setitem__(self, name, value): + """Set the value of a header. + + Note: This is not a perfect inversion of __getitem__, because any + changed headers get stuck at the end of the raw-headers list rather + than where the altered header was. + """ + del self[name] # Won't fail if it doesn't exist + self.dict[name.lower()] = value + text = name + ": " + value + for line in text.split("\n"): + self.headers.append(line + "\n") + + def __delitem__(self, name): + """Delete all occurrences of a specific header, if it is present.""" + name = name.lower() + if not name in self.dict: + return + del self.dict[name] + name = name + ':' + n = len(name) + lst = [] + hit = 0 + for i in range(len(self.headers)): + line = self.headers[i] + if line[:n].lower() == name: + hit = 1 + elif not line[:1].isspace(): + hit = 0 + if hit: + lst.append(i) + for i in reversed(lst): + del self.headers[i] + + def setdefault(self, name, default=""): + lowername = name.lower() + if lowername in self.dict: + return self.dict[lowername] + else: + text = name + ": " + default + for line in text.split("\n"): + self.headers.append(line + "\n") + self.dict[lowername] = default + return default + + def has_key(self, name): + """Determine whether a message contains the named header.""" + return name.lower() in self.dict + + def __contains__(self, name): + """Determine whether a message contains the named header.""" + return name.lower() in self.dict + + def __iter__(self): + return iter(self.dict) + + def keys(self): + """Get all of a message's header field names.""" + return self.dict.keys() + + def values(self): + """Get all of a message's header field values.""" + return self.dict.values() + + def items(self): + """Get all of a message's headers. + + Returns a list of name, value tuples. + """ + return self.dict.items() + + def __str__(self): + return ''.join(self.headers) + + +# Utility functions +# ----------------- + +# XXX Should fix unquote() and quote() to be really conformant. +# XXX The inverses of the parse functions may also be useful. + + +def unquote(s): + """Remove quotes from a string.""" + if len(s) > 1: + if s.startswith('"') and s.endswith('"'): + return s[1:-1].replace('\\\\', '\\').replace('\\"', '"') + if s.startswith('<') and s.endswith('>'): + return s[1:-1] + return s + + +def quote(s): + """Add quotes around a string.""" + return s.replace('\\', '\\\\').replace('"', '\\"') + + +def parseaddr(address): + """Parse an address into a (realname, mailaddr) tuple.""" + a = AddressList(address) + lst = a.addresslist + if not lst: + return (None, None) + return lst[0] + + +class AddrlistClass: + """Address parser class by Ben Escoto. + + To understand what this class does, it helps to have a copy of + RFC 2822 in front of you. + + http://www.faqs.org/rfcs/rfc2822.html + + Note: this class interface is deprecated and may be removed in the future. + Use rfc822.AddressList instead. + """ + + def __init__(self, field): + """Initialize a new instance. + + `field' is an unparsed address header field, containing one or more + addresses. 
+ """ + self.specials = '()<>@,:;.\"[]' + self.pos = 0 + self.LWS = ' \t' + self.CR = '\r\n' + self.atomends = self.specials + self.LWS + self.CR + # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it + # is obsolete syntax. RFC 2822 requires that we recognize obsolete + # syntax, so allow dots in phrases. + self.phraseends = self.atomends.replace('.', '') + self.field = field + self.commentlist = [] + + def gotonext(self): + """Parse up to the start of the next address.""" + while self.pos < len(self.field): + if self.field[self.pos] in self.LWS + '\n\r': + self.pos = self.pos + 1 + elif self.field[self.pos] == '(': + self.commentlist.append(self.getcomment()) + else: break + + def getaddrlist(self): + """Parse all addresses. + + Returns a list containing all of the addresses. + """ + result = [] + ad = self.getaddress() + while ad: + result += ad + ad = self.getaddress() + return result + + def getaddress(self): + """Parse the next address.""" + self.commentlist = [] + self.gotonext() + + oldpos = self.pos + oldcl = self.commentlist + plist = self.getphraselist() + + self.gotonext() + returnlist = [] + + if self.pos >= len(self.field): + # Bad email address technically, no domain. + if plist: + returnlist = [(' '.join(self.commentlist), plist[0])] + + elif self.field[self.pos] in '.@': + # email address is just an addrspec + # this isn't very efficient since we start over + self.pos = oldpos + self.commentlist = oldcl + addrspec = self.getaddrspec() + returnlist = [(' '.join(self.commentlist), addrspec)] + + elif self.field[self.pos] == ':': + # address is a group + returnlist = [] + + fieldlen = len(self.field) + self.pos += 1 + while self.pos < len(self.field): + self.gotonext() + if self.pos < fieldlen and self.field[self.pos] == ';': + self.pos += 1 + break + returnlist = returnlist + self.getaddress() + + elif self.field[self.pos] == '<': + # Address is a phrase then a route addr + routeaddr = self.getrouteaddr() + + if self.commentlist: + returnlist = [(' '.join(plist) + ' (' + \ + ' '.join(self.commentlist) + ')', routeaddr)] + else: returnlist = [(' '.join(plist), routeaddr)] + + else: + if plist: + returnlist = [(' '.join(self.commentlist), plist[0])] + elif self.field[self.pos] in self.specials: + self.pos += 1 + + self.gotonext() + if self.pos < len(self.field) and self.field[self.pos] == ',': + self.pos += 1 + return returnlist + + def getrouteaddr(self): + """Parse a route address (Return-path value). + + This method just skips all the route stuff and returns the addrspec. 
+ """ + if self.field[self.pos] != '<': + return + + expectroute = 0 + self.pos += 1 + self.gotonext() + adlist = "" + while self.pos < len(self.field): + if expectroute: + self.getdomain() + expectroute = 0 + elif self.field[self.pos] == '>': + self.pos += 1 + break + elif self.field[self.pos] == '@': + self.pos += 1 + expectroute = 1 + elif self.field[self.pos] == ':': + self.pos += 1 + else: + adlist = self.getaddrspec() + self.pos += 1 + break + self.gotonext() + + return adlist + + def getaddrspec(self): + """Parse an RFC 2822 addr-spec.""" + aslist = [] + + self.gotonext() + while self.pos < len(self.field): + if self.field[self.pos] == '.': + aslist.append('.') + self.pos += 1 + elif self.field[self.pos] == '"': + aslist.append('"%s"' % self.getquote()) + elif self.field[self.pos] in self.atomends: + break + else: aslist.append(self.getatom()) + self.gotonext() + + if self.pos >= len(self.field) or self.field[self.pos] != '@': + return ''.join(aslist) + + aslist.append('@') + self.pos += 1 + self.gotonext() + return ''.join(aslist) + self.getdomain() + + def getdomain(self): + """Get the complete domain name from an address.""" + sdlist = [] + while self.pos < len(self.field): + if self.field[self.pos] in self.LWS: + self.pos += 1 + elif self.field[self.pos] == '(': + self.commentlist.append(self.getcomment()) + elif self.field[self.pos] == '[': + sdlist.append(self.getdomainliteral()) + elif self.field[self.pos] == '.': + self.pos += 1 + sdlist.append('.') + elif self.field[self.pos] in self.atomends: + break + else: sdlist.append(self.getatom()) + return ''.join(sdlist) + + def getdelimited(self, beginchar, endchars, allowcomments = 1): + """Parse a header fragment delimited by special characters. + + `beginchar' is the start character for the fragment. If self is not + looking at an instance of `beginchar' then getdelimited returns the + empty string. + + `endchars' is a sequence of allowable end-delimiting characters. + Parsing stops when one of these is encountered. + + If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed + within the parsed fragment. + """ + if self.field[self.pos] != beginchar: + return '' + + slist = [''] + quote = 0 + self.pos += 1 + while self.pos < len(self.field): + if quote == 1: + slist.append(self.field[self.pos]) + quote = 0 + elif self.field[self.pos] in endchars: + self.pos += 1 + break + elif allowcomments and self.field[self.pos] == '(': + slist.append(self.getcomment()) + continue # have already advanced pos from getcomment + elif self.field[self.pos] == '\\': + quote = 1 + else: + slist.append(self.field[self.pos]) + self.pos += 1 + + return ''.join(slist) + + def getquote(self): + """Get a quote-delimited fragment from self's field.""" + return self.getdelimited('"', '"\r', 0) + + def getcomment(self): + """Get a parenthesis-delimited fragment from self's field.""" + return self.getdelimited('(', ')\r', 1) + + def getdomainliteral(self): + """Parse an RFC 2822 domain-literal.""" + return '[%s]' % self.getdelimited('[', ']\r', 0) + + def getatom(self, atomends=None): + """Parse an RFC 2822 atom. + + Optional atomends specifies a different set of end token delimiters + (the default is to use self.atomends). This is used e.g. in + getphraselist() since phrase endings must not include the `.' 
(which + is legal in phrases).""" + atomlist = [''] + if atomends is None: + atomends = self.atomends + + while self.pos < len(self.field): + if self.field[self.pos] in atomends: + break + else: atomlist.append(self.field[self.pos]) + self.pos += 1 + + return ''.join(atomlist) + + def getphraselist(self): + """Parse a sequence of RFC 2822 phrases. + + A phrase is a sequence of words, which are in turn either RFC 2822 + atoms or quoted-strings. Phrases are canonicalized by squeezing all + runs of continuous whitespace into one space. + """ + plist = [] + + while self.pos < len(self.field): + if self.field[self.pos] in self.LWS: + self.pos += 1 + elif self.field[self.pos] == '"': + plist.append(self.getquote()) + elif self.field[self.pos] == '(': + self.commentlist.append(self.getcomment()) + elif self.field[self.pos] in self.phraseends: + break + else: + plist.append(self.getatom(self.phraseends)) + + return plist + +class AddressList(AddrlistClass): + """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" + def __init__(self, field): + AddrlistClass.__init__(self, field) + if field: + self.addresslist = self.getaddrlist() + else: + self.addresslist = [] + + def __len__(self): + return len(self.addresslist) + + def __str__(self): + return ", ".join(map(dump_address_pair, self.addresslist)) + + def __add__(self, other): + # Set union + newaddr = AddressList(None) + newaddr.addresslist = self.addresslist[:] + for x in other.addresslist: + if not x in self.addresslist: + newaddr.addresslist.append(x) + return newaddr + + def __iadd__(self, other): + # Set union, in-place + for x in other.addresslist: + if not x in self.addresslist: + self.addresslist.append(x) + return self + + def __sub__(self, other): + # Set difference + newaddr = AddressList(None) + for x in self.addresslist: + if not x in other.addresslist: + newaddr.addresslist.append(x) + return newaddr + + def __isub__(self, other): + # Set difference, in-place + for x in other.addresslist: + if x in self.addresslist: + self.addresslist.remove(x) + return self + + def __getitem__(self, index): + # Make indexing, slices, and 'in' work + return self.addresslist[index] + +def dump_address_pair(pair): + """Dump a (name, address) pair in a canonicalized form.""" + if pair[0]: + return '"' + pair[0] + '" <' + pair[1] + '>' + else: + return pair[1] + +# Parse a date field + +_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', + 'aug', 'sep', 'oct', 'nov', 'dec', + 'january', 'february', 'march', 'april', 'may', 'june', 'july', + 'august', 'september', 'october', 'november', 'december'] +_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] + +# The timezone table does not include the military time zones defined +# in RFC822, other than Z. According to RFC1123, the description in +# RFC822 gets the signs wrong, so we can't rely on any such time +# zones. RFC1123 recommends that numeric timezone indicators be used +# instead of timezone names. + +_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, + 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) + 'EST': -500, 'EDT': -400, # Eastern + 'CST': -600, 'CDT': -500, # Central + 'MST': -700, 'MDT': -600, # Mountain + 'PST': -800, 'PDT': -700 # Pacific + } + + +def parsedate_tz(data): + """Convert a date string to a time tuple. + + Accounts for military timezones. + """ + if not data: + return None + data = data.split() + if data[0][-1] in (',', '.') or data[0].lower() in _daynames: + # There's a dayname here. 
Skip it + del data[0] + else: + # no space after the "weekday,"? + i = data[0].rfind(',') + if i >= 0: + data[0] = data[0][i+1:] + if len(data) == 3: # RFC 850 date, deprecated + stuff = data[0].split('-') + if len(stuff) == 3: + data = stuff + data[1:] + if len(data) == 4: + s = data[3] + i = s.find('+') + if i > 0: + data[3:] = [s[:i], s[i+1:]] + else: + data.append('') # Dummy tz + if len(data) < 5: + return None + data = data[:5] + [dd, mm, yy, tm, tz] = data + mm = mm.lower() + if not mm in _monthnames: + dd, mm = mm, dd.lower() + if not mm in _monthnames: + return None + mm = _monthnames.index(mm)+1 + if mm > 12: mm = mm - 12 + if dd[-1] == ',': + dd = dd[:-1] + i = yy.find(':') + if i > 0: + yy, tm = tm, yy + if yy[-1] == ',': + yy = yy[:-1] + if not yy[0].isdigit(): + yy, tz = tz, yy + if tm[-1] == ',': + tm = tm[:-1] + tm = tm.split(':') + if len(tm) == 2: + [thh, tmm] = tm + tss = '0' + elif len(tm) == 3: + [thh, tmm, tss] = tm + else: + return None + try: + yy = int(yy) + dd = int(dd) + thh = int(thh) + tmm = int(tmm) + tss = int(tss) + except ValueError: + return None + tzoffset = None + tz = tz.upper() + if tz in _timezones: + tzoffset = _timezones[tz] + else: + try: + tzoffset = int(tz) + except ValueError: + pass + # Convert a timezone offset into seconds ; -0500 -> -18000 + if tzoffset: + if tzoffset < 0: + tzsign = -1 + tzoffset = -tzoffset + else: + tzsign = 1 + tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) + return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset) + + +def parsedate(data): + """Convert a time string to a time tuple.""" + t = parsedate_tz(data) + if t is None: + return t + return t[:9] + + +def mktime_tz(data): + """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.""" + if data[9] is None: + # No zone info, so localtime is better assumption than GMT + return time.mktime(data[:8] + (-1,)) + else: + t = time.mktime(data[:8] + (0,)) + return t - data[9] - time.timezone + +def formatdate(timeval=None): + """Returns time format preferred for Internet standards. + + Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 + + According to RFC 1123, day and month names must always be in + English. If not for that, this code could use strftime(). It + can't because strftime() honors the locale and could generated + non-English names. + """ + if timeval is None: + timeval = time.time() + timeval = time.gmtime(timeval) + return "%s, %02d %s %04d %02d:%02d:%02d GMT" % ( + ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]], + timeval[2], + ("Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1], + timeval[0], timeval[3], timeval[4], timeval[5]) + + +# When used as script, run a small test program. +# The first command line argument must be a filename containing one +# message in RFC-822 format. 
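# [Editorial sketch -- illustrative only, not part of the vendored rfc822 module or of this patch.]
# The date helpers above fit together like this: parsedate_tz() returns a
# 10-tuple whose last item is the UTC offset in seconds (or None), mktime_tz()
# collapses that tuple into a UTC timestamp, and formatdate() renders a
# timestamp back in RFC 822 form (always expressed as GMT). Assuming the
# module's names are in scope:
stamp = mktime_tz(parsedate_tz('Sun, 06 Nov 1994 08:49:37 -0500'))
print formatdate(stamp)    # -> 'Sun, 06 Nov 1994 13:49:37 GMT'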
+ +if __name__ == '__main__': + import sys, os + file = os.path.join(os.environ['HOME'], 'Mail/inbox/1') + if sys.argv[1:]: file = sys.argv[1] + f = open(file, 'r') + m = Message(f) + print 'From:', m.getaddr('from') + print 'To:', m.getaddrlist('to') + print 'Subject:', m.getheader('subject') + print 'Date:', m.getheader('date') + date = m.getdate_tz('date') + tz = date[-1] + date = time.localtime(mktime_tz(date)) + if date: + print 'ParsedDate:', time.asctime(date), + hhmmss = tz + hhmm, ss = divmod(hhmmss, 60) + hh, mm = divmod(hhmm, 60) + print "%+03d%02d" % (hh, mm), + if ss: print ".%02d" % ss, + print + else: + print 'ParsedDate:', None + m.rewindbody() + n = 0 + while f.readline(): + n += 1 + print 'Lines:', n + print '-'*70 + print 'len =', len(m) + if 'Date' in m: print 'Date =', m['Date'] + if 'X-Nonsense' in m: pass + print 'keys =', m.keys() + print 'values =', m.values() + print 'items =', m.items() diff --git a/CVIssueCount/socket.py b/CVIssueCount/socket.py new file mode 100644 index 0000000..614af29 --- /dev/null +++ b/CVIssueCount/socket.py @@ -0,0 +1,577 @@ +# Wrapper module for _socket, providing some additional facilities +# implemented in Python. + +"""\ +This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +ssl() -- secure socket layer support (only available if configured) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +Integer constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +""" + +import _socket +from _socket import * +from functools import partial +from types import MethodType + +try: + import _ssl +except ImportError: + # no SSL support + pass +else: + def ssl(sock, keyfile=None, certfile=None): + # we do an internal import here because the ssl + # module imports the socket module + import ssl as _realssl + warnings.warn("socket.ssl() is deprecated. 
Use ssl.wrap_socket() instead.", + DeprecationWarning, stacklevel=2) + return _realssl.sslwrap_simple(sock, keyfile, certfile) + + # we need to import the same constants we used to... + from _ssl import SSLError as sslerror + from _ssl import \ + RAND_add, \ + RAND_status, \ + SSL_ERROR_ZERO_RETURN, \ + SSL_ERROR_WANT_READ, \ + SSL_ERROR_WANT_WRITE, \ + SSL_ERROR_WANT_X509_LOOKUP, \ + SSL_ERROR_SYSCALL, \ + SSL_ERROR_SSL, \ + SSL_ERROR_WANT_CONNECT, \ + SSL_ERROR_EOF, \ + SSL_ERROR_INVALID_ERROR_CODE + try: + from _ssl import RAND_egd + except ImportError: + # LibreSSL does not provide RAND_egd + pass + +import os, sys, warnings + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +try: + import errno +except ImportError: + errno = None +EBADF = getattr(errno, 'EBADF', 9) +EINTR = getattr(errno, 'EINTR', 4) + +__all__ = ["getfqdn", "create_connection"] +__all__.extend(os._get_exports_list(_socket)) + + +_realsocket = socket + +# WSA error codes +if sys.platform.lower().startswith("win"): + errorTab = {} + errorTab[10004] = "The operation was interrupted." + errorTab[10009] = "A bad file handle was passed." + errorTab[10013] = "Permission denied." + errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT + errorTab[10022] = "An invalid operation was attempted." + errorTab[10035] = "The socket operation would block" + errorTab[10036] = "A blocking operation is already in progress." + errorTab[10048] = "The network address is in use." + errorTab[10054] = "The connection has been reset." + errorTab[10058] = "The network has been shut down." + errorTab[10060] = "The operation timed out." + errorTab[10061] = "Connection refused." + errorTab[10063] = "The name is too long." + errorTab[10064] = "The host is down." + errorTab[10065] = "The host is unreachable." + __all__.append("errorTab") + + + +def getfqdn(name=''): + """Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available, hostname + from gethostname() is returned. + """ + name = name.strip() + if not name or name == '0.0.0.0': + name = gethostname() + try: + hostname, aliases, ipaddrs = gethostbyaddr(name) + except error: + pass + else: + aliases.insert(0, hostname) + for name in aliases: + if '.' in name: + break + else: + name = hostname + return name + + +_socketmethods = ( + 'bind', 'connect', 'connect_ex', 'fileno', 'listen', + 'getpeername', 'getsockname', 'getsockopt', 'setsockopt', + 'sendall', 'setblocking', + 'settimeout', 'gettimeout', 'shutdown') + +if os.name == "nt": + _socketmethods = _socketmethods + ('ioctl',) + +if sys.platform == "riscos": + _socketmethods = _socketmethods + ('sleeptaskw',) + +# All the method names that must be delegated to either the real socket +# object or the _closedsocket object. +_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into", + "send", "sendto") + +class _closedsocket(object): + __slots__ = [] + def _dummy(*args): + raise error(EBADF, 'Bad file descriptor') + # All _delegate_methods must also be initialized here. + send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy + __getattr__ = _dummy + +# Wrapper around platform socket objects. This implements +# a platform-independent dup() functionality. The +# implementation currently relies on reference counting +# to close the underlying socket object. 
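# [Editorial sketch -- illustrative only, not part of the vendored socket module or of this patch.]
# The _socketobject wrapper defined just below copies the bound recv/send/...
# methods of the real _socket object onto itself, and close() swaps them for
# _closedsocket stubs so that any later call raises EBADF instead of touching a
# possibly recycled file descriptor. A hypothetical, stripped-down rendering of
# that pattern (class and argument names invented for illustration):
class _DelegatingSketch(object):
    def __init__(self, real_sock, names=("recv", "send")):
        self._sock = real_sock
        for name in names:                  # copy bound methods across
            setattr(self, name, getattr(real_sock, name))

    def close(self):
        stub = _closedsocket()              # its _dummy raises error(EBADF, ...)
        self._sock = stub
        for name in ("recv", "send"):
            setattr(self, name, stub._dummy)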
+class _socketobject(object): + + __doc__ = _realsocket.__doc__ + + __slots__ = ["_sock", "__weakref__"] + list(_delegate_methods) + + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): + if _sock is None: + _sock = _realsocket(family, type, proto) + self._sock = _sock + for method in _delegate_methods: + setattr(self, method, getattr(_sock, method)) + + def close(self, _closedsocket=_closedsocket, + _delegate_methods=_delegate_methods, setattr=setattr): + # This function should not reference any globals. See issue #808164. + self._sock = _closedsocket() + dummy = self._sock._dummy + for method in _delegate_methods: + setattr(self, method, dummy) + close.__doc__ = _realsocket.close.__doc__ + + def accept(self): + sock, addr = self._sock.accept() + return _socketobject(_sock=sock), addr + accept.__doc__ = _realsocket.accept.__doc__ + + def dup(self): + """dup() -> socket object + + Return a new socket object connected to the same system resource.""" + return _socketobject(_sock=self._sock) + + def makefile(self, mode='r', bufsize=-1): + """makefile([mode[, bufsize]]) -> file object + + Return a regular file object corresponding to the socket. The mode + and bufsize arguments are as for the built-in open() function.""" + return _fileobject(self._sock, mode, bufsize) + + family = property(lambda self: self._sock.family, doc="the socket family") + type = property(lambda self: self._sock.type, doc="the socket type") + proto = property(lambda self: self._sock.proto, doc="the socket protocol") + +def meth(name,self,*args): + return getattr(self._sock,name)(*args) + +for _m in _socketmethods: + p = partial(meth,_m) + p.__name__ = _m + p.__doc__ = getattr(_realsocket,_m).__doc__ + m = MethodType(p,None,_socketobject) + setattr(_socketobject,_m,m) + +socket = SocketType = _socketobject + +class _fileobject(object): + """Faux file object attached to a socket object.""" + + default_bufsize = 8192 + name = "" + + __slots__ = ["mode", "bufsize", "softspace", + # "closed" is a property, see below + "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len", + "_close"] + + def __init__(self, sock, mode='rb', bufsize=-1, close=False): + self._sock = sock + self.mode = mode # Not actually used in this version + if bufsize < 0: + bufsize = self.default_bufsize + self.bufsize = bufsize + self.softspace = False + # _rbufsize is the suggested recv buffer size. It is *strictly* + # obeyed within readline() for recv calls. If it is larger than + # default_bufsize it will be used for recv calls within read(). + if bufsize == 0: + self._rbufsize = 1 + elif bufsize == 1: + self._rbufsize = self.default_bufsize + else: + self._rbufsize = bufsize + self._wbufsize = bufsize + # We use StringIO for the read buffer to avoid holding a list + # of variously sized string objects which have been known to + # fragment the heap due to how they are malloc()ed and often + # realloc()ed down much smaller than their original allocation. 
+ self._rbuf = StringIO() + self._wbuf = [] # A list of strings + self._wbuf_len = 0 + self._close = close + + def _getclosed(self): + return self._sock is None + closed = property(_getclosed, doc="True if the file is closed") + + def close(self): + try: + if self._sock: + self.flush() + finally: + if self._close: + self._sock.close() + self._sock = None + + def __del__(self): + try: + self.close() + except: + # close() may fail if __init__ didn't complete + pass + + def flush(self): + if self._wbuf: + data = "".join(self._wbuf) + self._wbuf = [] + self._wbuf_len = 0 + buffer_size = max(self._rbufsize, self.default_bufsize) + data_size = len(data) + write_offset = 0 + view = memoryview(data) + try: + while write_offset < data_size: + self._sock.sendall(view[write_offset:write_offset+buffer_size]) + write_offset += buffer_size + finally: + if write_offset < data_size: + remainder = data[write_offset:] + del view, data # explicit free + self._wbuf.append(remainder) + self._wbuf_len = len(remainder) + + def fileno(self): + return self._sock.fileno() + + def write(self, data): + data = str(data) # XXX Should really reject non-string non-buffers + if not data: + return + self._wbuf.append(data) + self._wbuf_len += len(data) + if (self._wbufsize == 0 or + (self._wbufsize == 1 and '\n' in data) or + (self._wbufsize > 1 and self._wbuf_len >= self._wbufsize)): + self.flush() + + def writelines(self, list): + # XXX We could do better here for very long lists + # XXX Should really reject non-string non-buffers + lines = filter(None, map(str, list)) + self._wbuf_len += sum(map(len, lines)) + self._wbuf.extend(lines) + if (self._wbufsize <= 1 or + self._wbuf_len >= self._wbufsize): + self.flush() + + def read(self, size=-1): + # Use max, disallow tiny reads in a loop as they are very inefficient. + # We never leave read() with any leftover data from a new recv() call + # in our internal buffer. + rbufsize = max(self._rbufsize, self.default_bufsize) + # Our use of StringIO rather than lists of string objects returned by + # recv() minimizes memory usage and fragmentation that occurs when + # rbufsize is large compared to the typical return value of recv(). + buf = self._rbuf + buf.seek(0, 2) # seek end + if size < 0: + # Read until EOF + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + try: + data = self._sock.recv(rbufsize) + except error, e: + if e.args[0] == EINTR: + continue + raise + if not data: + break + buf.write(data) + return buf.getvalue() + else: + # Read until size bytes or EOF seen, whichever comes first + buf_len = buf.tell() + if buf_len >= size: + # Already have size bytes in our buffer? Extract and return. + buf.seek(0) + rv = buf.read(size) + self._rbuf = StringIO() + self._rbuf.write(buf.read()) + return rv + + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + left = size - buf_len + # recv() will malloc the amount of memory given as its + # parameter even though it often returns much less data + # than that. The returned data string is short lived + # as we copy it into a StringIO and free it. This avoids + # fragmentation issues on many platforms. + try: + data = self._sock.recv(left) + except error, e: + if e.args[0] == EINTR: + continue + raise + if not data: + break + n = len(data) + if n == size and not buf_len: + # Shortcut. Avoid buffer data copies when: + # - We have no data in our buffer. + # AND + # - Our call to recv returned exactly the + # number of bytes we were asked to read. 
+ return data + if n == left: + buf.write(data) + del data # explicit free + break + assert n <= left, "recv(%d) returned %d bytes" % (left, n) + buf.write(data) + buf_len += n + del data # explicit free + #assert buf_len == buf.tell() + return buf.getvalue() + + def readline(self, size=-1): + buf = self._rbuf + buf.seek(0, 2) # seek end + if buf.tell() > 0: + # check if we already have it in our buffer + buf.seek(0) + bline = buf.readline(size) + if bline.endswith('\n') or len(bline) == size: + self._rbuf = StringIO() + self._rbuf.write(buf.read()) + return bline + del bline + if size < 0: + # Read until \n or EOF, whichever comes first + if self._rbufsize <= 1: + # Speed up unbuffered case + buf.seek(0) + buffers = [buf.read()] + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + data = None + recv = self._sock.recv + while True: + try: + while data != "\n": + data = recv(1) + if not data: + break + buffers.append(data) + except error, e: + # The try..except to catch EINTR was moved outside the + # recv loop to avoid the per byte overhead. + if e.args[0] == EINTR: + continue + raise + break + return "".join(buffers) + + buf.seek(0, 2) # seek end + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + try: + data = self._sock.recv(self._rbufsize) + except error, e: + if e.args[0] == EINTR: + continue + raise + if not data: + break + nl = data.find('\n') + if nl >= 0: + nl += 1 + buf.write(data[:nl]) + self._rbuf.write(data[nl:]) + del data + break + buf.write(data) + return buf.getvalue() + else: + # Read until size bytes or \n or EOF seen, whichever comes first + buf.seek(0, 2) # seek end + buf_len = buf.tell() + if buf_len >= size: + buf.seek(0) + rv = buf.read(size) + self._rbuf = StringIO() + self._rbuf.write(buf.read()) + return rv + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + try: + data = self._sock.recv(self._rbufsize) + except error, e: + if e.args[0] == EINTR: + continue + raise + if not data: + break + left = size - buf_len + # did we just receive a newline? + nl = data.find('\n', 0, left) + if nl >= 0: + nl += 1 + # save the excess data to _rbuf + self._rbuf.write(data[nl:]) + if buf_len: + buf.write(data[:nl]) + break + else: + # Shortcut. Avoid data copy through buf when returning + # a substring of our first recv(). + return data[:nl] + n = len(data) + if n == size and not buf_len: + # Shortcut. Avoid data copy through buf when + # returning exactly all of our first recv(). + return data + if n >= left: + buf.write(data[:left]) + self._rbuf.write(data[left:]) + break + buf.write(data) + buf_len += n + #assert buf_len == buf.tell() + return buf.getvalue() + + def readlines(self, sizehint=0): + total = 0 + list = [] + while True: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + + # Iterator protocols + + def __iter__(self): + return self + + def next(self): + line = self.readline() + if not line: + raise StopIteration + return line + +_GLOBAL_DEFAULT_TIMEOUT = object() + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. 
If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. + """ + + host, port = address + err = None + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + else: + raise error("getaddrinfo returns an empty list") diff --git a/CVIssueCount/ssl.py b/CVIssueCount/ssl.py new file mode 100644 index 0000000..f3e5123 --- /dev/null +++ b/CVIssueCount/ssl.py @@ -0,0 +1,464 @@ +# Wrapper module for _ssl, providing some additional facilities +# implemented in Python. Written by Bill Janssen. + +"""\ +This module provides some more Pythonic support for SSL. + +Object types: + + SSLSocket -- subtype of socket.socket which does SSL over the socket + +Exceptions: + + SSLError -- exception raised for I/O errors + +Functions: + + cert_time_to_seconds -- convert time string used for certificate + notBefore and notAfter functions to integer + seconds past the Epoch (the time values + returned from time.time()) + + fetch_server_certificate (HOST, PORT) -- fetch the certificate provided + by the server running on HOST at port PORT. No + validation of the certificate is performed. + +Integer constants: + +SSL_ERROR_ZERO_RETURN +SSL_ERROR_WANT_READ +SSL_ERROR_WANT_WRITE +SSL_ERROR_WANT_X509_LOOKUP +SSL_ERROR_SYSCALL +SSL_ERROR_SSL +SSL_ERROR_WANT_CONNECT + +SSL_ERROR_EOF +SSL_ERROR_INVALID_ERROR_CODE + +The following group define certificate requirements that one side is +allowing/requiring from the other side: + +CERT_NONE - no certificates from the other side are required (or will + be looked at if provided) +CERT_OPTIONAL - certificates are not required, but if provided will be + validated, and if validation fails, the connection will + also fail +CERT_REQUIRED - certificates are required, and will be validated, and + if validation fails, the connection will also fail + +The following constants identify various SSL protocol variants: + +PROTOCOL_SSLv2 +PROTOCOL_SSLv3 +PROTOCOL_SSLv23 +PROTOCOL_TLSv1 +""" + +import textwrap + +import _ssl # if we can't import it, let the error propagate + +from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION +from _ssl import SSLError +from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED +from _ssl import RAND_status, RAND_egd, RAND_add +from _ssl import \ + SSL_ERROR_ZERO_RETURN, \ + SSL_ERROR_WANT_READ, \ + SSL_ERROR_WANT_WRITE, \ + SSL_ERROR_WANT_X509_LOOKUP, \ + SSL_ERROR_SYSCALL, \ + SSL_ERROR_SSL, \ + SSL_ERROR_WANT_CONNECT, \ + SSL_ERROR_EOF, \ + SSL_ERROR_INVALID_ERROR_CODE +from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 +_PROTOCOL_NAMES = { + PROTOCOL_TLSv1: "TLSv1", + PROTOCOL_SSLv23: "SSLv23", + PROTOCOL_SSLv3: "SSLv3", +} +try: + from _ssl import PROTOCOL_SSLv2 +except ImportError: + pass +else: + _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2" + +from socket import socket, _fileobject, _delegate_methods, error as socket_error +from socket import getnameinfo as _getnameinfo +import base64 # for DER-to-PEM translation 
+import errno + +class SSLSocket(socket): + + """This class implements a subtype of socket.socket that wraps + the underlying OS socket in an SSL context when necessary, and + provides read and write methods over that channel.""" + + def __init__(self, sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=CERT_NONE, + ssl_version=PROTOCOL_SSLv23, ca_certs=None, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, ciphers=None): + socket.__init__(self, _sock=sock._sock) + # The initializer for socket overrides the methods send(), recv(), etc. + # in the instancce, which we don't need -- but we want to provide the + # methods defined in SSLSocket. + for attr in _delegate_methods: + try: + delattr(self, attr) + except AttributeError: + pass + + if certfile and not keyfile: + keyfile = certfile + # see if it's connected + try: + socket.getpeername(self) + except socket_error, e: + if e.errno != errno.ENOTCONN: + raise + # no, no connection yet + self._connected = False + self._sslobj = None + else: + # yes, create the SSL object + self._connected = True + self._sslobj = _ssl.sslwrap(self._sock, server_side, + keyfile, certfile, + cert_reqs, ssl_version, ca_certs, + ciphers) + if do_handshake_on_connect: + self.do_handshake() + self.keyfile = keyfile + self.certfile = certfile + self.cert_reqs = cert_reqs + self.ssl_version = ssl_version + self.ca_certs = ca_certs + self.ciphers = ciphers + self.do_handshake_on_connect = do_handshake_on_connect + self.suppress_ragged_eofs = suppress_ragged_eofs + self._makefile_refs = 0 + + def read(self, len=1024): + + """Read up to LEN bytes and return them. + Return zero-length string on EOF.""" + + try: + return self._sslobj.read(len) + except SSLError, x: + if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: + return '' + else: + raise + + def write(self, data): + + """Write DATA to the underlying SSL channel. Returns + number of bytes of DATA actually transmitted.""" + + return self._sslobj.write(data) + + def getpeercert(self, binary_form=False): + + """Returns a formatted version of the data in the + certificate provided by the other end of the SSL channel. 
+ Return None if no certificate was provided, {} if a + certificate was provided, but not validated.""" + + return self._sslobj.peer_certificate(binary_form) + + def cipher(self): + + if not self._sslobj: + return None + else: + return self._sslobj.cipher() + + def send(self, data, flags=0): + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to send() on %s" % + self.__class__) + while True: + try: + v = self._sslobj.write(data) + except SSLError, x: + if x.args[0] == SSL_ERROR_WANT_READ: + return 0 + elif x.args[0] == SSL_ERROR_WANT_WRITE: + return 0 + else: + raise + else: + return v + else: + return self._sock.send(data, flags) + + def sendto(self, data, flags_or_addr, addr=None): + if self._sslobj: + raise ValueError("sendto not allowed on instances of %s" % + self.__class__) + elif addr is None: + return self._sock.sendto(data, flags_or_addr) + else: + return self._sock.sendto(data, flags_or_addr, addr) + + def sendall(self, data, flags=0): + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to sendall() on %s" % + self.__class__) + amount = len(data) + count = 0 + while (count < amount): + v = self.send(data[count:]) + count += v + return amount + else: + return socket.sendall(self, data, flags) + + def recv(self, buflen=1024, flags=0): + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv() on %s" % + self.__class__) + return self.read(buflen) + else: + return self._sock.recv(buflen, flags) + + def recv_into(self, buffer, nbytes=None, flags=0): + if buffer and (nbytes is None): + nbytes = len(buffer) + elif nbytes is None: + nbytes = 1024 + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv_into() on %s" % + self.__class__) + tmp_buffer = self.read(nbytes) + v = len(tmp_buffer) + buffer[:v] = tmp_buffer + return v + else: + return self._sock.recv_into(buffer, nbytes, flags) + + def recvfrom(self, buflen=1024, flags=0): + if self._sslobj: + raise ValueError("recvfrom not allowed on instances of %s" % + self.__class__) + else: + return self._sock.recvfrom(buflen, flags) + + def recvfrom_into(self, buffer, nbytes=None, flags=0): + if self._sslobj: + raise ValueError("recvfrom_into not allowed on instances of %s" % + self.__class__) + else: + return self._sock.recvfrom_into(buffer, nbytes, flags) + + def pending(self): + if self._sslobj: + return self._sslobj.pending() + else: + return 0 + + def unwrap(self): + if self._sslobj: + s = self._sslobj.shutdown() + self._sslobj = None + return s + else: + raise ValueError("No SSL wrapper around " + str(self)) + + def shutdown(self, how): + self._sslobj = None + socket.shutdown(self, how) + + def close(self): + if self._makefile_refs < 1: + self._sslobj = None + socket.close(self) + else: + self._makefile_refs -= 1 + + def do_handshake(self): + + """Perform a TLS/SSL handshake.""" + + self._sslobj.do_handshake() + + def _real_connect(self, addr, return_errno): + # Here we assume that the socket is client-side, and not + # connected at the time of the call. We connect it, then wrap it. 
+ if self._connected: + raise ValueError("attempt to connect already-connected SSLSocket!") + self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, + self.cert_reqs, self.ssl_version, + self.ca_certs, self.ciphers) + try: + socket.connect(self, addr) + if self.do_handshake_on_connect: + self.do_handshake() + except socket_error as e: + if return_errno: + return e.errno + else: + self._sslobj = None + raise e + self._connected = True + return 0 + + def connect(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + self._real_connect(addr, False) + + def connect_ex(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + return self._real_connect(addr, True) + + def accept(self): + + """Accepts a new connection from a remote client, and returns + a tuple containing that new connection wrapped with a server-side + SSL channel, and the address of the remote client.""" + + newsock, addr = socket.accept(self) + return (SSLSocket(newsock, + keyfile=self.keyfile, + certfile=self.certfile, + server_side=True, + cert_reqs=self.cert_reqs, + ssl_version=self.ssl_version, + ca_certs=self.ca_certs, + ciphers=self.ciphers, + do_handshake_on_connect=self.do_handshake_on_connect, + suppress_ragged_eofs=self.suppress_ragged_eofs), + addr) + + def makefile(self, mode='r', bufsize=-1): + + """Make and return a file-like object that + works with the SSL connection. Just use the code + from the socket module.""" + + self._makefile_refs += 1 + # close=True so as to decrement the reference count when done with + # the file-like object. + return _fileobject(self, mode, bufsize, close=True) + + + +def wrap_socket(sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=CERT_NONE, + ssl_version=PROTOCOL_SSLv23, ca_certs=None, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, ciphers=None): + + return SSLSocket(sock, keyfile=keyfile, certfile=certfile, + server_side=server_side, cert_reqs=cert_reqs, + ssl_version=ssl_version, ca_certs=ca_certs, + do_handshake_on_connect=do_handshake_on_connect, + suppress_ragged_eofs=suppress_ragged_eofs, + ciphers=ciphers) + + +# some utility functions + +def cert_time_to_seconds(cert_time): + + """Takes a date-time string in standard ASN1_print form + ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return + a Python time value in seconds past the epoch.""" + + import time + return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")) + +PEM_HEADER = "-----BEGIN CERTIFICATE-----" +PEM_FOOTER = "-----END CERTIFICATE-----" + +def DER_cert_to_PEM_cert(der_cert_bytes): + + """Takes a certificate in binary DER format and returns the + PEM version of it as a string.""" + + if hasattr(base64, 'standard_b64encode'): + # preferred because older API gets line-length wrong + f = base64.standard_b64encode(der_cert_bytes) + return (PEM_HEADER + '\n' + + textwrap.fill(f, 64) + '\n' + + PEM_FOOTER + '\n') + else: + return (PEM_HEADER + '\n' + + base64.encodestring(der_cert_bytes) + + PEM_FOOTER + '\n') + +def PEM_cert_to_DER_cert(pem_cert_string): + + """Takes a certificate in ASCII PEM format and returns the + DER-encoded version of it as a byte sequence""" + + if not pem_cert_string.startswith(PEM_HEADER): + raise ValueError("Invalid PEM encoding; must start with %s" + % PEM_HEADER) + if not pem_cert_string.strip().endswith(PEM_FOOTER): + raise ValueError("Invalid PEM encoding; must end with %s" + % PEM_FOOTER) + d = 
pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] + return base64.decodestring(d) + +def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None): + + """Retrieve the certificate from the server at the specified address, + and return it as a PEM-encoded string. + If 'ca_certs' is specified, validate the server cert against it. + If 'ssl_version' is specified, use it in the connection attempt.""" + + host, port = addr + if (ca_certs is not None): + cert_reqs = CERT_REQUIRED + else: + cert_reqs = CERT_NONE + s = wrap_socket(socket(), ssl_version=ssl_version, + cert_reqs=cert_reqs, ca_certs=ca_certs) + s.connect(addr) + dercert = s.getpeercert(True) + s.close() + return DER_cert_to_PEM_cert(dercert) + +def get_protocol_name(protocol_code): + return _PROTOCOL_NAMES.get(protocol_code, '') + + +# a replacement for the old socket.ssl function + +def sslwrap_simple(sock, keyfile=None, certfile=None): + + """A replacement for the old socket.ssl function. Designed + for compability with Python 2.5 and earlier. Will disappear in + Python 3.0.""" + + if hasattr(sock, "_sock"): + sock = sock._sock + + ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE, + PROTOCOL_SSLv23, None) + try: + sock.getpeername() + except socket_error: + # no, no connection yet + pass + else: + # yes, do the handshake + ssl_sock.do_handshake() + + return ssl_sock diff --git a/CVIssueCount/stat.py b/CVIssueCount/stat.py new file mode 100644 index 0000000..abed5c9 --- /dev/null +++ b/CVIssueCount/stat.py @@ -0,0 +1,96 @@ +"""Constants/functions for interpreting results of os.stat() and os.lstat(). + +Suggested usage: from stat import * +""" + +# Indices for stat struct members in the tuple returned by os.stat() + +ST_MODE = 0 +ST_INO = 1 +ST_DEV = 2 +ST_NLINK = 3 +ST_UID = 4 +ST_GID = 5 +ST_SIZE = 6 +ST_ATIME = 7 +ST_MTIME = 8 +ST_CTIME = 9 + +# Extract bits from the mode + +def S_IMODE(mode): + return mode & 07777 + +def S_IFMT(mode): + return mode & 0170000 + +# Constants used as S_IFMT() for various file types +# (not all are implemented on all systems) + +S_IFDIR = 0040000 +S_IFCHR = 0020000 +S_IFBLK = 0060000 +S_IFREG = 0100000 +S_IFIFO = 0010000 +S_IFLNK = 0120000 +S_IFSOCK = 0140000 + +# Functions to test for each file type + +def S_ISDIR(mode): + return S_IFMT(mode) == S_IFDIR + +def S_ISCHR(mode): + return S_IFMT(mode) == S_IFCHR + +def S_ISBLK(mode): + return S_IFMT(mode) == S_IFBLK + +def S_ISREG(mode): + return S_IFMT(mode) == S_IFREG + +def S_ISFIFO(mode): + return S_IFMT(mode) == S_IFIFO + +def S_ISLNK(mode): + return S_IFMT(mode) == S_IFLNK + +def S_ISSOCK(mode): + return S_IFMT(mode) == S_IFSOCK + +# Names for permission bits + +S_ISUID = 04000 +S_ISGID = 02000 +S_ENFMT = S_ISGID +S_ISVTX = 01000 +S_IREAD = 00400 +S_IWRITE = 00200 +S_IEXEC = 00100 +S_IRWXU = 00700 +S_IRUSR = 00400 +S_IWUSR = 00200 +S_IXUSR = 00100 +S_IRWXG = 00070 +S_IRGRP = 00040 +S_IWGRP = 00020 +S_IXGRP = 00010 +S_IRWXO = 00007 +S_IROTH = 00004 +S_IWOTH = 00002 +S_IXOTH = 00001 + +# Names for file flags + +UF_NODUMP = 0x00000001 +UF_IMMUTABLE = 0x00000002 +UF_APPEND = 0x00000004 +UF_OPAQUE = 0x00000008 +UF_NOUNLINK = 0x00000010 +UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed +UF_HIDDEN = 0x00008000 # OS X: file should not be displayed +SF_ARCHIVED = 0x00010000 +SF_IMMUTABLE = 0x00020000 +SF_APPEND = 0x00040000 +SF_NOUNLINK = 0x00100000 +SF_SNAPSHOT = 0x00200000 diff --git a/CVIssueCount/string.py b/CVIssueCount/string.py new file mode 100644 index 0000000..23608b4 --- /dev/null +++ 
b/CVIssueCount/string.py @@ -0,0 +1,656 @@ +"""A collection of string operations (most are no longer used). + +Warning: most of the code you see here isn't normally used nowadays. +Beginning with Python 1.6, many of these functions are implemented as +methods on the standard string object. They used to be implemented by +a built-in module called strop, but strop is now obsolete itself. + +Public module variables: + +whitespace -- a string containing all characters considered whitespace +lowercase -- a string containing all characters considered lowercase letters +uppercase -- a string containing all characters considered uppercase letters +letters -- a string containing all characters considered letters +digits -- a string containing all characters considered decimal digits +hexdigits -- a string containing all characters considered hexadecimal digits +octdigits -- a string containing all characters considered octal digits +punctuation -- a string containing all characters considered punctuation +printable -- a string containing all characters considered printable + +""" + +# Some strings for ctype-style character classification +whitespace = ' \t\n\r\v\f' +lowercase = 'abcdefghijklmnopqrstuvwxyz' +uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' +letters = lowercase + uppercase +ascii_lowercase = lowercase +ascii_uppercase = uppercase +ascii_letters = ascii_lowercase + ascii_uppercase +digits = '0123456789' +hexdigits = digits + 'abcdef' + 'ABCDEF' +octdigits = '01234567' +punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" +printable = digits + letters + punctuation + whitespace + +# Case conversion helpers +# Use str to convert Unicode literal in case of -U +l = map(chr, xrange(256)) +_idmap = str('').join(l) +del l + +# Functions which aren't available as string methods. + +# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". +def capwords(s, sep=None): + """capwords(s [,sep]) -> string + + Split the argument into words using split, capitalize each + word using capitalize, and join the capitalized words using + join. If the optional second argument sep is absent or None, + runs of whitespace characters are replaced by a single space + and leading and trailing whitespace are removed, otherwise + sep is used to split and join the words. + + """ + return (sep or ' ').join(x.capitalize() for x in s.split(sep)) + + +# Construct a translation string +_idmapL = None +def maketrans(fromstr, tostr): + """maketrans(frm, to) -> string + + Return a translation table (a string of 256 bytes long) + suitable for use in string.translate. The strings frm and to + must be of the same length. + + """ + if len(fromstr) != len(tostr): + raise ValueError, "maketrans arguments must have same length" + global _idmapL + if not _idmapL: + _idmapL = list(_idmap) + L = _idmapL[:] + fromstr = map(ord, fromstr) + for i in range(len(fromstr)): + L[fromstr[i]] = tostr[i] + return ''.join(L) + + + +#################################################################### +import re as _re + +class _multimap: + """Helper class for combining multiple mappings. + + Used by .{safe_,}substitute() to combine the mapping and keyword + arguments. 
+ """ + def __init__(self, primary, secondary): + self._primary = primary + self._secondary = secondary + + def __getitem__(self, key): + try: + return self._primary[key] + except KeyError: + return self._secondary[key] + + +class _TemplateMetaclass(type): + pattern = r""" + %(delim)s(?: + (?P<escaped>%(delim)s) | # Escape sequence of two delimiters + (?P<named>%(id)s) | # delimiter and a Python identifier + {(?P<braced>%(id)s)} | # delimiter and a braced identifier + (?P<invalid>) # Other ill-formed delimiter exprs + ) + """ + + def __init__(cls, name, bases, dct): + super(_TemplateMetaclass, cls).__init__(name, bases, dct) + if 'pattern' in dct: + pattern = cls.pattern + else: + pattern = _TemplateMetaclass.pattern % { + 'delim' : _re.escape(cls.delimiter), + 'id' : cls.idpattern, + } + cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE) + + +class Template: + """A string class for supporting $-substitutions.""" + __metaclass__ = _TemplateMetaclass + + delimiter = '$' + idpattern = r'[_a-z][_a-z0-9]*' + + def __init__(self, template): + self.template = template + + # Search for $$, $identifier, ${identifier}, and any bare $'s + + def _invalid(self, mo): + i = mo.start('invalid') + lines = self.template[:i].splitlines(True) + if not lines: + colno = 1 + lineno = 1 + else: + colno = i - len(''.join(lines[:-1])) + lineno = len(lines) + raise ValueError('Invalid placeholder in string: line %d, col %d' % + (lineno, colno)) + + def substitute(*args, **kws): + if not args: + raise TypeError("descriptor 'substitute' of 'Template' object " + "needs an argument") + self, args = args[0], args[1:] # allow the "self" keyword be passed + if len(args) > 1: + raise TypeError('Too many positional arguments') + if not args: + mapping = kws + elif kws: + mapping = _multimap(kws, args[0]) + else: + mapping = args[0] + # Helper function for .sub() + def convert(mo): + # Check the most common path first. + named = mo.group('named') or mo.group('braced') + if named is not None: + val = mapping[named] + # We use this idiom instead of str() because the latter will + # fail if val is a Unicode containing non-ASCII characters. + return '%s' % (val,) + if mo.group('escaped') is not None: + return self.delimiter + if mo.group('invalid') is not None: + self._invalid(mo) + raise ValueError('Unrecognized named group in pattern', + self.pattern) + return self.pattern.sub(convert, self.template) + + def safe_substitute(*args, **kws): + if not args: + raise TypeError("descriptor 'safe_substitute' of 'Template' object " + "needs an argument") + self, args = args[0], args[1:] # allow the "self" keyword be passed + if len(args) > 1: + raise TypeError('Too many positional arguments') + if not args: + mapping = kws + elif kws: + mapping = _multimap(kws, args[0]) + else: + mapping = args[0] + # Helper function for .sub() + def convert(mo): + named = mo.group('named') or mo.group('braced') + if named is not None: + try: + # We use this idiom instead of str() because the latter + # will fail if val is a Unicode containing non-ASCII + return '%s' % (mapping[named],) + except KeyError: + return mo.group() + if mo.group('escaped') is not None: + return self.delimiter + if mo.group('invalid') is not None: + return mo.group() + raise ValueError('Unrecognized named group in pattern', + self.pattern) + return self.pattern.sub(convert, self.template) + + + +#################################################################### +# NOTE: Everything below here is deprecated. Use string methods instead. +# This stuff will go away in Python 3.0.
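# [Editorial sketch -- illustrative only, not part of the vendored string module or of this patch.]
# The Template class above offers two substitution modes: substitute() raises
# KeyError for a missing placeholder, safe_substitute() leaves it in place, and
# '$$' always collapses to a literal '$'. Assuming the module's names are in
# scope:
t = Template('$who owes me $$${amount}')
print t.substitute(who='Fred', amount=10)   # -> 'Fred owes me $10'
print t.safe_substitute(who='Fred')         # -> 'Fred owes me $${amount}'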
+ +# Backward compatible names for exceptions +index_error = ValueError +atoi_error = ValueError +atof_error = ValueError +atol_error = ValueError + +# convert UPPER CASE letters to lower case +def lower(s): + """lower(s) -> string + + Return a copy of the string s converted to lowercase. + + """ + return s.lower() + +# Convert lower case letters to UPPER CASE +def upper(s): + """upper(s) -> string + + Return a copy of the string s converted to uppercase. + + """ + return s.upper() + +# Swap lower case letters and UPPER CASE +def swapcase(s): + """swapcase(s) -> string + + Return a copy of the string s with upper case characters + converted to lowercase and vice versa. + + """ + return s.swapcase() + +# Strip leading and trailing tabs and spaces +def strip(s, chars=None): + """strip(s [,chars]) -> string + + Return a copy of the string s with leading and trailing + whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping. + + """ + return s.strip(chars) + +# Strip leading tabs and spaces +def lstrip(s, chars=None): + """lstrip(s [,chars]) -> string + + Return a copy of the string s with leading whitespace removed. + If chars is given and not None, remove characters in chars instead. + + """ + return s.lstrip(chars) + +# Strip trailing tabs and spaces +def rstrip(s, chars=None): + """rstrip(s [,chars]) -> string + + Return a copy of the string s with trailing whitespace removed. + If chars is given and not None, remove characters in chars instead. + + """ + return s.rstrip(chars) + + +# Split a string into a list of space/tab-separated words +def split(s, sep=None, maxsplit=-1): + """split(s [,sep [,maxsplit]]) -> list of strings + + Return a list of the words in the string s, using sep as the + delimiter string. If maxsplit is given, splits at no more than + maxsplit places (resulting in at most maxsplit+1 words). If sep + is not specified or is None, any whitespace string is a separator. + + (split and splitfields are synonymous) + + """ + return s.split(sep, maxsplit) +splitfields = split + +# Split a string into a list of space/tab-separated words +def rsplit(s, sep=None, maxsplit=-1): + """rsplit(s [,sep [,maxsplit]]) -> list of strings + + Return a list of the words in the string s, using sep as the + delimiter string, starting at the end of the string and working + to the front. If maxsplit is given, at most maxsplit splits are + done. If sep is not specified or is None, any whitespace string + is a separator. + """ + return s.rsplit(sep, maxsplit) + +# Join fields with optional separator +def join(words, sep = ' '): + """join(list [,sep]) -> string + + Return a string composed of the words in list, with + intervening occurrences of sep. The default separator is a + single space. + + (joinfields and join are synonymous) + + """ + return sep.join(words) +joinfields = join + +# Find substring, raise exception if not found +def index(s, *args): + """index(s, sub [,start [,end]]) -> int + + Like find but raises ValueError when the substring is not found. + + """ + return s.index(*args) + +# Find last substring, raise exception if not found +def rindex(s, *args): + """rindex(s, sub [,start [,end]]) -> int + + Like rfind but raises ValueError when the substring is not found. 
+ + """ + return s.rindex(*args) + +# Count non-overlapping occurrences of substring +def count(s, *args): + """count(s, sub[, start[,end]]) -> int + + Return the number of occurrences of substring sub in string + s[start:end]. Optional arguments start and end are + interpreted as in slice notation. + + """ + return s.count(*args) + +# Find substring, return -1 if not found +def find(s, *args): + """find(s, sub [,start [,end]]) -> in + + Return the lowest index in s where substring sub is found, + such that sub is contained within s[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + + """ + return s.find(*args) + +# Find last substring, return -1 if not found +def rfind(s, *args): + """rfind(s, sub [,start [,end]]) -> int + + Return the highest index in s where substring sub is found, + such that sub is contained within s[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + + """ + return s.rfind(*args) + +# for a bit of speed +_float = float +_int = int +_long = long + +# Convert string to float +def atof(s): + """atof(s) -> float + + Return the floating point number represented by the string s. + + """ + return _float(s) + + +# Convert string to integer +def atoi(s , base=10): + """atoi(s [,base]) -> int + + Return the integer represented by the string s in the given + base, which defaults to 10. The string s must consist of one + or more digits, possibly preceded by a sign. If base is 0, it + is chosen from the leading characters of s, 0 for octal, 0x or + 0X for hexadecimal. If base is 16, a preceding 0x or 0X is + accepted. + + """ + return _int(s, base) + + +# Convert string to long integer +def atol(s, base=10): + """atol(s [,base]) -> long + + Return the long integer represented by the string s in the + given base, which defaults to 10. The string s must consist + of one or more digits, possibly preceded by a sign. If base + is 0, it is chosen from the leading characters of s, 0 for + octal, 0x or 0X for hexadecimal. If base is 16, a preceding + 0x or 0X is accepted. A trailing L or l is not accepted, + unless base is 0. + + """ + return _long(s, base) + + +# Left-justify a string +def ljust(s, width, *args): + """ljust(s, width[, fillchar]) -> string + + Return a left-justified version of s, in a field of the + specified width, padded with spaces as needed. The string is + never truncated. If specified the fillchar is used instead of spaces. + + """ + return s.ljust(width, *args) + +# Right-justify a string +def rjust(s, width, *args): + """rjust(s, width[, fillchar]) -> string + + Return a right-justified version of s, in a field of the + specified width, padded with spaces as needed. The string is + never truncated. If specified the fillchar is used instead of spaces. + + """ + return s.rjust(width, *args) + +# Center a string +def center(s, width, *args): + """center(s, width[, fillchar]) -> string + + Return a center version of s, in a field of the specified + width. padded with spaces as needed. The string is never + truncated. If specified the fillchar is used instead of spaces. + + """ + return s.center(width, *args) + +# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03' +# Decadent feature: the argument may be a string or a number +# (Use of this is deprecated; it should be a string as with ljust c.s.) 
+def zfill(x, width): + """zfill(x, width) -> string + + Pad a numeric string x with zeros on the left, to fill a field + of the specified width. The string x is never truncated. + + """ + if not isinstance(x, basestring): + x = repr(x) + return x.zfill(width) + +# Expand tabs in a string. +# Doesn't take non-printing chars into account, but does understand \n. +def expandtabs(s, tabsize=8): + """expandtabs(s [,tabsize]) -> string + + Return a copy of the string s with all tab characters replaced + by the appropriate number of spaces, depending on the current + column, and the tabsize (default 8). + + """ + return s.expandtabs(tabsize) + +# Character translation through look-up table. +def translate(s, table, deletions=""): + """translate(s,table [,deletions]) -> string + + Return a copy of the string s, where all characters occurring + in the optional argument deletions are removed, and the + remaining characters have been mapped through the given + translation table, which must be a string of length 256. The + deletions argument is not allowed for Unicode strings. + + """ + if deletions or table is None: + return s.translate(table, deletions) + else: + # Add s[:0] so that if s is Unicode and table is an 8-bit string, + # table is converted to Unicode. This means that table *cannot* + # be a dictionary -- for that feature, use u.translate() directly. + return s.translate(table + s[:0]) + +# Capitalize a string, e.g. "aBc dEf" -> "Abc def". +def capitalize(s): + """capitalize(s) -> string + + Return a copy of the string s with only its first character + capitalized. + + """ + return s.capitalize() + +# Substring replacement (global) +def replace(s, old, new, maxreplace=-1): + """replace (str, old, new[, maxreplace]) -> string + + Return a copy of string str with all occurrences of substring + old replaced by new. If the optional argument maxreplace is + given, only the first maxreplace occurrences are replaced. + + """ + return s.replace(old, new, maxreplace) + + +# Try importing optional built-in module "strop" -- if it exists, +# it redefines some string operations that are 100-1000 times faster. +# It also defines values for whitespace, lowercase and uppercase +# that match <string>'s definitions. + +try: + from strop import maketrans, lowercase, uppercase, whitespace + letters = lowercase + uppercase +except ImportError: + pass # Use the original versions + +######################################################################## +# the Formatter class +# see PEP 3101 for details and purpose of this class + +# The hard parts are reused from the C implementation. They're exposed as "_" +# prefixed methods of str and unicode. + +# The overall parser is implemented in str._formatter_parser.
+# The field name parser is implemented in str._formatter_field_name_split + +class Formatter(object): + def format(*args, **kwargs): + if not args: + raise TypeError("descriptor 'format' of 'Formatter' object " + "needs an argument") + self, args = args[0], args[1:] # allow the "self" keyword be passed + try: + format_string, args = args[0], args[1:] # allow the "format_string" keyword be passed + except IndexError: + if 'format_string' in kwargs: + format_string = kwargs.pop('format_string') + else: + raise TypeError("format() missing 1 required positional " + "argument: 'format_string'") + return self.vformat(format_string, args, kwargs) + + def vformat(self, format_string, args, kwargs): + used_args = set() + result = self._vformat(format_string, args, kwargs, used_args, 2) + self.check_unused_args(used_args, args, kwargs) + return result + + def _vformat(self, format_string, args, kwargs, used_args, recursion_depth): + if recursion_depth < 0: + raise ValueError('Max string recursion exceeded') + result = [] + for literal_text, field_name, format_spec, conversion in \ + self.parse(format_string): + + # output the literal text + if literal_text: + result.append(literal_text) + + # if there's a field, output it + if field_name is not None: + # this is some markup, find the object and do + # the formatting + + # given the field_name, find the object it references + # and the argument it came from + obj, arg_used = self.get_field(field_name, args, kwargs) + used_args.add(arg_used) + + # do any conversion on the resulting object + obj = self.convert_field(obj, conversion) + + # expand the format spec, if needed + format_spec = self._vformat(format_spec, args, kwargs, + used_args, recursion_depth-1) + + # format the object and append to the result + result.append(self.format_field(obj, format_spec)) + + return ''.join(result) + + + def get_value(self, key, args, kwargs): + if isinstance(key, (int, long)): + return args[key] + else: + return kwargs[key] + + + def check_unused_args(self, used_args, args, kwargs): + pass + + + def format_field(self, value, format_spec): + return format(value, format_spec) + + + def convert_field(self, value, conversion): + # do any conversion on the resulting object + if conversion is None: + return value + elif conversion == 's': + return str(value) + elif conversion == 'r': + return repr(value) + raise ValueError("Unknown conversion specifier {0!s}".format(conversion)) + + + # returns an iterable that contains tuples of the form: + # (literal_text, field_name, format_spec, conversion) + # literal_text can be zero length + # field_name can be None, in which case there's no + # object to format and output + # if field_name is not None, it is looked up, formatted + # with format_spec and conversion and then used + def parse(self, format_string): + return format_string._formatter_parser() + + + # given a field_name, find the object it references. + # field_name: the field being looked up, e.g. 
"0.name" + # or "lookup[3]" + # used_args: a set of which args have been used + # args, kwargs: as passed in to vformat + def get_field(self, field_name, args, kwargs): + first, rest = field_name._formatter_field_name_split() + + obj = self.get_value(first, args, kwargs) + + # loop through the rest of the field_name, doing + # getattr or getitem as needed + for is_attr, i in rest: + if is_attr: + obj = getattr(obj, i) + else: + obj = obj[i] + + return obj, first diff --git a/CVIssueCount/struct.py b/CVIssueCount/struct.py new file mode 100644 index 0000000..b022355 --- /dev/null +++ b/CVIssueCount/struct.py @@ -0,0 +1,3 @@ +from _struct import * +from _struct import _clearcache +from _struct import __doc__ diff --git a/CVIssueCount/tempfile.py b/CVIssueCount/tempfile.py new file mode 100644 index 0000000..184dfc1 --- /dev/null +++ b/CVIssueCount/tempfile.py @@ -0,0 +1,639 @@ +"""Temporary files. + +This module provides generic, low- and high-level interfaces for +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. + +This module also provides some data items to the user: + + TMP_MAX - maximum number of names that will be tried before + giving up. + template - the default prefix for all temporary names. + You may change this to control the default prefix. + tempdir - If this is set to a string before the first use of + any routine from this module, it will be considered as + another candidate location to store temporary files. +""" + +__all__ = [ + "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces + "SpooledTemporaryFile", + "mkstemp", "mkdtemp", # low level safe interfaces + "mktemp", # deprecated unsafe interface + "TMP_MAX", "gettempprefix", # constants + "tempdir", "gettempdir" + ] + + +# Imports. + +import io as _io +import os as _os +import errno as _errno +from random import Random as _Random + +try: + from cStringIO import StringIO as _StringIO +except ImportError: + from StringIO import StringIO as _StringIO + +try: + import fcntl as _fcntl +except ImportError: + def _set_cloexec(fd): + pass +else: + def _set_cloexec(fd): + try: + flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0) + except IOError: + pass + else: + # flags read successfully, modify + flags |= _fcntl.FD_CLOEXEC + _fcntl.fcntl(fd, _fcntl.F_SETFD, flags) + + +try: + import thread as _thread +except ImportError: + import dummy_thread as _thread +_allocate_lock = _thread.allocate_lock + +_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL +if hasattr(_os, 'O_NOINHERIT'): + _text_openflags |= _os.O_NOINHERIT +if hasattr(_os, 'O_NOFOLLOW'): + _text_openflags |= _os.O_NOFOLLOW + +_bin_openflags = _text_openflags +if hasattr(_os, 'O_BINARY'): + _bin_openflags |= _os.O_BINARY + +if hasattr(_os, 'TMP_MAX'): + TMP_MAX = _os.TMP_MAX +else: + TMP_MAX = 10000 + +template = "tmp" + +# Internal routines. + +_once_lock = _allocate_lock() + +if hasattr(_os, "lstat"): + _stat = _os.lstat +elif hasattr(_os, "stat"): + _stat = _os.stat +else: + # Fallback. All we need is something that raises os.error if the + # file doesn't exist. 
+ def _stat(fn): + try: + f = open(fn) + except IOError: + raise _os.error + f.close() + +def _exists(fn): + try: + _stat(fn) + except _os.error: + return False + else: + return True + +class _RandomNameSequence: + """An instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is six characters long. Multiple + threads can safely use the same instance at the same time. + + _RandomNameSequence is an iterator.""" + + characters = ("abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "0123456789_") + + def __init__(self): + self.mutex = _allocate_lock() + self.normcase = _os.path.normcase + + @property + def rng(self): + cur_pid = _os.getpid() + if cur_pid != getattr(self, '_rng_pid', None): + self._rng = _Random() + self._rng_pid = cur_pid + return self._rng + + def __iter__(self): + return self + + def next(self): + m = self.mutex + c = self.characters + choose = self.rng.choice + + m.acquire() + try: + letters = [choose(c) for dummy in "123456"] + finally: + m.release() + + return self.normcase(''.join(letters)) + +def _candidate_tempdir_list(): + """Generate a list of candidate temporary directories which + _get_default_tempdir will try.""" + + dirlist = [] + + # First, try the environment. + for envname in 'TMPDIR', 'TEMP', 'TMP': + dirname = _os.getenv(envname) + if dirname: dirlist.append(dirname) + + # Failing that, try OS-specific locations. + if _os.name == 'riscos': + dirname = _os.getenv('Wimp$ScrapDir') + if dirname: dirlist.append(dirname) + elif _os.name == 'nt': + dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) + else: + dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) + + # As a last resort, the current directory. + try: + dirlist.append(_os.getcwd()) + except (AttributeError, _os.error): + dirlist.append(_os.curdir) + + return dirlist + +def _get_default_tempdir(): + """Calculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. To prevent denial of + service, the name of the test file must be randomized.""" + + namer = _RandomNameSequence() + dirlist = _candidate_tempdir_list() + flags = _text_openflags + + for dir in dirlist: + if dir != _os.curdir: + dir = _os.path.normcase(_os.path.abspath(dir)) + # Try only a few names per directory. + for seq in xrange(100): + name = namer.next() + filename = _os.path.join(dir, name) + try: + fd = _os.open(filename, flags, 0o600) + try: + try: + with _io.open(fd, 'wb', closefd=False) as fp: + fp.write(b'blat') + finally: + _os.close(fd) + finally: + _os.unlink(filename) + return dir + except (OSError, IOError) as e: + if e.args[0] == _errno.EEXIST: + continue + if (_os.name == 'nt' and e.args[0] == _errno.EACCES and + _os.path.isdir(dir) and _os.access(dir, _os.W_OK)): + # On windows, when a directory with the chosen name already + # exists, EACCES error code is returned instead of EEXIST. 
+ continue + break # no point trying more names in this directory + raise IOError, (_errno.ENOENT, + ("No usable temporary directory found in %s" % dirlist)) + +_name_sequence = None + +def _get_candidate_names(): + """Common setup sequence for all user-callable interfaces.""" + + global _name_sequence + if _name_sequence is None: + _once_lock.acquire() + try: + if _name_sequence is None: + _name_sequence = _RandomNameSequence() + finally: + _once_lock.release() + return _name_sequence + + +def _mkstemp_inner(dir, pre, suf, flags): + """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" + + names = _get_candidate_names() + + for seq in xrange(TMP_MAX): + name = names.next() + file = _os.path.join(dir, pre + name + suf) + try: + fd = _os.open(file, flags, 0600) + _set_cloexec(fd) + return (fd, _os.path.abspath(file)) + except OSError, e: + if e.errno == _errno.EEXIST: + continue # try again + if (_os.name == 'nt' and e.errno == _errno.EACCES and + _os.path.isdir(dir) and _os.access(dir, _os.W_OK)): + # On windows, when a directory with the chosen name already + # exists, EACCES error code is returned instead of EEXIST. + continue + raise + + raise IOError, (_errno.EEXIST, "No usable temporary file name found") + + +# User visible interfaces. + +def gettempprefix(): + """Accessor for tempdir.template.""" + return template + +tempdir = None + +def gettempdir(): + """Accessor for tempfile.tempdir.""" + global tempdir + if tempdir is None: + _once_lock.acquire() + try: + if tempdir is None: + tempdir = _get_default_tempdir() + finally: + _once_lock.release() + return tempdir + +def mkstemp(suffix="", prefix=template, dir=None, text=False): + """User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is specified, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is specified, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is specified, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. On + some operating systems, this makes no difference. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. + """ + + if dir is None: + dir = gettempdir() + + if text: + flags = _text_openflags + else: + flags = _bin_openflags + + return _mkstemp_inner(dir, prefix, suffix, flags) + + +def mkdtemp(suffix="", prefix=template, dir=None): + """User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. 
+ """ + + if dir is None: + dir = gettempdir() + + names = _get_candidate_names() + + for seq in xrange(TMP_MAX): + name = names.next() + file = _os.path.join(dir, prefix + name + suffix) + try: + _os.mkdir(file, 0700) + return file + except OSError, e: + if e.errno == _errno.EEXIST: + continue # try again + if (_os.name == 'nt' and e.errno == _errno.EACCES and + _os.path.isdir(dir) and _os.access(dir, _os.W_OK)): + # On windows, when a directory with the chosen name already + # exists, EACCES error code is returned instead of EEXIST. + continue + raise + + raise IOError, (_errno.EEXIST, "No usable temporary directory name found") + +def mktemp(suffix="", prefix=template, dir=None): + """User-callable function to return a unique temporary file name. The + file is not created. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + This function is unsafe and should not be used. The file name + refers to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + """ + +## from warnings import warn as _warn +## _warn("mktemp is a potential security risk to your program", +## RuntimeWarning, stacklevel=2) + + if dir is None: + dir = gettempdir() + + names = _get_candidate_names() + for seq in xrange(TMP_MAX): + name = names.next() + file = _os.path.join(dir, prefix + name + suffix) + if not _exists(file): + return file + + raise IOError, (_errno.EEXIST, "No usable temporary filename found") + + +class _TemporaryFileWrapper: + """Temporary file wrapper + + This class provides a wrapper around files opened for + temporary use. In particular, it seeks to automatically + remove the file when it is no longer needed. + """ + + def __init__(self, file, name, delete=True): + self.file = file + self.name = name + self.close_called = False + self.delete = delete + + def __getattr__(self, name): + # Attribute lookups are delegated to the underlying file + # and cached for non-numeric results + # (i.e. methods are cached, closed and friends are not) + file = self.__dict__['file'] + a = getattr(file, name) + if not issubclass(type(a), type(0)): + setattr(self, name, a) + return a + + # The underlying __enter__ method returns the wrong object + # (self.file) so override it to return the wrapper + def __enter__(self): + self.file.__enter__() + return self + + # NT provides delete-on-close as a primitive, so we don't need + # the wrapper to do anything special. We still use it so that + # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile. + if _os.name != 'nt': + # Cache the unlinker so we don't get spurious errors at + # shutdown when the module-level "os" is None'd out. Note + # that this must be referenced as self.unlink, because the + # name TemporaryFileWrapper may also get None'd out before + # __del__ is called. + unlink = _os.unlink + + def close(self): + if not self.close_called: + self.close_called = True + try: + self.file.close() + finally: + if self.delete: + self.unlink(self.name) + + def __del__(self): + self.close() + + # Need to trap __exit__ as well to ensure the file gets + # deleted when used in a with statement + def __exit__(self, exc, value, tb): + result = self.file.__exit__(exc, value, tb) + self.close() + return result + else: + def __exit__(self, exc, value, tb): + self.file.__exit__(exc, value, tb) + + +def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="", + prefix=template, dir=None, delete=True): + """Create and return a temporary file. 
+ Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to os.fdopen (default "w+b"). + 'bufsize' -- the buffer size argument to os.fdopen (default -1). + 'delete' -- whether the file is deleted on close (default True). + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as file.name. The file will be automatically deleted + when it is closed unless the 'delete' argument is set to False. + """ + + if dir is None: + dir = gettempdir() + + if 'b' in mode: + flags = _bin_openflags + else: + flags = _text_openflags + + # Setting O_TEMPORARY in the flags causes the OS to delete + # the file when it is closed. This is only supported by Windows. + if _os.name == 'nt' and delete: + flags |= _os.O_TEMPORARY + + (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) + try: + file = _os.fdopen(fd, mode, bufsize) + return _TemporaryFileWrapper(file, name, delete) + except: + _os.close(fd) + raise + +if _os.name != 'posix' or _os.sys.platform == 'cygwin': + # On non-POSIX and Cygwin systems, assume that we cannot unlink a file + # while it is open. + TemporaryFile = NamedTemporaryFile + +else: + def TemporaryFile(mode='w+b', bufsize=-1, suffix="", + prefix=template, dir=None): + """Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to os.fdopen (default "w+b"). + 'bufsize' -- the buffer size argument to os.fdopen (default -1). + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + """ + + if dir is None: + dir = gettempdir() + + if 'b' in mode: + flags = _bin_openflags + else: + flags = _text_openflags + + (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) + try: + _os.unlink(name) + return _os.fdopen(fd, mode, bufsize) + except: + _os.close(fd) + raise + +class SpooledTemporaryFile: + """Temporary file wrapper, specialized to switch from + StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + """ + _rolled = False + + def __init__(self, max_size=0, mode='w+b', bufsize=-1, + suffix="", prefix=template, dir=None): + self._file = _StringIO() + self._max_size = max_size + self._rolled = False + self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir) + + def _check(self, file): + if self._rolled: return + max_size = self._max_size + if max_size and file.tell() > max_size: + self.rollover() + + def rollover(self): + if self._rolled: return + file = self._file + newfile = self._file = TemporaryFile(*self._TemporaryFileArgs) + del self._TemporaryFileArgs + + newfile.write(file.getvalue()) + newfile.seek(file.tell(), 0) + + self._rolled = True + + # The method caching trick from NamedTemporaryFile + # won't work here, because _file may change from a + # _StringIO instance to a real file. So we list + # all the methods directly. 
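A minimal usage sketch of the rollover mechanism above (illustrative only; it assumes this vendored module is importable as tempfile, and the sizes are arbitrary): data stays in an in-memory StringIO buffer until max_size is crossed, after which it is transparently served from a real temporary file.

    import tempfile

    spool = tempfile.SpooledTemporaryFile(max_size=64)
    spool.write('a' * 32)     # below max_size: still buffered in a StringIO
    spool.write('b' * 64)     # crosses max_size: rollover() copies data to a real file
    spool.seek(0)
    print len(spool.read())   # 96, read back transparently after the rollover
    spool.close()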
+ + # Context management protocol + def __enter__(self): + if self._file.closed: + raise ValueError("Cannot enter context with closed file") + return self + + def __exit__(self, exc, value, tb): + self._file.close() + + # file protocol + def __iter__(self): + return self._file.__iter__() + + def close(self): + self._file.close() + + @property + def closed(self): + return self._file.closed + + def fileno(self): + self.rollover() + return self._file.fileno() + + def flush(self): + self._file.flush() + + def isatty(self): + return self._file.isatty() + + @property + def mode(self): + try: + return self._file.mode + except AttributeError: + return self._TemporaryFileArgs[0] + + @property + def name(self): + try: + return self._file.name + except AttributeError: + return None + + def next(self): + return self._file.next + + def read(self, *args): + return self._file.read(*args) + + def readline(self, *args): + return self._file.readline(*args) + + def readlines(self, *args): + return self._file.readlines(*args) + + def seek(self, *args): + self._file.seek(*args) + + @property + def softspace(self): + return self._file.softspace + + def tell(self): + return self._file.tell() + + def truncate(self): + self._file.truncate() + + def write(self, s): + file = self._file + rv = file.write(s) + self._check(file) + return rv + + def writelines(self, iterable): + file = self._file + rv = file.writelines(iterable) + self._check(file) + return rv + + def xreadlines(self, *args): + if hasattr(self._file, 'xreadlines'): # real file + return iter(self._file) + else: # StringIO() + return iter(self._file.readlines(*args)) diff --git a/CVIssueCount/textwrap.py b/CVIssueCount/textwrap.py new file mode 100644 index 0000000..5c2e4fa --- /dev/null +++ b/CVIssueCount/textwrap.py @@ -0,0 +1,429 @@ +"""Text wrapping and filling. +""" + +# Copyright (C) 1999-2001 Gregory P. Ward. +# Copyright (C) 2002, 2003 Python Software Foundation. +# Written by Greg Ward + +__revision__ = "$Id$" + +import string, re + +try: + _unicode = unicode +except NameError: + # If Python is built without Unicode support, the unicode type + # will not exist. Fake one. + class _unicode(object): + pass + +# Do the right thing with boolean values for all known Python versions +# (so this module can be copied to projects that don't depend on Python +# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below. +#try: +# True, False +#except NameError: +# (True, False) = (1, 0) + +__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent'] + +# Hardcode the recognized whitespace characters to the US-ASCII +# whitespace characters. The main reason for doing this is that in +# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales +# that character winds up in string.whitespace. Respecting +# string.whitespace in those cases would 1) make textwrap treat 0xa0 the +# same as any other whitespace char, which is clearly wrong (it's a +# *non-breaking* space), 2) possibly cause problems with Unicode, +# since 0xa0 is not in range(128). +_whitespace = '\t\n\x0b\x0c\r ' + +class TextWrapper: + """ + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). 
+ + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. + subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 1 .. 8 spaces, depending on its position in + its line. If false, each tab is treated as a single character. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + """ + + whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace)) + + unicode_whitespace_trans = {} + uspace = ord(u' ') + for x in map(ord, _whitespace): + unicode_whitespace_trans[x] = uspace + + # This funky little regex is just the trick for splitting + # text up into word-wrappable chunks. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! + # (after stripping out empty strings). + wordsep_re = re.compile( + r'(\s+|' # any whitespace + r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words + r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash + + # This less funky little regex just split on recognized spaces. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ + wordsep_simple_re = re.compile(r'(\s+)') + + # XXX this is not locale- or charset-aware -- string.lowercase + # is US-ASCII only (and therefore English-only) + sentence_end_re = re.compile(r'[%s]' # lowercase letter + r'[\.\!\?]' # sentence-ending punct. + r'[\"\']?' # optional end-of-quote + r'\Z' # end of chunk + % string.lowercase) + + + def __init__(self, + width=70, + initial_indent="", + subsequent_indent="", + expand_tabs=True, + replace_whitespace=True, + fix_sentence_endings=False, + break_long_words=True, + drop_whitespace=True, + break_on_hyphens=True): + self.width = width + self.initial_indent = initial_indent + self.subsequent_indent = subsequent_indent + self.expand_tabs = expand_tabs + self.replace_whitespace = replace_whitespace + self.fix_sentence_endings = fix_sentence_endings + self.break_long_words = break_long_words + self.drop_whitespace = drop_whitespace + self.break_on_hyphens = break_on_hyphens + + # recompile the regexes for Unicode mode -- done in this clumsy way for + # backwards compatibility because it's rather common to monkey-patch + # the TextWrapper class' wordsep_re attribute. 
+ self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U) + self.wordsep_simple_re_uni = re.compile( + self.wordsep_simple_re.pattern, re.U) + + + # -- Private methods ----------------------------------------------- + # (possibly useful for subclasses to override) + + def _munge_whitespace(self, text): + """_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" + becomes " foo bar baz". + """ + if self.expand_tabs: + text = text.expandtabs() + if self.replace_whitespace: + if isinstance(text, str): + text = text.translate(self.whitespace_trans) + elif isinstance(text, _unicode): + text = text.translate(self.unicode_whitespace_trans) + return text + + + def _split(self, text): + """_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see _wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + if break_on_hyphens is True, or in: + 'Look,', ' ', 'goof-ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', option!' + otherwise. + """ + if isinstance(text, _unicode): + if self.break_on_hyphens: + pat = self.wordsep_re_uni + else: + pat = self.wordsep_simple_re_uni + else: + if self.break_on_hyphens: + pat = self.wordsep_re + else: + pat = self.wordsep_simple_re + chunks = pat.split(text) + chunks = filter(None, chunks) # remove empty chunks + return chunks + + def _fix_sentence_endings(self, chunks): + """_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] + which has one too few spaces; this method simply changes the one + space to two. + """ + i = 0 + patsearch = self.sentence_end_re.search + while i < len(chunks)-1: + if chunks[i+1] == " " and patsearch(chunks[i]): + chunks[i+1] = " " + i += 2 + else: + i += 1 + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + if width < 1: + space_left = 1 + else: + space_left = width - cur_len + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + if self.break_long_words: + cur_line.append(reversed_chunks[-1][:space_left]) + reversed_chunks[-1] = reversed_chunks[-1][space_left:] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. 
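A minimal sketch of what the break_long_words switch changes (illustrative only; it assumes this vendored module is importable as textwrap, and the inputs are arbitrary):

    import textwrap

    word = 'a' * 15
    # Default behaviour: the long word is split so no line exceeds the width.
    print textwrap.wrap(word, width=10)                          # ['aaaaaaaaaa', 'aaaaa']
    # With break_long_words=False the word is kept intact and the line overflows.
    print textwrap.wrap(word, width=10, break_long_words=False)  # ['aaaaaaaaaaaaaaa']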
+ + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chucks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == '' and lines: + del chunks[-1] + + while chunks: + l = len(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + l <= width: + cur_line.append(chunks.pop()) + cur_len += l + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and len(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + + # If the last chunk on this line is all whitespace, drop it. + if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': + del cur_line[-1] + + # Convert current line back to a string and store it in list + # of all lines (return value). + if cur_line: + lines.append(indent + ''.join(cur_line)) + + return lines + + + # -- Public interface ---------------------------------------------- + + def wrap(self, text): + """wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + """ + text = self._munge_whitespace(text) + chunks = self._split(text) + if self.fix_sentence_endings: + self._fix_sentence_endings(chunks) + return self._wrap_chunks(chunks) + + def fill(self, text): + """fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + """ + return "\n".join(self.wrap(text)) + + +# -- Convenience interface --------------------------------------------- + +def wrap(text, width=70, **kwargs): + """Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. 
By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.wrap(text) + +def fill(text, width=70, **kwargs): + """Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.fill(text) + + +# -- Loosely related functionality ------------------------------------- + +_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE) +_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE) + +def dedent(text): + """Remove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\\thello" are + considered to have no common leading whitespace. (This behaviour is + new in Python 2.5; older versions of this module incorrectly + expanded tabs before searching for common leading whitespace.) + """ + # Look for the longest leading string of spaces and tabs common to + # all lines. + margin = None + text = _whitespace_only_re.sub('', text) + indents = _leading_whitespace_re.findall(text) + for indent in indents: + if margin is None: + margin = indent + + # Current line more deeply indented than previous winner: + # no change (previous winner is still on top). + elif indent.startswith(margin): + pass + + # Current line consistent with and no deeper than previous winner: + # it's the new winner. + elif margin.startswith(indent): + margin = indent + + # Find the largest common whitespace between current line and previous + # winner. + else: + for i, (x, y) in enumerate(zip(margin, indent)): + if x != y: + margin = margin[:i] + break + else: + margin = margin[:len(indent)] + + # sanity check (testing/debugging only) + if 0 and margin: + for line in text.split("\n"): + assert not line or line.startswith(margin), \ + "line = %r, margin = %r" % (line, margin) + + if margin: + text = re.sub(r'(?m)^' + margin, '', text) + return text + +if __name__ == "__main__": + #print dedent("\tfoo\n\tbar") + #print dedent(" \thello there\n \t how are you?") + print dedent("Hello there.\n This is indented.") diff --git a/CVIssueCount/types.py b/CVIssueCount/types.py new file mode 100644 index 0000000..d414f54 --- /dev/null +++ b/CVIssueCount/types.py @@ -0,0 +1,86 @@ +"""Define names for all type symbols known in the standard interpreter. + +Types that are part of optional modules (e.g. array) are not listed. +""" +import sys + +# Iterators in Python aren't a matter of type but of protocol. A large +# and changing number of builtin types implement *some* flavor of +# iterator. Don't check the type! Use hasattr to check for both +# "__iter__" and "next" attributes instead. 
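A minimal sketch of the protocol-based test that the comment above recommends (the helper name is made up for illustration and is not part of this module):

    def looks_like_iterator(obj):
        # Check the iterator protocol, not the concrete type.
        return hasattr(obj, '__iter__') and hasattr(obj, 'next')

    print looks_like_iterator(iter([1, 2, 3]))   # True: list iterators implement both
    print looks_like_iterator([1, 2, 3])         # False: a list is iterable, not an iterator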
+ +NoneType = type(None) +TypeType = type +ObjectType = object + +IntType = int +LongType = long +FloatType = float +BooleanType = bool +try: + ComplexType = complex +except NameError: + pass + +StringType = str + +# StringTypes is already outdated. Instead of writing "type(x) in +# types.StringTypes", you should use "isinstance(x, basestring)". But +# we keep around for compatibility with Python 2.2. +try: + UnicodeType = unicode + StringTypes = (StringType, UnicodeType) +except NameError: + StringTypes = (StringType,) + +BufferType = buffer + +TupleType = tuple +ListType = list +DictType = DictionaryType = dict + +def _f(): pass +FunctionType = type(_f) +LambdaType = type(lambda: None) # Same as FunctionType +CodeType = type(_f.func_code) + +def _g(): + yield 1 +GeneratorType = type(_g()) + +class _C: + def _m(self): pass +ClassType = type(_C) +UnboundMethodType = type(_C._m) # Same as MethodType +_x = _C() +InstanceType = type(_x) +MethodType = type(_x._m) + +BuiltinFunctionType = type(len) +BuiltinMethodType = type([].append) # Same as BuiltinFunctionType + +ModuleType = type(sys) +FileType = file +XRangeType = xrange + +try: + raise TypeError +except TypeError: + tb = sys.exc_info()[2] + TracebackType = type(tb) + FrameType = type(tb.tb_frame) + del tb + +SliceType = slice +EllipsisType = type(Ellipsis) + +DictProxyType = type(TypeType.__dict__) +NotImplementedType = type(NotImplemented) + +# For Jython, the following two types are identical +GetSetDescriptorType = type(FunctionType.func_code) +MemberDescriptorType = type(FunctionType.func_globals) + +del sys, _f, _g, _C, _x # Not for export + +__all__ = list(n for n in globals() if n[:1] != '_') diff --git a/CVIssueCount/urllib.py b/CVIssueCount/urllib.py new file mode 100644 index 0000000..ccb0574 --- /dev/null +++ b/CVIssueCount/urllib.py @@ -0,0 +1,1637 @@ +"""Open an arbitrary URL. + +See the following document for more info on URLs: +"Names and Addresses, URIs, URLs, URNs, URCs", at +http://www.w3.org/pub/WWW/Addressing/Overview.html + +See also the HTTP spec (from which the error codes are derived): +"HTTP - Hypertext Transfer Protocol", at +http://www.w3.org/pub/WWW/Protocols/ + +Related standards and specs: +- RFC1808: the "relative URL" spec. (authoritative status) +- RFC1738 - the "URL standard". (authoritative status) +- RFC1630 - the "URI spec". (informational status) + +The object returned by URLopener().open(file) will differ per +protocol. All you know is that is has methods read(), readline(), +readlines(), fileno(), close() and info(). The read*(), fileno() +and close() methods work like those of open files. +The info() method returns a mimetools.Message object which can be +used to query various info about the object, if available. +(mimetools.Message objects are queried with the getheader() method.) 
+""" + +import string +import socket +import os +import time +import sys +import base64 +import re + +from urlparse import urljoin as basejoin + +__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve", + "urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus", + "urlencode", "url2pathname", "pathname2url", "splittag", + "localhost", "thishost", "ftperrors", "basejoin", "unwrap", + "splittype", "splithost", "splituser", "splitpasswd", "splitport", + "splitnport", "splitquery", "splitattr", "splitvalue", + "getproxies"] + +__version__ = '1.17' # XXX This version is not always updated :-( + +MAXFTPCACHE = 10 # Trim the ftp cache beyond this size + +# Helper for non-unix systems +if os.name == 'nt': + from nturl2path import url2pathname, pathname2url +elif os.name == 'riscos': + from rourl2path import url2pathname, pathname2url +else: + def url2pathname(pathname): + """OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.""" + return unquote(pathname) + + def pathname2url(pathname): + """OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.""" + return quote(pathname) + +# This really consists of two pieces: +# (1) a class which handles opening of all sorts of URLs +# (plus assorted utilities etc.) +# (2) a set of functions for parsing URLs +# XXX Should these be separated out into different modules? + + +# Shortcut for basic usage +_urlopener = None +def urlopen(url, data=None, proxies=None, context=None): + """Create a file-like object for the specified URL to read from.""" + from warnings import warnpy3k + warnpy3k("urllib.urlopen() has been removed in Python 3.0 in " + "favor of urllib2.urlopen()", stacklevel=2) + + global _urlopener + if proxies is not None or context is not None: + opener = FancyURLopener(proxies=proxies, context=context) + elif not _urlopener: + opener = FancyURLopener() + _urlopener = opener + else: + opener = _urlopener + if data is None: + return opener.open(url) + else: + return opener.open(url, data) +def urlretrieve(url, filename=None, reporthook=None, data=None, context=None): + global _urlopener + if context is not None: + opener = FancyURLopener(context=context) + elif not _urlopener: + _urlopener = opener = FancyURLopener() + else: + opener = _urlopener + return opener.retrieve(url, filename, reporthook, data) +def urlcleanup(): + if _urlopener: + _urlopener.cleanup() + _safe_quoters.clear() + ftpcache.clear() + +# check for SSL +try: + import ssl +except: + _have_ssl = False +else: + _have_ssl = True + +# exception raised when downloaded size does not match content-length +class ContentTooShortError(IOError): + def __init__(self, message, content): + IOError.__init__(self, message) + self.content = content + +ftpcache = {} +class URLopener: + """Class to open URLs. + This is a class rather than just a subroutine because we may need + more than one set of global protocol-specific options. 
+ Note -- this is a base class for those who don't want the + automatic handling of errors type 302 (relocated) and 401 + (authorization needed).""" + + __tempfiles = None + + version = "Python-urllib/%s" % __version__ + + # Constructor + def __init__(self, proxies=None, context=None, **x509): + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'has_key'), "proxies must be a mapping" + self.proxies = proxies + self.key_file = x509.get('key_file') + self.cert_file = x509.get('cert_file') + self.context = context + self.addheaders = [('User-Agent', self.version)] + self.__tempfiles = [] + self.__unlink = os.unlink # See cleanup() + self.tempcache = None + # Undocumented feature: if you assign {} to tempcache, + # it is used to cache files retrieved with + # self.retrieve(). This is not enabled by default + # since it does not work for changing documents (and I + # haven't got the logic to check expiration headers + # yet). + self.ftpcache = ftpcache + # Undocumented feature: you can use a different + # ftp cache by assigning to the .ftpcache member; + # in case you want logically independent URL openers + # XXX This is not threadsafe. Bah. + + def __del__(self): + self.close() + + def close(self): + self.cleanup() + + def cleanup(self): + # This code sometimes runs when the rest of this module + # has already been deleted, so it can't use any globals + # or import anything. + if self.__tempfiles: + for file in self.__tempfiles: + try: + self.__unlink(file) + except OSError: + pass + del self.__tempfiles[:] + if self.tempcache: + self.tempcache.clear() + + def addheader(self, *args): + """Add a header to be used by the HTTP interface only + e.g. u.addheader('Accept', 'sound/basic')""" + self.addheaders.append(args) + + # External interface + def open(self, fullurl, data=None): + """Use URLopener().open(file) instead of open(file, 'r').""" + fullurl = unwrap(toBytes(fullurl)) + # percent encode url, fixing lame server errors for e.g, like space + # within url paths. 
+ fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|") + if self.tempcache and fullurl in self.tempcache: + filename, headers = self.tempcache[fullurl] + fp = open(filename, 'rb') + return addinfourl(fp, headers, fullurl) + urltype, url = splittype(fullurl) + if not urltype: + urltype = 'file' + if urltype in self.proxies: + proxy = self.proxies[urltype] + urltype, proxyhost = splittype(proxy) + host, selector = splithost(proxyhost) + url = (host, fullurl) # Signal special case to open_*() + else: + proxy = None + name = 'open_' + urltype + self.type = urltype + name = name.replace('-', '_') + if not hasattr(self, name): + if proxy: + return self.open_unknown_proxy(proxy, fullurl, data) + else: + return self.open_unknown(fullurl, data) + try: + if data is None: + return getattr(self, name)(url) + else: + return getattr(self, name)(url, data) + except socket.error, msg: + raise IOError, ('socket error', msg), sys.exc_info()[2] + + def open_unknown(self, fullurl, data=None): + """Overridable interface to open unknown URL type.""" + type, url = splittype(fullurl) + raise IOError, ('url error', 'unknown url type', type) + + def open_unknown_proxy(self, proxy, fullurl, data=None): + """Overridable interface to open unknown URL type.""" + type, url = splittype(fullurl) + raise IOError, ('url error', 'invalid proxy for %s' % type, proxy) + + # External interface + def retrieve(self, url, filename=None, reporthook=None, data=None): + """retrieve(url) returns (filename, headers) for a local object + or (tempfilename, headers) for a remote object.""" + url = unwrap(toBytes(url)) + if self.tempcache and url in self.tempcache: + return self.tempcache[url] + type, url1 = splittype(url) + if filename is None and (not type or type == 'file'): + try: + fp = self.open_local_file(url1) + hdrs = fp.info() + fp.close() + return url2pathname(splithost(url1)[1]), hdrs + except IOError: + pass + fp = self.open(url, data) + try: + headers = fp.info() + if filename: + tfp = open(filename, 'wb') + else: + import tempfile + garbage, path = splittype(url) + garbage, path = splithost(path or "") + path, garbage = splitquery(path or "") + path, garbage = splitattr(path or "") + suffix = os.path.splitext(path)[1] + (fd, filename) = tempfile.mkstemp(suffix) + self.__tempfiles.append(filename) + tfp = os.fdopen(fd, 'wb') + try: + result = filename, headers + if self.tempcache is not None: + self.tempcache[url] = result + bs = 1024*8 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, bs, size) + while 1: + block = fp.read(bs) + if block == "": + break + read += len(block) + tfp.write(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, bs, size) + finally: + tfp.close() + finally: + fp.close() + + # raise exception if actual size does not match content-length header + if size >= 0 and read < size: + raise ContentTooShortError("retrieval incomplete: got only %i out " + "of %i bytes" % (read, size), result) + + return result + + # Each method named open_ knows how to open that type of URL + + def open_http(self, url, data=None): + """Use HTTP protocol.""" + import httplib + user_passwd = None + proxy_passwd= None + if isinstance(url, str): + host, selector = splithost(url) + if host: + user_passwd, host = splituser(host) + host = unquote(host) + realhost = host + else: + host, selector = url + # check whether the proxy contains authorization information + proxy_passwd, host = splituser(host) + # now we proceed 
with the url we want to obtain + urltype, rest = splittype(selector) + url = rest + user_passwd = None + if urltype.lower() != 'http': + realhost = None + else: + realhost, rest = splithost(rest) + if realhost: + user_passwd, realhost = splituser(realhost) + if user_passwd: + selector = "%s://%s%s" % (urltype, realhost, rest) + if proxy_bypass(realhost): + host = realhost + + #print "proxy via http:", host, selector + if not host: raise IOError, ('http error', 'no host given') + + if proxy_passwd: + proxy_passwd = unquote(proxy_passwd) + proxy_auth = base64.b64encode(proxy_passwd).strip() + else: + proxy_auth = None + + if user_passwd: + user_passwd = unquote(user_passwd) + auth = base64.b64encode(user_passwd).strip() + else: + auth = None + h = httplib.HTTP(host) + if data is not None: + h.putrequest('POST', selector) + h.putheader('Content-Type', 'application/x-www-form-urlencoded') + h.putheader('Content-Length', '%d' % len(data)) + else: + h.putrequest('GET', selector) + if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) + if auth: h.putheader('Authorization', 'Basic %s' % auth) + if realhost: h.putheader('Host', realhost) + for args in self.addheaders: h.putheader(*args) + h.endheaders(data) + errcode, errmsg, headers = h.getreply() + fp = h.getfile() + if errcode == -1: + if fp: fp.close() + # something went wrong with the HTTP status line + raise IOError, ('http protocol error', 0, + 'got a bad status line', None) + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if (200 <= errcode < 300): + return addinfourl(fp, headers, "http:" + url, errcode) + else: + if data is None: + return self.http_error(url, fp, errcode, errmsg, headers) + else: + return self.http_error(url, fp, errcode, errmsg, headers, data) + + def http_error(self, url, fp, errcode, errmsg, headers, data=None): + """Handle http errors. 
+ Derived class can override this, or provide specific handlers + named http_error_DDD where DDD is the 3-digit error code.""" + # First check if there's a specific handler for this error + name = 'http_error_%d' % errcode + if hasattr(self, name): + method = getattr(self, name) + if data is None: + result = method(url, fp, errcode, errmsg, headers) + else: + result = method(url, fp, errcode, errmsg, headers, data) + if result: return result + return self.http_error_default(url, fp, errcode, errmsg, headers) + + def http_error_default(self, url, fp, errcode, errmsg, headers): + """Default error handler: close the connection and raise IOError.""" + fp.close() + raise IOError, ('http error', errcode, errmsg, headers) + + if _have_ssl: + def open_https(self, url, data=None): + """Use HTTPS protocol.""" + + import httplib + user_passwd = None + proxy_passwd = None + if isinstance(url, str): + host, selector = splithost(url) + if host: + user_passwd, host = splituser(host) + host = unquote(host) + realhost = host + else: + host, selector = url + # here, we determine, whether the proxy contains authorization information + proxy_passwd, host = splituser(host) + urltype, rest = splittype(selector) + url = rest + user_passwd = None + if urltype.lower() != 'https': + realhost = None + else: + realhost, rest = splithost(rest) + if realhost: + user_passwd, realhost = splituser(realhost) + if user_passwd: + selector = "%s://%s%s" % (urltype, realhost, rest) + #print "proxy via https:", host, selector + if not host: raise IOError, ('https error', 'no host given') + if proxy_passwd: + proxy_passwd = unquote(proxy_passwd) + proxy_auth = base64.b64encode(proxy_passwd).strip() + else: + proxy_auth = None + if user_passwd: + user_passwd = unquote(user_passwd) + auth = base64.b64encode(user_passwd).strip() + else: + auth = None + h = httplib.HTTPS(host, 0, + key_file=self.key_file, + cert_file=self.cert_file, + context=self.context) + if data is not None: + h.putrequest('POST', selector) + h.putheader('Content-Type', + 'application/x-www-form-urlencoded') + h.putheader('Content-Length', '%d' % len(data)) + else: + h.putrequest('GET', selector) + if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) + if auth: h.putheader('Authorization', 'Basic %s' % auth) + if realhost: h.putheader('Host', realhost) + for args in self.addheaders: h.putheader(*args) + h.endheaders(data) + errcode, errmsg, headers = h.getreply() + fp = h.getfile() + if errcode == -1: + if fp: fp.close() + # something went wrong with the HTTP status line + raise IOError, ('http protocol error', 0, + 'got a bad status line', None) + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. 
+ if (200 <= errcode < 300): + return addinfourl(fp, headers, "https:" + url, errcode) + else: + if data is None: + return self.http_error(url, fp, errcode, errmsg, headers) + else: + return self.http_error(url, fp, errcode, errmsg, headers, + data) + + def open_file(self, url): + """Use local file or FTP depending on form of URL.""" + if not isinstance(url, str): + raise IOError, ('file error', 'proxy support for file protocol currently not implemented') + if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': + return self.open_ftp(url) + else: + return self.open_local_file(url) + + def open_local_file(self, url): + """Use local file.""" + import mimetypes, mimetools, email.utils + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + host, file = splithost(url) + localname = url2pathname(file) + try: + stats = os.stat(localname) + except OSError, e: + raise IOError(e.errno, e.strerror, e.filename) + size = stats.st_size + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_type(url)[0] + headers = mimetools.Message(StringIO( + 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified))) + if not host: + urlfile = file + if file[:1] == '/': + urlfile = 'file://' + file + elif file[:2] == './': + raise ValueError("local file url may start with / or file:. Unknown url of type: %s" % url) + return addinfourl(open(localname, 'rb'), + headers, urlfile) + host, port = splitport(host) + if not port \ + and socket.gethostbyname(host) in (localhost(), thishost()): + urlfile = file + if file[:1] == '/': + urlfile = 'file://' + file + return addinfourl(open(localname, 'rb'), + headers, urlfile) + raise IOError, ('local file error', 'not on local host') + + def open_ftp(self, url): + """Use FTP protocol.""" + if not isinstance(url, str): + raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented') + import mimetypes, mimetools + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + host, path = splithost(url) + if not host: raise IOError, ('ftp error', 'no host given') + host, port = splitport(host) + user, host = splituser(host) + if user: user, passwd = splitpasswd(user) + else: passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + host = socket.gethostbyname(host) + if not port: + import ftplib + port = ftplib.FTP_PORT + else: + port = int(port) + path, attrs = splitattr(path) + path = unquote(path) + dirs = path.split('/') + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: dirs = dirs[1:] + if dirs and not dirs[0]: dirs[0] = '/' + key = user, host, port, '/'.join(dirs) + # XXX thread unsafe! 
+ if len(self.ftpcache) > MAXFTPCACHE: + # Prune the cache, rather arbitrarily + for k in self.ftpcache.keys(): + if k != key: + v = self.ftpcache[k] + del self.ftpcache[k] + v.close() + try: + if not key in self.ftpcache: + self.ftpcache[key] = \ + ftpwrapper(user, passwd, host, port, dirs) + if not file: type = 'D' + else: type = 'I' + for attr in attrs: + attr, value = splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + (fp, retrlen) = self.ftpcache[key].retrfile(file, type) + mtype = mimetypes.guess_type("ftp:" + url)[0] + headers = "" + if mtype: + headers += "Content-Type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-Length: %d\n" % retrlen + headers = mimetools.Message(StringIO(headers)) + return addinfourl(fp, headers, "ftp:" + url) + except ftperrors(), msg: + raise IOError, ('ftp error', msg), sys.exc_info()[2] + + def open_data(self, url, data=None): + """Use "data" URL.""" + if not isinstance(url, str): + raise IOError, ('data error', 'proxy support for data protocol currently not implemented') + # ignore POSTed data + # + # syntax of data URLs: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + import mimetools + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + try: + [type, data] = url.split(',', 1) + except ValueError: + raise IOError, ('data error', 'bad data URL') + if not type: + type = 'text/plain;charset=US-ASCII' + semi = type.rfind(';') + if semi >= 0 and '=' not in type[semi:]: + encoding = type[semi+1:] + type = type[:semi] + else: + encoding = '' + msg = [] + msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT', + time.gmtime(time.time()))) + msg.append('Content-type: %s' % type) + if encoding == 'base64': + data = base64.decodestring(data) + else: + data = unquote(data) + msg.append('Content-Length: %d' % len(data)) + msg.append('') + msg.append(data) + msg = '\n'.join(msg) + f = StringIO(msg) + headers = mimetools.Message(f, 0) + #f.fileno = None # needed for addinfourl + return addinfourl(f, headers, url) + + +class FancyURLopener(URLopener): + """Derived class with handlers for errors we can handle (perhaps).""" + + def __init__(self, *args, **kwargs): + URLopener.__init__(self, *args, **kwargs) + self.auth_cache = {} + self.tries = 0 + self.maxtries = 10 + + def http_error_default(self, url, fp, errcode, errmsg, headers): + """Default error handling -- don't raise an exception.""" + return addinfourl(fp, headers, "http:" + url, errcode) + + def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): + """Error 302 -- relocated (temporarily).""" + self.tries += 1 + if self.maxtries and self.tries >= self.maxtries: + if hasattr(self, "http_error_500"): + meth = self.http_error_500 + else: + meth = self.http_error_default + self.tries = 0 + return meth(url, fp, 500, + "Internal Server Error: Redirect Recursion", headers) + result = self.redirect_internal(url, fp, errcode, errmsg, headers, + data) + self.tries = 0 + return result + + def redirect_internal(self, url, fp, errcode, errmsg, headers, data): + if 'location' in headers: + newurl = headers['location'] + elif 'uri' in headers: + newurl = headers['uri'] + else: + return + fp.close() + # In case the server sent a relative URL, join with original: + newurl = basejoin(self.type + ":" + url, newurl) + + # For security 
reasons we do not allow redirects to protocols + # other than HTTP, HTTPS or FTP. + newurl_lower = newurl.lower() + if not (newurl_lower.startswith('http://') or + newurl_lower.startswith('https://') or + newurl_lower.startswith('ftp://')): + raise IOError('redirect error', errcode, + errmsg + " - Redirection to url '%s' is not allowed" % + newurl, + headers) + + return self.open(newurl) + + def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): + """Error 301 -- also relocated (permanently).""" + return self.http_error_302(url, fp, errcode, errmsg, headers, data) + + def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): + """Error 303 -- also relocated (essentially identical to 302).""" + return self.http_error_302(url, fp, errcode, errmsg, headers, data) + + def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): + """Error 307 -- relocated, but turn POST into error.""" + if data is None: + return self.http_error_302(url, fp, errcode, errmsg, headers, data) + else: + return self.http_error_default(url, fp, errcode, errmsg, headers) + + def http_error_401(self, url, fp, errcode, errmsg, headers, data=None): + """Error 401 -- authentication required. + This function supports Basic authentication only.""" + if not 'www-authenticate' in headers: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + stuff = headers['www-authenticate'] + import re + match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) + if not match: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + scheme, realm = match.groups() + if scheme.lower() != 'basic': + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + name = 'retry_' + self.type + '_basic_auth' + if data is None: + return getattr(self,name)(url, realm) + else: + return getattr(self,name)(url, realm, data) + + def http_error_407(self, url, fp, errcode, errmsg, headers, data=None): + """Error 407 -- proxy authentication required. 
+ This function supports Basic authentication only.""" + if not 'proxy-authenticate' in headers: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + stuff = headers['proxy-authenticate'] + import re + match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) + if not match: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + scheme, realm = match.groups() + if scheme.lower() != 'basic': + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + name = 'retry_proxy_' + self.type + '_basic_auth' + if data is None: + return getattr(self,name)(url, realm) + else: + return getattr(self,name)(url, realm, data) + + def retry_proxy_http_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + newurl = 'http://' + host + selector + proxy = self.proxies['http'] + urltype, proxyhost = splittype(proxy) + proxyhost, proxyselector = splithost(proxyhost) + i = proxyhost.find('@') + 1 + proxyhost = proxyhost[i:] + user, passwd = self.get_user_passwd(proxyhost, realm, i) + if not (user or passwd): return None + proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost + self.proxies['http'] = 'http://' + proxyhost + proxyselector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def retry_proxy_https_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + newurl = 'https://' + host + selector + proxy = self.proxies['https'] + urltype, proxyhost = splittype(proxy) + proxyhost, proxyselector = splithost(proxyhost) + i = proxyhost.find('@') + 1 + proxyhost = proxyhost[i:] + user, passwd = self.get_user_passwd(proxyhost, realm, i) + if not (user or passwd): return None + proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost + self.proxies['https'] = 'https://' + proxyhost + proxyselector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def retry_http_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + i = host.find('@') + 1 + host = host[i:] + user, passwd = self.get_user_passwd(host, realm, i) + if not (user or passwd): return None + host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host + newurl = 'http://' + host + selector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def retry_https_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + i = host.find('@') + 1 + host = host[i:] + user, passwd = self.get_user_passwd(host, realm, i) + if not (user or passwd): return None + host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host + newurl = 'https://' + host + selector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def get_user_passwd(self, host, realm, clear_cache=0): + key = realm + '@' + host.lower() + if key in self.auth_cache: + if clear_cache: + del self.auth_cache[key] + else: + return self.auth_cache[key] + user, passwd = self.prompt_user_passwd(host, realm) + if user or passwd: self.auth_cache[key] = (user, passwd) + return user, passwd + + def prompt_user_passwd(self, host, realm): + """Override this in a GUI environment!""" + import getpass + try: + user = raw_input("Enter username for %s at %s: " % (realm, + host)) + passwd = getpass.getpass("Enter password for %s in %s at %s: " % + (user, realm, host)) + return user, passwd + except KeyboardInterrupt: + print + return None, None + + +# 
Utility functions + +_localhost = None +def localhost(): + """Return the IP address of the magic hostname 'localhost'.""" + global _localhost + if _localhost is None: + _localhost = socket.gethostbyname('localhost') + return _localhost + +_thishost = None +def thishost(): + """Return the IP address of the current host.""" + global _thishost + if _thishost is None: + try: + _thishost = socket.gethostbyname(socket.gethostname()) + except socket.gaierror: + _thishost = socket.gethostbyname('localhost') + return _thishost + +_ftperrors = None +def ftperrors(): + """Return the set of errors raised by the FTP class.""" + global _ftperrors + if _ftperrors is None: + import ftplib + _ftperrors = ftplib.all_errors + return _ftperrors + +_noheaders = None +def noheaders(): + """Return an empty mimetools.Message object.""" + global _noheaders + if _noheaders is None: + import mimetools + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + _noheaders = mimetools.Message(StringIO(), 0) + _noheaders.fp.close() # Recycle file descriptor + return _noheaders + + +# Utility classes + +class ftpwrapper: + """Class used by open_ftp() for cache of open FTP connections.""" + + def __init__(self, user, passwd, host, port, dirs, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + persistent=True): + self.user = user + self.passwd = passwd + self.host = host + self.port = port + self.dirs = dirs + self.timeout = timeout + self.refcount = 0 + self.keepalive = persistent + try: + self.init() + except: + self.close() + raise + + def init(self): + import ftplib + self.busy = 0 + self.ftp = ftplib.FTP() + self.ftp.connect(self.host, self.port, self.timeout) + self.ftp.login(self.user, self.passwd) + _target = '/'.join(self.dirs) + self.ftp.cwd(_target) + + def retrfile(self, file, type): + import ftplib + self.endtransfer() + if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 + else: cmd = 'TYPE ' + type; isdir = 0 + try: + self.ftp.voidcmd(cmd) + except ftplib.all_errors: + self.init() + self.ftp.voidcmd(cmd) + conn = None + if file and not isdir: + # Try to retrieve as a file + try: + cmd = 'RETR ' + file + conn, retrlen = self.ftp.ntransfercmd(cmd) + except ftplib.error_perm, reason: + if str(reason)[:3] != '550': + raise IOError, ('ftp error', reason), sys.exc_info()[2] + if not conn: + # Set transfer mode to ASCII! + self.ftp.voidcmd('TYPE A') + # Try a directory listing. Verify that directory exists. 
+ if file: + pwd = self.ftp.pwd() + try: + try: + self.ftp.cwd(file) + except ftplib.error_perm, reason: + raise IOError, ('ftp error', reason), sys.exc_info()[2] + finally: + self.ftp.cwd(pwd) + cmd = 'LIST ' + file + else: + cmd = 'LIST' + conn, retrlen = self.ftp.ntransfercmd(cmd) + self.busy = 1 + ftpobj = addclosehook(conn.makefile('rb'), self.file_close) + self.refcount += 1 + conn.close() + # Pass back both a suitably decorated object and a retrieval length + return (ftpobj, retrlen) + + def endtransfer(self): + if not self.busy: + return + self.busy = 0 + try: + self.ftp.voidresp() + except ftperrors(): + pass + + def close(self): + self.keepalive = False + if self.refcount <= 0: + self.real_close() + + def file_close(self): + self.endtransfer() + self.refcount -= 1 + if self.refcount <= 0 and not self.keepalive: + self.real_close() + + def real_close(self): + self.endtransfer() + try: + self.ftp.close() + except ftperrors(): + pass + +class addbase: + """Base class for addinfo and addclosehook.""" + + def __init__(self, fp): + self.fp = fp + self.read = self.fp.read + self.readline = self.fp.readline + if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines + if hasattr(self.fp, "fileno"): + self.fileno = self.fp.fileno + else: + self.fileno = lambda: None + if hasattr(self.fp, "__iter__"): + self.__iter__ = self.fp.__iter__ + if hasattr(self.fp, "next"): + self.next = self.fp.next + + def __repr__(self): + return '<%s at %r whose fp = %r>' % (self.__class__.__name__, + id(self), self.fp) + + def close(self): + self.read = None + self.readline = None + self.readlines = None + self.fileno = None + if self.fp: self.fp.close() + self.fp = None + +class addclosehook(addbase): + """Class to add a close hook to an open file.""" + + def __init__(self, fp, closehook, *hookargs): + addbase.__init__(self, fp) + self.closehook = closehook + self.hookargs = hookargs + + def close(self): + try: + closehook = self.closehook + hookargs = self.hookargs + if closehook: + self.closehook = None + self.hookargs = None + closehook(*hookargs) + finally: + addbase.close(self) + + +class addinfo(addbase): + """class to add an info() method to an open file.""" + + def __init__(self, fp, headers): + addbase.__init__(self, fp) + self.headers = headers + + def info(self): + return self.headers + +class addinfourl(addbase): + """class to add info() and geturl() methods to an open file.""" + + def __init__(self, fp, headers, url, code=None): + addbase.__init__(self, fp) + self.headers = headers + self.url = url + self.code = code + + def info(self): + return self.headers + + def getcode(self): + return self.code + + def geturl(self): + return self.url + + +# Utilities to parse URLs (most of these return None for missing parts): +# unwrap('') --> 'type://host/path' +# splittype('type:opaquestring') --> 'type', 'opaquestring' +# splithost('//host[:port]/path') --> 'host[:port]', '/path' +# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]' +# splitpasswd('user:passwd') -> 'user', 'passwd' +# splitport('host:port') --> 'host', 'port' +# splitquery('/path?query') --> '/path', 'query' +# splittag('/path#tag') --> '/path', 'tag' +# splitattr('/path;attr1=value1;attr2=value2;...') -> +# '/path', ['attr1=value1', 'attr2=value2', ...] 
+# splitvalue('attr=value') --> 'attr', 'value' +# unquote('abc%20def') -> 'abc def' +# quote('abc def') -> 'abc%20def') + +try: + unicode +except NameError: + def _is_unicode(x): + return 0 +else: + def _is_unicode(x): + return isinstance(x, unicode) + +def toBytes(url): + """toBytes(u"URL") --> 'URL'.""" + # Most URL schemes require ASCII. If that changes, the conversion + # can be relaxed + if _is_unicode(url): + try: + url = url.encode("ASCII") + except UnicodeError: + raise UnicodeError("URL " + repr(url) + + " contains non-ASCII characters") + return url + +def unwrap(url): + """unwrap('') --> 'type://host/path'.""" + url = url.strip() + if url[:1] == '<' and url[-1:] == '>': + url = url[1:-1].strip() + if url[:4] == 'URL:': url = url[4:].strip() + return url + +_typeprog = None +def splittype(url): + """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" + global _typeprog + if _typeprog is None: + import re + _typeprog = re.compile('^([^/:]+):') + + match = _typeprog.match(url) + if match: + scheme = match.group(1) + return scheme.lower(), url[len(scheme) + 1:] + return None, url + +_hostprog = None +def splithost(url): + """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" + global _hostprog + if _hostprog is None: + import re + _hostprog = re.compile('^//([^/?]*)(.*)$') + + match = _hostprog.match(url) + if match: + host_port = match.group(1) + path = match.group(2) + if path and not path.startswith('/'): + path = '/' + path + return host_port, path + return None, url + +_userprog = None +def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + global _userprog + if _userprog is None: + import re + _userprog = re.compile('^(.*)@(.*)$') + + match = _userprog.match(host) + if match: return match.group(1, 2) + return None, host + +_passwdprog = None +def splitpasswd(user): + """splitpasswd('user:passwd') -> 'user', 'passwd'.""" + global _passwdprog + if _passwdprog is None: + import re + _passwdprog = re.compile('^([^:]*):(.*)$',re.S) + + match = _passwdprog.match(user) + if match: return match.group(1, 2) + return user, None + +# splittag('/path#tag') --> '/path', 'tag' +_portprog = None +def splitport(host): + """splitport('host:port') --> 'host', 'port'.""" + global _portprog + if _portprog is None: + import re + _portprog = re.compile('^(.*):([0-9]*)$') + + match = _portprog.match(host) + if match: + host, port = match.groups() + if port: + return host, port + return host, None + +_nportprog = None +def splitnport(host, defport=-1): + """Split host and port, returning numeric port. + Return given default port if no ':' found; defaults to -1. + Return numerical port if a valid number are found after ':'. 
+ Return None if ':' but not a valid number.""" + global _nportprog + if _nportprog is None: + import re + _nportprog = re.compile('^(.*):(.*)$') + + match = _nportprog.match(host) + if match: + host, port = match.group(1, 2) + if port: + try: + nport = int(port) + except ValueError: + nport = None + return host, nport + return host, defport + +_queryprog = None +def splitquery(url): + """splitquery('/path?query') --> '/path', 'query'.""" + global _queryprog + if _queryprog is None: + import re + _queryprog = re.compile('^(.*)\?([^?]*)$') + + match = _queryprog.match(url) + if match: return match.group(1, 2) + return url, None + +_tagprog = None +def splittag(url): + """splittag('/path#tag') --> '/path', 'tag'.""" + global _tagprog + if _tagprog is None: + import re + _tagprog = re.compile('^(.*)#([^#]*)$') + + match = _tagprog.match(url) + if match: return match.group(1, 2) + return url, None + +def splitattr(url): + """splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].""" + words = url.split(';') + return words[0], words[1:] + +_valueprog = None +def splitvalue(attr): + """splitvalue('attr=value') --> 'attr', 'value'.""" + global _valueprog + if _valueprog is None: + import re + _valueprog = re.compile('^([^=]*)=(.*)$') + + match = _valueprog.match(attr) + if match: return match.group(1, 2) + return attr, None + +# urlparse contains a duplicate of this method to avoid a circular import. If +# you update this method, also update the copy in urlparse. This code +# duplication does not exist in Python3. + +_hexdig = '0123456789ABCDEFabcdef' +_hextochr = dict((a + b, chr(int(a + b, 16))) + for a in _hexdig for b in _hexdig) +_asciire = re.compile('([\x00-\x7f]+)') + +def unquote(s): + """unquote('abc%20def') -> 'abc def'.""" + if _is_unicode(s): + if '%' not in s: + return s + bits = _asciire.split(s) + res = [bits[0]] + append = res.append + for i in range(1, len(bits), 2): + append(unquote(str(bits[i])).decode('latin1')) + append(bits[i + 1]) + return ''.join(res) + + bits = s.split('%') + # fastpath + if len(bits) == 1: + return s + res = [bits[0]] + append = res.append + for item in bits[1:]: + try: + append(_hextochr[item[:2]]) + append(item[2:]) + except KeyError: + append('%') + append(item) + return ''.join(res) + +def unquote_plus(s): + """unquote('%7e/abc+def') -> '~/abc def'""" + s = s.replace('+', ' ') + return unquote(s) + +always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789' '_.-') +_safe_map = {} +for i, c in zip(xrange(256), str(bytearray(xrange(256)))): + _safe_map[c] = c if (i < 128 and c in always_safe) else '%{:02X}'.format(i) +_safe_quoters = {} + +def quote(s, safe='/'): + """quote('abc def') -> 'abc%20def' + + Each part of a URL, e.g. the path info, the query, etc., has a + different set of reserved characters that must be quoted. + + RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists + the following reserved characters. + + reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | + "$" | "," + + Each of these characters is reserved in some component of a URL, + but not necessarily in all of them. + + By default, the quote function is intended for quoting the path + section of a URL. Thus, it will not encode '/'. This character + is reserved, but in typical usage the quote function is being + called on a path where the existing slash characters are used as + reserved characters. 
+ """ + # fastpath + if not s: + if s is None: + raise TypeError('None object cannot be quoted') + return s + cachekey = (safe, always_safe) + try: + (quoter, safe) = _safe_quoters[cachekey] + except KeyError: + safe_map = _safe_map.copy() + safe_map.update([(c, c) for c in safe]) + quoter = safe_map.__getitem__ + safe = always_safe + safe + _safe_quoters[cachekey] = (quoter, safe) + if not s.rstrip(safe): + return s + return ''.join(map(quoter, s)) + +def quote_plus(s, safe=''): + """Quote the query fragment of a URL; replacing ' ' with '+'""" + if ' ' in s: + s = quote(s, safe + ' ') + return s.replace(' ', '+') + return quote(s, safe) + +def urlencode(query, doseq=0): + """Encode a sequence of two-element tuples or dictionary into a URL query string. + + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + """ + + if hasattr(query,"items"): + # mapping objects + query = query.items() + else: + # it's a bother at times that strings and string-like objects are + # sequences... + try: + # non-sequence items should not work with len() + # non-empty strings will fail this + if len(query) and not isinstance(query[0], tuple): + raise TypeError + # zero-length sequences of all types will get here and succeed, + # but that's a minor nit - since the original implementation + # allowed empty dicts that type of behavior probably should be + # preserved for consistency + except TypeError: + ty,va,tb = sys.exc_info() + raise TypeError, "not a valid non-string sequence or mapping object", tb + + l = [] + if not doseq: + # preserve old behavior + for k, v in query: + k = quote_plus(str(k)) + v = quote_plus(str(v)) + l.append(k + '=' + v) + else: + for k, v in query: + k = quote_plus(str(k)) + if isinstance(v, str): + v = quote_plus(v) + l.append(k + '=' + v) + elif _is_unicode(v): + # is there a reasonable way to convert to ASCII? + # encode generates a string, but "replace" or "ignore" + # lose information and "strict" can raise UnicodeError + v = quote_plus(v.encode("ASCII","replace")) + l.append(k + '=' + v) + else: + try: + # is this a sufficient test for sequence-ness? + len(v) + except TypeError: + # not a sequence + v = quote_plus(str(v)) + l.append(k + '=' + v) + else: + # loop over the sequence + for elt in v: + l.append(k + '=' + quote_plus(str(elt))) + return '&'.join(l) + +# Proxy handling +def getproxies_environment(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Scan the environment for variables named _proxy; + this seems to be the standard convention. If you need a + different way, you can pass a proxies dictionary to the + [Fancy]URLopener constructor. + + """ + proxies = {} + for name, value in os.environ.items(): + name = name.lower() + if value and name[-6:] == '_proxy': + proxies[name[:-6]] = value + return proxies + +def proxy_bypass_environment(host): + """Test if proxies should not be used for a particular host. + + Checks the environment for a variable named no_proxy, which should + be a list of DNS suffixes separated by commas, or '*' for all hosts. 
+ """ + no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '') + # '*' is special case for always bypass + if no_proxy == '*': + return 1 + # strip port off host + hostonly, port = splitport(host) + # check if the host ends with any of the DNS suffixes + no_proxy_list = [proxy.strip() for proxy in no_proxy.split(',')] + for name in no_proxy_list: + if name and (hostonly.endswith(name) or host.endswith(name)): + return 1 + # otherwise, don't bypass + return 0 + + +if sys.platform == 'darwin': + from _scproxy import _get_proxy_settings, _get_proxies + + def proxy_bypass_macosx_sysconf(host): + """ + Return True iff this host shouldn't be accessed using a proxy + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + """ + import re + import socket + from fnmatch import fnmatch + + hostonly, port = splitport(host) + + def ip2num(ipAddr): + parts = ipAddr.split('.') + parts = map(int, parts) + if len(parts) != 4: + parts = (parts + [0, 0, 0, 0])[:4] + return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] + + proxy_settings = _get_proxy_settings() + + # Check for simple host names: + if '.' not in host: + if proxy_settings['exclude_simple']: + return True + + hostIP = None + + for value in proxy_settings.get('exceptions', ()): + # Items in the list are strings like these: *.local, 169.254/16 + if not value: continue + + m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) + if m is not None: + if hostIP is None: + try: + hostIP = socket.gethostbyname(hostonly) + hostIP = ip2num(hostIP) + except socket.error: + continue + + base = ip2num(m.group(1)) + mask = m.group(2) + if mask is None: + mask = 8 * (m.group(1).count('.') + 1) + + else: + mask = int(mask[1:]) + mask = 32 - mask + + if (hostIP >> mask) == (base >> mask): + return True + + elif fnmatch(host, value): + return True + + return False + + def getproxies_macosx_sysconf(): + """Return a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + """ + return _get_proxies() + + def proxy_bypass(host): + if getproxies_environment(): + return proxy_bypass_environment(host) + else: + return proxy_bypass_macosx_sysconf(host) + + def getproxies(): + return getproxies_environment() or getproxies_macosx_sysconf() + +elif os.name == 'nt': + def getproxies_registry(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. + + """ + proxies = {} + try: + import _winreg + except ImportError: + # Std module, so should be around - but you never know! 
+            return proxies
+        try:
+            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
+                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+            proxyEnable = _winreg.QueryValueEx(internetSettings,
+                                               'ProxyEnable')[0]
+            if proxyEnable:
+                # Returned as Unicode but problems if not converted to ASCII
+                proxyServer = str(_winreg.QueryValueEx(internetSettings,
+                                                       'ProxyServer')[0])
+                if '=' in proxyServer:
+                    # Per-protocol settings
+                    for p in proxyServer.split(';'):
+                        protocol, address = p.split('=', 1)
+                        # See if address has a type:// prefix
+                        import re
+                        if not re.match('^([^/:]+)://', address):
+                            address = '%s://%s' % (protocol, address)
+                        proxies[protocol] = address
+                else:
+                    # Use one setting for all protocols
+                    if proxyServer[:5] == 'http:':
+                        proxies['http'] = proxyServer
+                    else:
+                        proxies['http'] = 'http://%s' % proxyServer
+                        proxies['https'] = 'https://%s' % proxyServer
+                        proxies['ftp'] = 'ftp://%s' % proxyServer
+            internetSettings.Close()
+        except (WindowsError, ValueError, TypeError):
+            # Either registry key not found etc, or the value in an
+            # unexpected format.
+            # proxies already set up to be empty so nothing to do
+            pass
+        return proxies
+
+    def getproxies():
+        """Return a dictionary of scheme -> proxy server URL mappings.
+
+        Returns settings gathered from the environment, if specified,
+        or the registry.
+
+        """
+        return getproxies_environment() or getproxies_registry()
+
+    def proxy_bypass_registry(host):
+        try:
+            import _winreg
+            import re
+        except ImportError:
+            # Std modules, so should be around - but you never know!
+            return 0
+        try:
+            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
+                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+            proxyEnable = _winreg.QueryValueEx(internetSettings,
+                                               'ProxyEnable')[0]
+            proxyOverride = str(_winreg.QueryValueEx(internetSettings,
+                                                     'ProxyOverride')[0])
+            # ^^^^ Returned as Unicode but problems if not converted to ASCII
+        except WindowsError:
+            return 0
+        if not proxyEnable or not proxyOverride:
+            return 0
+        # try to make a host list from name and IP address.
+        rawHost, port = splitport(host)
+        host = [rawHost]
+        try:
+            addr = socket.gethostbyname(rawHost)
+            if addr != rawHost:
+                host.append(addr)
+        except socket.error:
+            pass
+        try:
+            fqdn = socket.getfqdn(rawHost)
+            if fqdn != rawHost:
+                host.append(fqdn)
+        except socket.error:
+            pass
+        # make a check value list from the registry entry: replace the
+        # '<local>' string by the localhost entry and the corresponding
+        # canonical entry.
+        proxyOverride = proxyOverride.split(';')
+        # now check if we match one of the registry values.
+        for test in proxyOverride:
+            if test == '<local>':
+                if '.' not in rawHost:
+                    return 1
+            test = test.replace(".", r"\.")     # mask dots
+            test = test.replace("*", r".*")     # change glob sequence
+            test = test.replace("?", r".")      # change glob char
+            for val in host:
+                # print "%s <--> %s" %( test, val )
+                if re.match(test, val, re.I):
+                    return 1
+        return 0
+
+    def proxy_bypass(host):
+        """Return a dictionary of scheme -> proxy server URL mappings.
+
+        Returns settings gathered from the environment, if specified,
+        or the registry.
+ + """ + if getproxies_environment(): + return proxy_bypass_environment(host) + else: + return proxy_bypass_registry(host) + +else: + # By default use environment variables + getproxies = getproxies_environment + proxy_bypass = proxy_bypass_environment + +# Test and time quote() and unquote() +def test1(): + s = '' + for i in range(256): s = s + chr(i) + s = s*4 + t0 = time.time() + qs = quote(s) + uqs = unquote(qs) + t1 = time.time() + if uqs != s: + print 'Wrong!' + print repr(s) + print repr(qs) + print repr(uqs) + print round(t1 - t0, 3), 'sec' + + +def reporthook(blocknum, blocksize, totalsize): + # Report during remote transfers + print "Block number: %d, Block size: %d, Total size: %d" % ( + blocknum, blocksize, totalsize) diff --git a/CVIssueCount/urllib2.py b/CVIssueCount/urllib2.py new file mode 100644 index 0000000..9277b1d --- /dev/null +++ b/CVIssueCount/urllib2.py @@ -0,0 +1,1488 @@ +"""An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler +deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +IOError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. + +BaseHandler -- + +exceptions: +URLError -- A subclass of IOError, individual protocols have their own +specific subclass. + +HTTPError -- Also a valid HTTP response, so you can treat an HTTP error +as an exceptional event or valid response. 
+ +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib2 + +# set up authentication info +authinfo = urllib2.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler) + +# install it +urllib2.install_opener(opener) + +f = urllib2.urlopen('http://www.python.org/') + + +""" + +# XXX issues: +# If an authentication error handler that tries to perform +# authentication for some reason but fails, how should the error be +# signalled? The client needs to know the HTTP error code. But if +# the handler knows that the problem was, e.g., that it didn't know +# that hash algo that requested in the challenge, it would be good to +# pass that information along to the client, too. +# ftp errors aren't handled cleanly +# check digest against correct (i.e. non-apache) implementation + +# Possible extensions: +# complex proxies XXX not sure what exactly was meant by this +# abstract factory for opener + +import base64 +import hashlib +import httplib +import mimetools +import os +import posixpath +import random +import re +import socket +import sys +import time +import urlparse +import bisect +import warnings + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +# check for SSL +try: + import ssl +except ImportError: + _have_ssl = False +else: + _have_ssl = True + +from urllib import (unwrap, unquote, splittype, splithost, quote, + addinfourl, splitport, splittag, toBytes, + splitattr, ftpwrapper, splituser, splitpasswd, splitvalue) + +# support for FileHandler, proxies via environment variables +from urllib import localhost, url2pathname, getproxies, proxy_bypass + +# used in User-Agent header sent +__version__ = sys.version[:3] + +_opener = None +def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + cafile=None, capath=None, cadefault=False, context=None): + global _opener + if cafile or capath or cadefault: + if context is not None: + raise ValueError( + "You can't pass both context and any of cafile, capath, and " + "cadefault" + ) + if not _have_ssl: + raise ValueError('SSL support not available') + context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, + cafile=cafile, + capath=capath) + https_handler = HTTPSHandler(context=context) + opener = build_opener(https_handler) + elif context: + https_handler = HTTPSHandler(context=context) + opener = build_opener(https_handler) + elif _opener is None: + _opener = opener = build_opener() + else: + opener = _opener + return opener.open(url, data, timeout) + +def install_opener(opener): + global _opener + _opener = opener + +# do these error classes make sense? +# make sure all of the IOError stuff is overridden. we just want to be +# subtypes. + +class URLError(IOError): + # URLError is a sub-type of IOError, but it doesn't share any of + # the implementation. need to override __init__ and __str__. + # It sets self.args for compatibility with other EnvironmentError + # subclasses, but args doesn't have the typical format with errno in + # slot 0 and strerror in slot 1. This may be better than nothing. 
+    def __init__(self, reason):
+        self.args = reason,
+        self.reason = reason
+
+    def __str__(self):
+        return '<urlopen error %s>' % self.reason
+
+class HTTPError(URLError, addinfourl):
+    """Raised when HTTP error occurs, but also acts like non-error return"""
+    __super_init = addinfourl.__init__
+
+    def __init__(self, url, code, msg, hdrs, fp):
+        self.code = code
+        self.msg = msg
+        self.hdrs = hdrs
+        self.fp = fp
+        self.filename = url
+        # The addinfourl classes depend on fp being a valid file
+        # object.  In some cases, the HTTPError may not have a valid
+        # file object.  If this happens, the simplest workaround is to
+        # not initialize the base classes.
+        if fp is not None:
+            self.__super_init(fp, hdrs, url, code)
+
+    def __str__(self):
+        return 'HTTP Error %s: %s' % (self.code, self.msg)
+
+    # since URLError specifies a .reason attribute, HTTPError should also
+    # provide this attribute. See issue13211 for discussion.
+    @property
+    def reason(self):
+        return self.msg
+
+    def info(self):
+        return self.hdrs
+
+# copied from cookielib.py
+_cut_port_re = re.compile(r":\d+$")
+def request_host(request):
+    """Return request-host, as defined by RFC 2965.
+
+    Variation from RFC: returned value is lowercased, for convenient
+    comparison.
+
+    """
+    url = request.get_full_url()
+    host = urlparse.urlparse(url)[1]
+    if host == "":
+        host = request.get_header("Host", "")
+
+    # remove port, if present
+    host = _cut_port_re.sub("", host, 1)
+    return host.lower()
+
+class Request:
+
+    def __init__(self, url, data=None, headers={},
+                 origin_req_host=None, unverifiable=False):
+        # unwrap('<URL:type://host/path>') --> 'type://host/path'
+        self.__original = unwrap(url)
+        self.__original, self.__fragment = splittag(self.__original)
+        self.type = None
+        # self.__r_type is what's left after doing the splittype
+        self.host = None
+        self.port = None
+        self._tunnel_host = None
+        self.data = data
+        self.headers = {}
+        for key, value in headers.items():
+            self.add_header(key, value)
+        self.unredirected_hdrs = {}
+        if origin_req_host is None:
+            origin_req_host = request_host(self)
+        self.origin_req_host = origin_req_host
+        self.unverifiable = unverifiable
+
+    def __getattr__(self, attr):
+        # XXX this is a fallback mechanism to guard against these
+        # methods getting called in a non-standard order.  this may be
+        # too complicated and/or unnecessary.
+        # XXX should the __r_XXX attributes be public?
+ if attr[:12] == '_Request__r_': + name = attr[12:] + if hasattr(Request, 'get_' + name): + getattr(self, 'get_' + name)() + return getattr(self, attr) + raise AttributeError, attr + + def get_method(self): + if self.has_data(): + return "POST" + else: + return "GET" + + # XXX these helper methods are lame + + def add_data(self, data): + self.data = data + + def has_data(self): + return self.data is not None + + def get_data(self): + return self.data + + def get_full_url(self): + if self.__fragment: + return '%s#%s' % (self.__original, self.__fragment) + else: + return self.__original + + def get_type(self): + if self.type is None: + self.type, self.__r_type = splittype(self.__original) + if self.type is None: + raise ValueError, "unknown url type: %s" % self.__original + return self.type + + def get_host(self): + if self.host is None: + self.host, self.__r_host = splithost(self.__r_type) + if self.host: + self.host = unquote(self.host) + return self.host + + def get_selector(self): + return self.__r_host + + def set_proxy(self, host, type): + if self.type == 'https' and not self._tunnel_host: + self._tunnel_host = self.host + else: + self.type = type + self.__r_host = self.__original + + self.host = host + + def has_proxy(self): + return self.__r_host == self.__original + + def get_origin_req_host(self): + return self.origin_req_host + + def is_unverifiable(self): + return self.unverifiable + + def add_header(self, key, val): + # useful for something like authentication + self.headers[key.capitalize()] = val + + def add_unredirected_header(self, key, val): + # will not be added to a redirected request + self.unredirected_hdrs[key.capitalize()] = val + + def has_header(self, header_name): + return (header_name in self.headers or + header_name in self.unredirected_hdrs) + + def get_header(self, header_name, default=None): + return self.headers.get( + header_name, + self.unredirected_hdrs.get(header_name, default)) + + def header_items(self): + hdrs = self.unredirected_hdrs.copy() + hdrs.update(self.headers) + return hdrs.items() + +class OpenerDirector: + def __init__(self): + client_version = "Python-urllib/%s" % __version__ + self.addheaders = [('User-agent', client_version)] + # self.handlers is retained only for backward compatibility + self.handlers = [] + # manage the individual handlers + self.handle_open = {} + self.handle_error = {} + self.process_response = {} + self.process_request = {} + + def add_handler(self, handler): + if not hasattr(handler, "add_parent"): + raise TypeError("expected BaseHandler instance, got %r" % + type(handler)) + + added = False + for meth in dir(handler): + if meth in ["redirect_request", "do_open", "proxy_open"]: + # oops, coincidental match + continue + + i = meth.find("_") + protocol = meth[:i] + condition = meth[i+1:] + + if condition.startswith("error"): + j = condition.find("_") + i + 1 + kind = meth[j+1:] + try: + kind = int(kind) + except ValueError: + pass + lookup = self.handle_error.get(protocol, {}) + self.handle_error[protocol] = lookup + elif condition == "open": + kind = protocol + lookup = self.handle_open + elif condition == "response": + kind = protocol + lookup = self.process_response + elif condition == "request": + kind = protocol + lookup = self.process_request + else: + continue + + handlers = lookup.setdefault(kind, []) + if handlers: + bisect.insort(handlers, handler) + else: + handlers.append(handler) + added = True + + if added: + bisect.insort(self.handlers, handler) + handler.add_parent(self) + + def close(self): + # Only 
exists for backwards compatibility. + pass + + def _call_chain(self, chain, kind, meth_name, *args): + # Handlers raise an exception if no one else should try to handle + # the request, or return None if they can't but another handler + # could. Otherwise, they return the response. + handlers = chain.get(kind, ()) + for handler in handlers: + func = getattr(handler, meth_name) + + result = func(*args) + if result is not None: + return result + + def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + # accept a URL or a Request object + if isinstance(fullurl, basestring): + req = Request(fullurl, data) + else: + req = fullurl + if data is not None: + req.add_data(data) + + req.timeout = timeout + protocol = req.get_type() + + # pre-process request + meth_name = protocol+"_request" + for processor in self.process_request.get(protocol, []): + meth = getattr(processor, meth_name) + req = meth(req) + + response = self._open(req, data) + + # post-process response + meth_name = protocol+"_response" + for processor in self.process_response.get(protocol, []): + meth = getattr(processor, meth_name) + response = meth(req, response) + + return response + + def _open(self, req, data=None): + result = self._call_chain(self.handle_open, 'default', + 'default_open', req) + if result: + return result + + protocol = req.get_type() + result = self._call_chain(self.handle_open, protocol, protocol + + '_open', req) + if result: + return result + + return self._call_chain(self.handle_open, 'unknown', + 'unknown_open', req) + + def error(self, proto, *args): + if proto in ('http', 'https'): + # XXX http[s] protocols are special-cased + dict = self.handle_error['http'] # https is not different than http + proto = args[2] # YUCK! + meth_name = 'http_error_%s' % proto + http_err = 1 + orig_args = args + else: + dict = self.handle_error + meth_name = proto + '_error' + http_err = 0 + args = (dict, proto, meth_name) + args + result = self._call_chain(*args) + if result: + return result + + if http_err: + args = (dict, 'default', 'http_error_default') + orig_args + return self._call_chain(*args) + +# XXX probably also want an abstract factory that knows when it makes +# sense to skip a superclass in favor of a subclass and when it might +# make sense to include both + +def build_opener(*handlers): + """Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable, HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. 
+ """ + import types + def isclass(obj): + return isinstance(obj, (types.ClassType, type)) + + opener = OpenerDirector() + default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, + HTTPDefaultErrorHandler, HTTPRedirectHandler, + FTPHandler, FileHandler, HTTPErrorProcessor] + if hasattr(httplib, 'HTTPS'): + default_classes.append(HTTPSHandler) + skip = set() + for klass in default_classes: + for check in handlers: + if isclass(check): + if issubclass(check, klass): + skip.add(klass) + elif isinstance(check, klass): + skip.add(klass) + for klass in skip: + default_classes.remove(klass) + + for klass in default_classes: + opener.add_handler(klass()) + + for h in handlers: + if isclass(h): + h = h() + opener.add_handler(h) + return opener + +class BaseHandler: + handler_order = 500 + + def add_parent(self, parent): + self.parent = parent + + def close(self): + # Only exists for backwards compatibility + pass + + def __lt__(self, other): + if not hasattr(other, "handler_order"): + # Try to preserve the old behavior of having custom classes + # inserted after default ones (works only for custom user + # classes which are not aware of handler_order). + return True + return self.handler_order < other.handler_order + + +class HTTPErrorProcessor(BaseHandler): + """Process HTTP error responses.""" + handler_order = 1000 # after all other processing + + def http_response(self, request, response): + code, msg, hdrs = response.code, response.msg, response.info() + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if not (200 <= code < 300): + response = self.parent.error( + 'http', request, response, code, msg, hdrs) + + return response + + https_response = http_response + +class HTTPDefaultErrorHandler(BaseHandler): + def http_error_default(self, req, fp, code, msg, hdrs): + raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) + +class HTTPRedirectHandler(BaseHandler): + # maximum number of redirections to any single URL + # this is needed because of the state that cookies introduce + max_repeats = 4 + # maximum total number of redirections (regardless of URL) before + # assuming we're in a loop + max_redirections = 10 + + def redirect_request(self, req, fp, code, msg, headers, newurl): + """Return a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + """ + m = req.get_method() + if (code in (301, 302, 303, 307) and m in ("GET", "HEAD") + or code in (301, 302, 303) and m == "POST"): + # Strictly (according to RFC 2616), 301 or 302 in response + # to a POST MUST NOT cause a redirection without confirmation + # from the user (of urllib2, in this case). In practice, + # essentially all clients do redirect in this case, so we + # do the same. 
+ # be conciliant with URIs containing a space + newurl = newurl.replace(' ', '%20') + newheaders = dict((k,v) for k,v in req.headers.items() + if k.lower() not in ("content-length", "content-type") + ) + return Request(newurl, + headers=newheaders, + origin_req_host=req.get_origin_req_host(), + unverifiable=True) + else: + raise HTTPError(req.get_full_url(), code, msg, headers, fp) + + # Implementation note: To avoid the server sending us into an + # infinite loop, the request object needs to track what URLs we + # have already seen. Do this by adding a handler-specific + # attribute to the Request object. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + if 'location' in headers: + newurl = headers.getheaders('location')[0] + elif 'uri' in headers: + newurl = headers.getheaders('uri')[0] + else: + return + + # fix a possible malformed URL + urlparts = urlparse.urlparse(newurl) + if not urlparts.path: + urlparts = list(urlparts) + urlparts[2] = "/" + newurl = urlparse.urlunparse(urlparts) + + newurl = urlparse.urljoin(req.get_full_url(), newurl) + + # For security reasons we do not allow redirects to protocols + # other than HTTP, HTTPS or FTP. + newurl_lower = newurl.lower() + if not (newurl_lower.startswith('http://') or + newurl_lower.startswith('https://') or + newurl_lower.startswith('ftp://')): + raise HTTPError(newurl, code, + msg + " - Redirection to url '%s' is not allowed" % + newurl, + headers, fp) + + # XXX Probably want to forget about the state of the current + # request, although that might interact poorly with other + # handlers that also use handler-specific request attributes + new = self.redirect_request(req, fp, code, msg, headers, newurl) + if new is None: + return + + # loop detection + # .redirect_dict has a key url if url was previously visited. + if hasattr(req, 'redirect_dict'): + visited = new.redirect_dict = req.redirect_dict + if (visited.get(newurl, 0) >= self.max_repeats or + len(visited) >= self.max_redirections): + raise HTTPError(req.get_full_url(), code, + self.inf_msg + msg, headers, fp) + else: + visited = new.redirect_dict = req.redirect_dict = {} + visited[newurl] = visited.get(newurl, 0) + 1 + + # Don't close the fp until we are sure that we won't use it + # with HTTPError. + fp.read() + fp.close() + + return self.parent.open(new, timeout=req.timeout) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + + inf_msg = "The HTTP server returned a redirect error that would " \ + "lead to an infinite loop.\n" \ + "The last 30x error message was:\n" + + +def _parse_proxy(proxy): + """Return (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme: + + >>> _parse_proxy('file:/ftp.example.com/') + Traceback (most recent call last): + ValueError: proxy URL with no authority: 'file:/ftp.example.com/' + + The first three items of the returned tuple may be None. 
+ + Examples of authority parsing: + + >>> _parse_proxy('proxy.example.com') + (None, None, None, 'proxy.example.com') + >>> _parse_proxy('proxy.example.com:3128') + (None, None, None, 'proxy.example.com:3128') + + The authority component may optionally include userinfo (assumed to be + username:password): + + >>> _parse_proxy('joe:password@proxy.example.com') + (None, 'joe', 'password', 'proxy.example.com') + >>> _parse_proxy('joe:password@proxy.example.com:3128') + (None, 'joe', 'password', 'proxy.example.com:3128') + + Same examples, but with URLs instead: + + >>> _parse_proxy('http://proxy.example.com/') + ('http', None, None, 'proxy.example.com') + >>> _parse_proxy('http://proxy.example.com:3128/') + ('http', None, None, 'proxy.example.com:3128') + >>> _parse_proxy('http://joe:password@proxy.example.com/') + ('http', 'joe', 'password', 'proxy.example.com') + >>> _parse_proxy('http://joe:password@proxy.example.com:3128') + ('http', 'joe', 'password', 'proxy.example.com:3128') + + Everything after the authority is ignored: + + >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128') + ('ftp', 'joe', 'password', 'proxy.example.com') + + Test for no trailing '/' case: + + >>> _parse_proxy('http://joe:password@proxy.example.com') + ('http', 'joe', 'password', 'proxy.example.com') + + """ + scheme, r_scheme = splittype(proxy) + if not r_scheme.startswith("/"): + # authority + scheme = None + authority = proxy + else: + # URL + if not r_scheme.startswith("//"): + raise ValueError("proxy URL with no authority: %r" % proxy) + # We have an authority, so for RFC 3986-compliant URLs (by ss 3. + # and 3.3.), path is empty or starts with '/' + end = r_scheme.find("/", 2) + if end == -1: + end = None + authority = r_scheme[2:end] + userinfo, hostport = splituser(authority) + if userinfo is not None: + user, password = splitpasswd(userinfo) + else: + user = password = None + return scheme, user, password, hostport + +class ProxyHandler(BaseHandler): + # Proxies must be in front + handler_order = 100 + + def __init__(self, proxies=None): + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'has_key'), "proxies must be a mapping" + self.proxies = proxies + for type, url in proxies.items(): + setattr(self, '%s_open' % type, + lambda r, proxy=url, type=type, meth=self.proxy_open: \ + meth(r, proxy, type)) + + def proxy_open(self, req, proxy, type): + orig_type = req.get_type() + proxy_type, user, password, hostport = _parse_proxy(proxy) + + if proxy_type is None: + proxy_type = orig_type + + if req.host and proxy_bypass(req.host): + return None + + if user and password: + user_pass = '%s:%s' % (unquote(user), unquote(password)) + creds = base64.b64encode(user_pass).strip() + req.add_header('Proxy-authorization', 'Basic ' + creds) + hostport = unquote(hostport) + req.set_proxy(hostport, proxy_type) + + if orig_type == proxy_type or orig_type == 'https': + # let other handlers take care of it + return None + else: + # need to start over, because the other handlers don't + # grok the proxy's URL type + # e.g. 
if we have a constructor arg proxies like so: + # {'http': 'ftp://proxy.example.com'}, we may end up turning + # a request for http://acme.example.com/a into one for + # ftp://proxy.example.com/a + return self.parent.open(req, timeout=req.timeout) + +class HTTPPasswordMgr: + + def __init__(self): + self.passwd = {} + + def add_password(self, realm, uri, user, passwd): + # uri could be a single URI or a sequence + if isinstance(uri, basestring): + uri = [uri] + if not realm in self.passwd: + self.passwd[realm] = {} + for default_port in True, False: + reduced_uri = tuple( + [self.reduce_uri(u, default_port) for u in uri]) + self.passwd[realm][reduced_uri] = (user, passwd) + + def find_user_password(self, realm, authuri): + domains = self.passwd.get(realm, {}) + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uris, authinfo in domains.iteritems(): + for uri in uris: + if self.is_suburi(uri, reduced_authuri): + return authinfo + return None, None + + def reduce_uri(self, uri, default_port=True): + """Accept authority or URI and extract only the authority and path.""" + # note HTTP URLs do not have a userinfo component + parts = urlparse.urlsplit(uri) + if parts[1]: + # URI + scheme = parts[0] + authority = parts[1] + path = parts[2] or '/' + else: + # host or host:port + scheme = None + authority = uri + path = '/' + host, port = splitport(authority) + if default_port and port is None and scheme is not None: + dport = {"http": 80, + "https": 443, + }.get(scheme) + if dport is not None: + authority = "%s:%d" % (host, dport) + return authority, path + + def is_suburi(self, base, test): + """Check if test is below base in a URI tree + + Both args must be URIs in reduced form. + """ + if base == test: + return True + if base[0] != test[0]: + return False + common = posixpath.commonprefix((base[1], test[1])) + if len(common) == len(base[1]): + return True + return False + + +class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): + + def find_user_password(self, realm, authuri): + user, password = HTTPPasswordMgr.find_user_password(self, realm, + authuri) + if user is not None: + return user, password + return HTTPPasswordMgr.find_user_password(self, None, authuri) + + +class AbstractBasicAuthHandler: + + # XXX this allows for multiple auth-schemes, but will stupidly pick + # the last one with a realm specified. + + # allow for double- and single-quoted realm values + # (single quotes are a violation of the RFC, but appear in the wild) + rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' + 'realm=(["\']?)([^"\']*)\\2', re.I) + + # XXX could pre-emptively send auth info already accepted (RFC 2617, + # end of section 2, and section 1.2 immediately after "credentials" + # production). 
+ + def __init__(self, password_mgr=None): + if password_mgr is None: + password_mgr = HTTPPasswordMgr() + self.passwd = password_mgr + self.add_password = self.passwd.add_password + + + def http_error_auth_reqed(self, authreq, host, req, headers): + # host may be an authority (without userinfo) or a URL with an + # authority + # XXX could be multiple headers + authreq = headers.get(authreq, None) + + if authreq: + mo = AbstractBasicAuthHandler.rx.search(authreq) + if mo: + scheme, quote, realm = mo.groups() + if quote not in ['"', "'"]: + warnings.warn("Basic Auth Realm was unquoted", + UserWarning, 2) + if scheme.lower() == 'basic': + return self.retry_http_basic_auth(host, req, realm) + + def retry_http_basic_auth(self, host, req, realm): + user, pw = self.passwd.find_user_password(realm, host) + if pw is not None: + raw = "%s:%s" % (user, pw) + auth = 'Basic %s' % base64.b64encode(raw).strip() + if req.get_header(self.auth_header, None) == auth: + return None + req.add_unredirected_header(self.auth_header, auth) + return self.parent.open(req, timeout=req.timeout) + else: + return None + + +class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Authorization' + + def http_error_401(self, req, fp, code, msg, headers): + url = req.get_full_url() + response = self.http_error_auth_reqed('www-authenticate', + url, req, headers) + return response + + +class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Proxy-authorization' + + def http_error_407(self, req, fp, code, msg, headers): + # http_error_auth_reqed requires that there is no userinfo component in + # authority. Assume there isn't one, since urllib2 does not (and + # should not, RFC 3986 s. 3.2.1) support requests for URLs containing + # userinfo. + authority = req.get_host() + response = self.http_error_auth_reqed('proxy-authenticate', + authority, req, headers) + return response + + +def randombytes(n): + """Return n random bytes.""" + # Use /dev/urandom if it is available. Fall back to random module + # if not. It might be worthwhile to extend this function to use + # other platform-specific mechanisms for getting random bytes. + if os.path.exists("/dev/urandom"): + f = open("/dev/urandom") + s = f.read(n) + f.close() + return s + else: + L = [chr(random.randrange(0, 256)) for i in range(n)] + return "".join(L) + +class AbstractDigestAuthHandler: + # Digest authentication is specified in RFC 2617. + + # XXX The client does not inspect the Authentication-Info header + # in a successful response. + + # XXX It should be possible to test this implementation against + # a mock server that just generates a static set of challenges. + + # XXX qop="auth-int" supports is shaky + + def __init__(self, passwd=None): + if passwd is None: + passwd = HTTPPasswordMgr() + self.passwd = passwd + self.add_password = self.passwd.add_password + self.retried = 0 + self.nonce_count = 0 + self.last_nonce = None + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, auth_header, host, req, headers): + authreq = headers.get(auth_header, None) + if self.retried > 5: + # Don't fail endlessly - if we failed once, we'll probably + # fail a second time. Hm. Unless the Password Manager is + # prompting for the information. Crap. 
This isn't great + # but it's better than the current 'repeat until recursion + # depth exceeded' approach + raise HTTPError(req.get_full_url(), 401, "digest auth failed", + headers, None) + else: + self.retried += 1 + if authreq: + scheme = authreq.split()[0] + if scheme.lower() == 'digest': + return self.retry_http_digest_auth(req, authreq) + + def retry_http_digest_auth(self, req, auth): + token, challenge = auth.split(' ', 1) + chal = parse_keqv_list(parse_http_list(challenge)) + auth = self.get_authorization(req, chal) + if auth: + auth_val = 'Digest %s' % auth + if req.headers.get(self.auth_header, None) == auth_val: + return None + req.add_unredirected_header(self.auth_header, auth_val) + resp = self.parent.open(req, timeout=req.timeout) + return resp + + def get_cnonce(self, nonce): + # The cnonce-value is an opaque + # quoted string value provided by the client and used by both client + # and server to avoid chosen plaintext attacks, to provide mutual + # authentication, and to provide some message integrity protection. + # This isn't a fabulous effort, but it's probably Good Enough. + dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(), + randombytes(8))).hexdigest() + return dig[:16] + + def get_authorization(self, req, chal): + try: + realm = chal['realm'] + nonce = chal['nonce'] + qop = chal.get('qop') + algorithm = chal.get('algorithm', 'MD5') + # mod_digest doesn't send an opaque, even though it isn't + # supposed to be optional + opaque = chal.get('opaque', None) + except KeyError: + return None + + H, KD = self.get_algorithm_impls(algorithm) + if H is None: + return None + + user, pw = self.passwd.find_user_password(realm, req.get_full_url()) + if user is None: + return None + + # XXX not implemented yet + if req.has_data(): + entdig = self.get_entity_digest(req.get_data(), chal) + else: + entdig = None + + A1 = "%s:%s:%s" % (user, realm, pw) + A2 = "%s:%s" % (req.get_method(), + # XXX selector: what about proxies and full urls + req.get_selector()) + if qop == 'auth': + if nonce == self.last_nonce: + self.nonce_count += 1 + else: + self.nonce_count = 1 + self.last_nonce = nonce + + ncvalue = '%08x' % self.nonce_count + cnonce = self.get_cnonce(nonce) + noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) + respdig = KD(H(A1), noncebit) + elif qop is None: + respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) + else: + # XXX handle auth-int. + raise URLError("qop '%s' is not supported." % qop) + + # XXX should the partial digests be encoded too? 
+ + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (user, realm, nonce, req.get_selector(), + respdig) + if opaque: + base += ', opaque="%s"' % opaque + if entdig: + base += ', digest="%s"' % entdig + base += ', algorithm="%s"' % algorithm + if qop: + base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) + return base + + def get_algorithm_impls(self, algorithm): + # algorithm should be case-insensitive according to RFC2617 + algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if algorithm == 'MD5': + H = lambda x: hashlib.md5(x).hexdigest() + elif algorithm == 'SHA': + H = lambda x: hashlib.sha1(x).hexdigest() + # XXX MD5-sess + KD = lambda s, d: H("%s:%s" % (s, d)) + return H, KD + + def get_entity_digest(self, data, chal): + # XXX not implemented yet + return None + + +class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + """An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + """ + + auth_header = 'Authorization' + handler_order = 490 # before Basic auth + + def http_error_401(self, req, fp, code, msg, headers): + host = urlparse.urlparse(req.get_full_url())[1] + retry = self.http_error_auth_reqed('www-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + + +class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + + auth_header = 'Proxy-Authorization' + handler_order = 490 # before Basic auth + + def http_error_407(self, req, fp, code, msg, headers): + host = req.get_host() + retry = self.http_error_auth_reqed('proxy-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + +class AbstractHTTPHandler(BaseHandler): + + def __init__(self, debuglevel=0): + self._debuglevel = debuglevel + + def set_http_debuglevel(self, level): + self._debuglevel = level + + def do_request_(self, request): + host = request.get_host() + if not host: + raise URLError('no host given') + + if request.has_data(): # POST + data = request.get_data() + if not request.has_header('Content-type'): + request.add_unredirected_header( + 'Content-type', + 'application/x-www-form-urlencoded') + if not request.has_header('Content-length'): + request.add_unredirected_header( + 'Content-length', '%d' % len(data)) + + sel_host = host + if request.has_proxy(): + scheme, sel = splittype(request.get_selector()) + sel_host, sel_path = splithost(sel) + + if not request.has_header('Host'): + request.add_unredirected_header('Host', sel_host) + for name, value in self.parent.addheaders: + name = name.capitalize() + if not request.has_header(name): + request.add_unredirected_header(name, value) + + return request + + def do_open(self, http_class, req, **http_conn_args): + """Return an addinfourl object for the request, using http_class. + + http_class must implement the HTTPConnection API from httplib. + The addinfourl return value is a file-like object. 
It also + has methods and attributes including: + - info(): return a mimetools.Message object for the headers + - geturl(): return the original request URL + - code: HTTP status code + """ + host = req.get_host() + if not host: + raise URLError('no host given') + + # will parse host:port + h = http_class(host, timeout=req.timeout, **http_conn_args) + h.set_debuglevel(self._debuglevel) + + headers = dict(req.unredirected_hdrs) + headers.update(dict((k, v) for k, v in req.headers.items() + if k not in headers)) + + # We want to make an HTTP/1.1 request, but the addinfourl + # class isn't prepared to deal with a persistent connection. + # It will try to read all remaining data from the socket, + # which will block while the server waits for the next request. + # So make sure the connection gets closed after the (only) + # request. + headers["Connection"] = "close" + headers = dict( + (name.title(), val) for name, val in headers.items()) + + if req._tunnel_host: + tunnel_headers = {} + proxy_auth_hdr = "Proxy-Authorization" + if proxy_auth_hdr in headers: + tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] + # Proxy-Authorization should not be sent to origin + # server. + del headers[proxy_auth_hdr] + h.set_tunnel(req._tunnel_host, headers=tunnel_headers) + + try: + h.request(req.get_method(), req.get_selector(), req.data, headers) + except socket.error, err: # XXX what error? + h.close() + raise URLError(err) + else: + try: + r = h.getresponse(buffering=True) + except TypeError: # buffering kw not supported + r = h.getresponse() + + # Pick apart the HTTPResponse object to get the addinfourl + # object initialized properly. + + # Wrap the HTTPResponse object in socket's file object adapter + # for Windows. That adapter calls recv(), so delegate recv() + # to read(). This weird wrapping allows the returned object to + # have readline() and readlines() methods. + + # XXX It might be better to extract the read buffering code + # out of socket._fileobject() and into a base class. 
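+
+        # Editorial illustration (not from the upstream sources): the
+        # addinfourl built below is what ultimately comes back from
+        # urlopen()-style calls, so for a hypothetical resp a caller can do:
+        #
+        #   resp.read()     # response body
+        #   resp.info()     # mimetools.Message with the response headers
+        #   resp.geturl()   # the originally requested URL
+        #   resp.code       # integer HTTP status, e.g. 200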
+ + r.recv = r.read + fp = socket._fileobject(r, close=True) + + resp = addinfourl(fp, r.msg, req.get_full_url()) + resp.code = r.status + resp.msg = r.reason + return resp + + +class HTTPHandler(AbstractHTTPHandler): + + def http_open(self, req): + return self.do_open(httplib.HTTPConnection, req) + + http_request = AbstractHTTPHandler.do_request_ + +if hasattr(httplib, 'HTTPS'): + class HTTPSHandler(AbstractHTTPHandler): + + def __init__(self, debuglevel=0, context=None): + AbstractHTTPHandler.__init__(self, debuglevel) + self._context = context + + def https_open(self, req): + return self.do_open(httplib.HTTPSConnection, req, + context=self._context) + + https_request = AbstractHTTPHandler.do_request_ + +class HTTPCookieProcessor(BaseHandler): + def __init__(self, cookiejar=None): + import cookielib + if cookiejar is None: + cookiejar = cookielib.CookieJar() + self.cookiejar = cookiejar + + def http_request(self, request): + self.cookiejar.add_cookie_header(request) + return request + + def http_response(self, request, response): + self.cookiejar.extract_cookies(response, request) + return response + + https_request = http_request + https_response = http_response + +class UnknownHandler(BaseHandler): + def unknown_open(self, req): + type = req.get_type() + raise URLError('unknown url type: %s' % type) + +def parse_keqv_list(l): + """Parse list of key=value strings where keys are not duplicated.""" + parsed = {} + for elt in l: + k, v = elt.split('=', 1) + if v[0] == '"' and v[-1] == '"': + v = v[1:-1] + parsed[k] = v + return parsed + +def parse_http_list(s): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. 
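+
+    Illustrative example (an editorial addition, not part of the upstream
+    docstring): the input 'a, b, "c,d", e' is returned as
+    ['a', 'b', '"c,d"', 'e'] -- the quoted element keeps its quotes and its
+    embedded comma, and surrounding whitespace is stripped.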
+ """ + res = [] + part = '' + + escape = quote = False + for cur in s: + if escape: + part += cur + escape = False + continue + if quote: + if cur == '\\': + escape = True + continue + elif cur == '"': + quote = False + part += cur + continue + + if cur == ',': + res.append(part) + part = '' + continue + + if cur == '"': + quote = True + + part += cur + + # append last part + if part: + res.append(part) + + return [part.strip() for part in res] + +def _safe_gethostbyname(host): + try: + return socket.gethostbyname(host) + except socket.gaierror: + return None + +class FileHandler(BaseHandler): + # Use local file or FTP depending on form of URL + def file_open(self, req): + url = req.get_selector() + if url[:2] == '//' and url[2:3] != '/' and (req.host and + req.host != 'localhost'): + req.type = 'ftp' + return self.parent.open(req) + else: + return self.open_local_file(req) + + # names for the localhost + names = None + def get_names(self): + if FileHandler.names is None: + try: + FileHandler.names = tuple( + socket.gethostbyname_ex('localhost')[2] + + socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + FileHandler.names = (socket.gethostbyname('localhost'),) + return FileHandler.names + + # not entirely sure what the rules are here + def open_local_file(self, req): + import email.utils + import mimetypes + host = req.get_host() + filename = req.get_selector() + localfile = url2pathname(filename) + try: + stats = os.stat(localfile) + size = stats.st_size + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_type(filename)[0] + headers = mimetools.Message(StringIO( + 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified))) + if host: + host, port = splitport(host) + if not host or \ + (not port and _safe_gethostbyname(host) in self.get_names()): + if host: + origurl = 'file://' + host + filename + else: + origurl = 'file://' + filename + return addinfourl(open(localfile, 'rb'), headers, origurl) + except OSError, msg: + # urllib2 users shouldn't expect OSErrors coming from urlopen() + raise URLError(msg) + raise URLError('file not on local host') + +class FTPHandler(BaseHandler): + def ftp_open(self, req): + import ftplib + import mimetypes + host = req.get_host() + if not host: + raise URLError('ftp error: no host given') + host, port = splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + + try: + host = socket.gethostbyname(host) + except socket.error, msg: + raise URLError(msg) + path, attrs = splitattr(req.get_selector()) + dirs = path.split('/') + dirs = map(unquote, dirs) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + try: + fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + fp, retrlen = fw.retrfile(file, type) + headers = "" + mtype = mimetypes.guess_type(req.get_full_url())[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + sf = StringIO(headers) + headers = mimetools.Message(sf) + return addinfourl(fp, headers, 
req.get_full_url()) + except ftplib.all_errors, msg: + raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2] + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + fw = ftpwrapper(user, passwd, host, port, dirs, timeout, + persistent=False) +## fw.ftp.set_debuglevel(1) + return fw + +class CacheFTPHandler(FTPHandler): + # XXX would be nice to have pluggable cache strategies + # XXX this stuff is definitely not thread safe + def __init__(self): + self.cache = {} + self.timeout = {} + self.soonest = 0 + self.delay = 60 + self.max_conns = 16 + + def setTimeout(self, t): + self.delay = t + + def setMaxConns(self, m): + self.max_conns = m + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + key = user, host, port, '/'.join(dirs), timeout + if key in self.cache: + self.timeout[key] = time.time() + self.delay + else: + self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout) + self.timeout[key] = time.time() + self.delay + self.check_cache() + return self.cache[key] + + def check_cache(self): + # first check for old ones + t = time.time() + if self.soonest <= t: + for k, v in self.timeout.items(): + if v < t: + self.cache[k].close() + del self.cache[k] + del self.timeout[k] + self.soonest = min(self.timeout.values()) + + # then check the size + if len(self.cache) == self.max_conns: + for k, v in self.timeout.items(): + if v == self.soonest: + del self.cache[k] + del self.timeout[k] + break + self.soonest = min(self.timeout.values()) + + def clear_cache(self): + for conn in self.cache.values(): + conn.close() + self.cache.clear() + self.timeout.clear() diff --git a/CVIssueCount/urlparse.py b/CVIssueCount/urlparse.py new file mode 100644 index 0000000..4cd3d67 --- /dev/null +++ b/CVIssueCount/urlparse.py @@ -0,0 +1,428 @@ +"""Parse (absolute and relative) URLs. + +urlparse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. + +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urlparse module should conform with it. The urlparse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. 
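+
+A quick illustration (an editorial addition, not part of the upstream
+docstring):
+
+    urlparse('http://example.com/a/b;p?q=1#frag')
+
+yields scheme 'http', netloc 'example.com', path '/a/b', params 'p',
+query 'q=1' and fragment 'frag'.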
+ +""" + +import re + +__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", + "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"] + +# A classification of schemes ('' means apply by default) +uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap', + 'wais', 'file', 'https', 'shttp', 'mms', + 'prospero', 'rtsp', 'rtspu', '', 'sftp', + 'svn', 'svn+ssh'] +uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', + 'imap', 'wais', 'file', 'mms', 'https', 'shttp', + 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '', + 'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh'] +uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap', + 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', + 'mms', '', 'sftp', 'tel'] + +# These are not actually used anymore, but should stay for backwards +# compatibility. (They are undocumented, but have a public-looking name.) +non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', + 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] +uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms', + 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', ''] +uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', + 'nntp', 'wais', 'https', 'shttp', 'snews', + 'file', 'prospero', ''] + +# Characters valid in scheme names +scheme_chars = ('abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '0123456789' + '+-.') + +MAX_CACHE_SIZE = 20 +_parse_cache = {} + +def clear_cache(): + """Clear the parse cache.""" + _parse_cache.clear() + + +class ResultMixin(object): + """Shared methods for the parsed result objects.""" + + @property + def username(self): + netloc = self.netloc + if "@" in netloc: + userinfo = netloc.rsplit("@", 1)[0] + if ":" in userinfo: + userinfo = userinfo.split(":", 1)[0] + return userinfo + return None + + @property + def password(self): + netloc = self.netloc + if "@" in netloc: + userinfo = netloc.rsplit("@", 1)[0] + if ":" in userinfo: + return userinfo.split(":", 1)[1] + return None + + @property + def hostname(self): + netloc = self.netloc.split('@')[-1] + if '[' in netloc and ']' in netloc: + return netloc.split(']')[0][1:].lower() + elif ':' in netloc: + return netloc.split(':')[0].lower() + elif netloc == '': + return None + else: + return netloc.lower() + + @property + def port(self): + netloc = self.netloc.split('@')[-1].split(']')[-1] + if ':' in netloc: + port = netloc.split(':')[1] + if port: + port = int(port, 10) + # verify legal port + if (0 <= port <= 65535): + return port + return None + +from collections import namedtuple + +class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin): + + __slots__ = () + + def geturl(self): + return urlunsplit(self) + + +class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin): + + __slots__ = () + + def geturl(self): + return urlunparse(self) + + +def urlparse(url, scheme='', allow_fragments=True): + """Parse a URL into 6 components: + :///;?# + Return a 6-tuple: (scheme, netloc, path, params, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. 
netloc is a single string) and we don't expand % escapes.""" + tuple = urlsplit(url, scheme, allow_fragments) + scheme, netloc, url, query, fragment = tuple + if scheme in uses_params and ';' in url: + url, params = _splitparams(url) + else: + params = '' + return ParseResult(scheme, netloc, url, params, query, fragment) + +def _splitparams(url): + if '/' in url: + i = url.find(';', url.rfind('/')) + if i < 0: + return url, '' + else: + i = url.find(';') + return url[:i], url[i+1:] + +def _splitnetloc(url, start=0): + delim = len(url) # position of end of domain part of url, default is end + for c in '/?#': # look for delimiters; the order is NOT important + wdelim = url.find(c, start) # find first of this delim + if wdelim >= 0: # if found + delim = min(delim, wdelim) # use earliest delim position + return url[start:delim], url[delim:] # return (domain, rest) + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL into 5 components: + :///?# + Return a 5-tuple: (scheme, netloc, path, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.""" + allow_fragments = bool(allow_fragments) + key = url, scheme, allow_fragments, type(url), type(scheme) + cached = _parse_cache.get(key, None) + if cached: + return cached + if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth + clear_cache() + netloc = query = fragment = '' + i = url.find(':') + if i > 0: + if url[:i] == 'http': # optimize the common case + scheme = url[:i].lower() + url = url[i+1:] + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + v = SplitResult(scheme, netloc, url, query, fragment) + _parse_cache[key] = v + return v + for c in url[:i]: + if c not in scheme_chars: + break + else: + # make sure "url" is not actually a port number (in which case + # "scheme" is really part of the path) + rest = url[i+1:] + if not rest or any(c not in '0123456789' for c in rest): + # not a port number + scheme, url = url[:i].lower(), rest + + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + v = SplitResult(scheme, netloc, url, query, fragment) + _parse_cache[key] = v + return v + +def urlunparse(data): + """Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).""" + scheme, netloc, url, params, query, fragment = data + if params: + url = "%s;%s" % (url, params) + return urlunsplit((scheme, netloc, url, query, fragment)) + +def urlunsplit(data): + """Combine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? 
with an + empty query; the RFC states that these are equivalent).""" + scheme, netloc, url, query, fragment = data + if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): + if url and url[:1] != '/': url = '/' + url + url = '//' + (netloc or '') + url + if scheme: + url = scheme + ':' + url + if query: + url = url + '?' + query + if fragment: + url = url + '#' + fragment + return url + +def urljoin(base, url, allow_fragments=True): + """Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.""" + if not base: + return url + if not url: + return base + bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ + urlparse(base, '', allow_fragments) + scheme, netloc, path, params, query, fragment = \ + urlparse(url, bscheme, allow_fragments) + if scheme != bscheme or scheme not in uses_relative: + return url + if scheme in uses_netloc: + if netloc: + return urlunparse((scheme, netloc, path, + params, query, fragment)) + netloc = bnetloc + if path[:1] == '/': + return urlunparse((scheme, netloc, path, + params, query, fragment)) + if not path and not params: + path = bpath + params = bparams + if not query: + query = bquery + return urlunparse((scheme, netloc, path, + params, query, fragment)) + segments = bpath.split('/')[:-1] + path.split('/') + # XXX The stuff below is bogus in various ways... + if segments[-1] == '.': + segments[-1] = '' + while '.' in segments: + segments.remove('.') + while 1: + i = 1 + n = len(segments) - 1 + while i < n: + if (segments[i] == '..' + and segments[i-1] not in ('', '..')): + del segments[i-1:i+1] + break + i = i+1 + else: + break + if segments == ['', '..']: + segments[-1] = '' + elif len(segments) >= 2 and segments[-1] == '..': + segments[-2:] = [''] + return urlunparse((scheme, netloc, '/'.join(segments), + params, query, fragment)) + +def urldefrag(url): + """Removes any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + """ + if '#' in url: + s, n, p, a, q, frag = urlparse(url) + defrag = urlunparse((s, n, p, a, q, '')) + return defrag, frag + else: + return url, '' + +try: + unicode +except NameError: + def _is_unicode(x): + return 0 +else: + def _is_unicode(x): + return isinstance(x, unicode) + +# unquote method for parse_qs and parse_qsl +# Cannot use directly from urllib as it would create a circular reference +# because urllib uses urlparse methods (urljoin). If you update this function, +# update it also in urllib. This code duplication does not existin in Python3. + +_hexdig = '0123456789ABCDEFabcdef' +_hextochr = dict((a+b, chr(int(a+b,16))) + for a in _hexdig for b in _hexdig) +_asciire = re.compile('([\x00-\x7f]+)') + +def unquote(s): + """unquote('abc%20def') -> 'abc def'.""" + if _is_unicode(s): + if '%' not in s: + return s + bits = _asciire.split(s) + res = [bits[0]] + append = res.append + for i in range(1, len(bits), 2): + append(unquote(str(bits[i])).decode('latin1')) + append(bits[i + 1]) + return ''.join(res) + + bits = s.split('%') + # fastpath + if len(bits) == 1: + return s + res = [bits[0]] + append = res.append + for item in bits[1:]: + try: + append(_hextochr[item[:2]]) + append(item[2:]) + except KeyError: + append('%') + append(item) + return ''.join(res) + +def parse_qs(qs, keep_blank_values=0, strict_parsing=0): + """Parse a query given as a string argument. 
+ + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + """ + dict = {} + for name, value in parse_qsl(qs, keep_blank_values, strict_parsing): + if name in dict: + dict[name].append(value) + else: + dict[name] = [value] + return dict + +def parse_qsl(qs, keep_blank_values=0, strict_parsing=0): + """Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. A + true value indicates that blanks should be retained as blank + strings. The default false value indicates that blank values + are to be ignored and treated as if they were not included. + + strict_parsing: flag indicating what to do with parsing errors. If + false (the default), errors are silently ignored. If true, + errors raise a ValueError exception. + + Returns a list, as G-d intended. + """ + pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] + r = [] + for name_value in pairs: + if not name_value and not strict_parsing: + continue + nv = name_value.split('=', 1) + if len(nv) != 2: + if strict_parsing: + raise ValueError, "bad query field: %r" % (name_value,) + # Handle case of a control-name with no equal sign + if keep_blank_values: + nv.append('') + else: + continue + if len(nv[1]) or keep_blank_values: + name = unquote(nv[0].replace('+', ' ')) + value = unquote(nv[1].replace('+', ' ')) + r.append((name, value)) + + return r diff --git a/CVIssueCount/warnings.py b/CVIssueCount/warnings.py new file mode 100644 index 0000000..b0d53aa --- /dev/null +++ b/CVIssueCount/warnings.py @@ -0,0 +1,422 @@ +"""Python part of the warnings subsystem.""" + +# Note: function level imports should *not* be used +# in this module as it may cause import lock deadlock. +# See bug 683658. +import linecache +import sys +import types + +__all__ = ["warn", "warn_explicit", "showwarning", + "formatwarning", "filterwarnings", "simplefilter", + "resetwarnings", "catch_warnings"] + + +def warnpy3k(message, category=None, stacklevel=1): + """Issue a deprecation warning for Python 3.x related changes. + + Warnings are omitted unless Python is started with the -3 option. + """ + if sys.py3kwarning: + if category is None: + category = DeprecationWarning + warn(message, category, stacklevel+1) + +def _show_warning(message, category, filename, lineno, file=None, line=None): + """Hook to write a warning to a file; replace if you like.""" + if file is None: + file = sys.stderr + if file is None: + # sys.stderr is None - warnings get lost + return + try: + file.write(formatwarning(message, category, filename, lineno, line)) + except (IOError, UnicodeError): + pass # the file (probably stderr) is invalid - this warning gets lost. +# Keep a working version around in case the deprecation of the old API is +# triggered. 
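+# Editorial note (not from upstream): showwarning is the hook that
+# warn_explicit() calls to emit each warning, and user code may rebind it,
+# e.g.
+#
+#   def showwarning(message, category, filename, lineno, file=None, line=None):
+#       my_log_sink("%s:%s: %s: %s" % (filename, lineno,
+#                                      category.__name__, message))
+#   warnings.showwarning = showwarning
+#
+# where my_log_sink is a hypothetical logging callable; the default binding
+# to _show_warning follows just below.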
+showwarning = _show_warning + +def formatwarning(message, category, filename, lineno, line=None): + """Function to format a warning the standard way.""" + try: + unicodetype = unicode + except NameError: + unicodetype = () + try: + message = str(message) + except UnicodeEncodeError: + pass + s = "%s: %s: %s\n" % (lineno, category.__name__, message) + line = linecache.getline(filename, lineno) if line is None else line + if line: + line = line.strip() + if isinstance(s, unicodetype) and isinstance(line, str): + line = unicode(line, 'latin1') + s += " %s\n" % line + if isinstance(s, unicodetype) and isinstance(filename, str): + enc = sys.getfilesystemencoding() + if enc: + try: + filename = unicode(filename, enc) + except UnicodeDecodeError: + pass + s = "%s:%s" % (filename, s) + return s + +def filterwarnings(action, message="", category=Warning, module="", lineno=0, + append=0): + """Insert an entry into the list of warnings filters (at the front). + + 'action' -- one of "error", "ignore", "always", "default", "module", + or "once" + 'message' -- a regex that the warning message must match + 'category' -- a class that the warning must be a subclass of + 'module' -- a regex that the module name must match + 'lineno' -- an integer line number, 0 matches all warnings + 'append' -- if true, append to the list of filters + """ + import re + assert action in ("error", "ignore", "always", "default", "module", + "once"), "invalid action: %r" % (action,) + assert isinstance(message, basestring), "message must be a string" + assert isinstance(category, (type, types.ClassType)), \ + "category must be a class" + assert issubclass(category, Warning), "category must be a Warning subclass" + assert isinstance(module, basestring), "module must be a string" + assert isinstance(lineno, int) and lineno >= 0, \ + "lineno must be an int >= 0" + item = (action, re.compile(message, re.I), category, + re.compile(module), lineno) + if append: + filters.append(item) + else: + filters.insert(0, item) + +def simplefilter(action, category=Warning, lineno=0, append=0): + """Insert a simple entry into the list of warnings filters (at the front). + + A simple filter matches all modules and messages. 
+ 'action' -- one of "error", "ignore", "always", "default", "module", + or "once" + 'category' -- a class that the warning must be a subclass of + 'lineno' -- an integer line number, 0 matches all warnings + 'append' -- if true, append to the list of filters + """ + assert action in ("error", "ignore", "always", "default", "module", + "once"), "invalid action: %r" % (action,) + assert isinstance(lineno, int) and lineno >= 0, \ + "lineno must be an int >= 0" + item = (action, None, category, None, lineno) + if append: + filters.append(item) + else: + filters.insert(0, item) + +def resetwarnings(): + """Clear the list of warning filters, so that no filters are active.""" + filters[:] = [] + +class _OptionError(Exception): + """Exception used by option processing helpers.""" + pass + +# Helper to process -W options passed via sys.warnoptions +def _processoptions(args): + for arg in args: + try: + _setoption(arg) + except _OptionError, msg: + print >>sys.stderr, "Invalid -W option ignored:", msg + +# Helper for _processoptions() +def _setoption(arg): + import re + parts = arg.split(':') + if len(parts) > 5: + raise _OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append('') + action, message, category, module, lineno = [s.strip() + for s in parts] + action = _getaction(action) + message = re.escape(message) + category = _getcategory(category) + module = re.escape(module) + if module: + module = module + '$' + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise _OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + filterwarnings(action, message, category, module, lineno) + +# Helper for _setoption() +def _getaction(action): + if not action: + return "default" + if action == "all": return "always" # Alias + for a in ('default', 'always', 'ignore', 'module', 'once', 'error'): + if a.startswith(action): + return a + raise _OptionError("invalid action: %r" % (action,)) + +# Helper for _setoption() +def _getcategory(category): + import re + if not category: + return Warning + if re.match("^[a-zA-Z0-9_]+$", category): + try: + cat = eval(category) + except NameError: + raise _OptionError("unknown warning category: %r" % (category,)) + else: + i = category.rfind(".") + module = category[:i] + klass = category[i+1:] + try: + m = __import__(module, None, None, [klass]) + except ImportError: + raise _OptionError("invalid module name: %r" % (module,)) + try: + cat = getattr(m, klass) + except AttributeError: + raise _OptionError("unknown warning category: %r" % (category,)) + if not issubclass(cat, Warning): + raise _OptionError("invalid warning category: %r" % (category,)) + return cat + + +# Code typically replaced by _warnings +def warn(message, category=None, stacklevel=1): + """Issue a warning, or maybe ignore it or raise an exception.""" + # Check if message is already a Warning object + if isinstance(message, Warning): + category = message.__class__ + # Check category argument + if category is None: + category = UserWarning + assert issubclass(category, Warning) + # Get context information + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith((".pyc", ".pyo")): + filename = filename[:-1] + 
else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + registry = globals.setdefault("__warningregistry__", {}) + warn_explicit(message, category, filename, lineno, module, registry, + globals) + +def warn_explicit(message, category, filename, lineno, + module=None, registry=None, module_globals=None): + lineno = int(lineno) + if module is None: + module = filename or "" + if module[-3:].lower() == ".py": + module = module[:-3] # XXX What about leading pathname? + if registry is None: + registry = {} + if isinstance(message, Warning): + text = str(message) + category = message.__class__ + else: + text = message + message = category(message) + key = (text, category, lineno) + # Quick test for common case + if registry.get(key): + return + # Search the filters + for item in filters: + action, msg, cat, mod, ln = item + if ((msg is None or msg.match(text)) and + issubclass(category, cat) and + (mod is None or mod.match(module)) and + (ln == 0 or lineno == ln)): + break + else: + action = defaultaction + # Early exit actions + if action == "ignore": + registry[key] = 1 + return + + # Prime the linecache for formatting, in case the + # "file" is actually in a zipfile or something. + linecache.getlines(filename, module_globals) + + if action == "error": + raise message + # Other actions + if action == "once": + registry[key] = 1 + oncekey = (text, category) + if onceregistry.get(oncekey): + return + onceregistry[oncekey] = 1 + elif action == "always": + pass + elif action == "module": + registry[key] = 1 + altkey = (text, category, 0) + if registry.get(altkey): + return + registry[altkey] = 1 + elif action == "default": + registry[key] = 1 + else: + # Unrecognized actions are errors + raise RuntimeError( + "Unrecognized action (%r) in warnings.filters:\n %s" % + (action, item)) + # Print message and context + showwarning(message, category, filename, lineno) + + +class WarningMessage(object): + + """Holds the result of a single showwarning() call.""" + + _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", + "line") + + def __init__(self, message, category, filename, lineno, file=None, + line=None): + local_values = locals() + for attr in self._WARNING_DETAILS: + setattr(self, attr, local_values[attr]) + self._category_name = category.__name__ if category else None + + def __str__(self): + return ("{message : %r, category : %r, filename : %r, lineno : %s, " + "line : %r}" % (self.message, self._category_name, + self.filename, self.lineno, self.line)) + + +class catch_warnings(object): + + """A context manager that copies and restores the warnings filter upon + exiting the context. + + The 'record' argument specifies whether warnings should be captured by a + custom implementation of warnings.showwarning() and be appended to a list + returned by the context manager. Otherwise None is returned by the context + manager. The objects appended to the list are arguments whose attributes + mirror the arguments to showwarning(). + + The 'module' argument is to specify an alternative module to the module + named 'warnings' and imported under that name. This argument is only useful + when testing the warnings module itself. + + """ + + def __init__(self, record=False, module=None): + """Specify whether to record warnings and if an alternative module + should be used other than sys.modules['warnings']. 
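+
+        A usage sketch (an editorial illustration, not from the upstream
+        docstring):
+
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter("always")
+                warnings.warn("deprecated", DeprecationWarning)
+                assert issubclass(w[-1].category, DeprecationWarning)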
+ + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + """ + self._record = record + self._module = sys.modules['warnings'] if module is None else module + self._entered = False + + def __repr__(self): + args = [] + if self._record: + args.append("record=True") + if self._module is not sys.modules['warnings']: + args.append("module=%r" % self._module) + name = type(self).__name__ + return "%s(%s)" % (name, ", ".join(args)) + + def __enter__(self): + if self._entered: + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + if self._record: + log = [] + def showwarning(*args, **kwargs): + log.append(WarningMessage(*args, **kwargs)) + self._module.showwarning = showwarning + return log + else: + return None + + def __exit__(self, *exc_info): + if not self._entered: + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + + +# filters contains a sequence of filter 5-tuples +# The components of the 5-tuple are: +# - an action: error, ignore, always, default, module, or once +# - a compiled regex that must match the warning message +# - a class representing the warning category +# - a compiled regex that must match the module that is being warned +# - a line number for the line being warning, or 0 to mean any line +# If either if the compiled regexs are None, match anything. +_warnings_defaults = False +try: + from _warnings import (filters, default_action, once_registry, + warn, warn_explicit) + defaultaction = default_action + onceregistry = once_registry + _warnings_defaults = True +except ImportError: + filters = [] + defaultaction = "default" + onceregistry = {} + + +# Module initialization +_processoptions(sys.warnoptions) +if not _warnings_defaults: + silence = [ImportWarning, PendingDeprecationWarning] + # Don't silence DeprecationWarning if -3 or -Q was used. + if not sys.py3kwarning and not sys.flags.division_warning: + silence.append(DeprecationWarning) + for cls in silence: + simplefilter("ignore", category=cls) + bytes_warning = sys.flags.bytes_warning + if bytes_warning > 1: + bytes_action = "error" + elif bytes_warning: + bytes_action = "default" + else: + bytes_action = "ignore" + simplefilter(bytes_action, category=BytesWarning, append=1) +del _warnings_defaults diff --git a/CVIssueCount/weakref.py b/CVIssueCount/weakref.py new file mode 100644 index 0000000..ca37f87 --- /dev/null +++ b/CVIssueCount/weakref.py @@ -0,0 +1,458 @@ +"""Weak reference support for Python. + +This module is an implementation of PEP 205: + +http://www.python.org/dev/peps/pep-0205/ +""" + +# Naming convention: Variables named "wr" are weak reference objects; +# they are called this instead of "ref" to avoid name collisions with +# the module-global ref() function imported from _weakref. 
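+#
+# A minimal sketch of the primitive this module builds on (an editorial
+# addition, not from upstream):
+#
+#   class C(object):
+#       pass
+#   o = C()
+#   wr = ref(o)    # wr() returns o while o is alive
+#   del o          # once o has been collected, wr() returns None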
+ +import UserDict + +from _weakref import ( + getweakrefcount, + getweakrefs, + ref, + proxy, + CallableProxyType, + ProxyType, + ReferenceType) + +from _weakrefset import WeakSet, _IterationGuard + +from exceptions import ReferenceError + + +ProxyTypes = (ProxyType, CallableProxyType) + +__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", + "WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType", + "CallableProxyType", "ProxyTypes", "WeakValueDictionary", 'WeakSet'] + + +class WeakValueDictionary(UserDict.UserDict): + """Mapping class that references values weakly. + + Entries in the dictionary will be discarded when no strong + reference to the value exists anymore + """ + # We inherit the constructor without worrying about the input + # dictionary; since it uses our .update() method, we get the right + # checks (if the other dictionary is a WeakValueDictionary, + # objects are unwrapped on the way out, and we always wrap on the + # way in). + + def __init__(*args, **kw): + if not args: + raise TypeError("descriptor '__init__' of 'WeakValueDictionary' " + "object needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + def remove(wr, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(wr.key) + else: + del self.data[wr.key] + self._remove = remove + # A list of keys to be removed + self._pending_removals = [] + self._iterating = set() + UserDict.UserDict.__init__(self, *args, **kw) + + def _commit_removals(self): + l = self._pending_removals + d = self.data + # We shouldn't encounter any KeyError, because this method should + # always be called *before* mutating the dict. + while l: + del d[l.pop()] + + def __getitem__(self, key): + o = self.data[key]() + if o is None: + raise KeyError, key + else: + return o + + def __delitem__(self, key): + if self._pending_removals: + self._commit_removals() + del self.data[key] + + def __contains__(self, key): + try: + o = self.data[key]() + except KeyError: + return False + return o is not None + + def has_key(self, key): + try: + o = self.data[key]() + except KeyError: + return False + return o is not None + + def __repr__(self): + return "" % id(self) + + def __setitem__(self, key, value): + if self._pending_removals: + self._commit_removals() + self.data[key] = KeyedRef(value, self._remove, key) + + def clear(self): + if self._pending_removals: + self._commit_removals() + self.data.clear() + + def copy(self): + new = WeakValueDictionary() + for key, wr in self.data.items(): + o = wr() + if o is not None: + new[key] = o + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + for key, wr in self.data.items(): + o = wr() + if o is not None: + new[deepcopy(key, memo)] = o + return new + + def get(self, key, default=None): + try: + wr = self.data[key] + except KeyError: + return default + else: + o = wr() + if o is None: + # This should only happen + return default + else: + return o + + def items(self): + L = [] + for key, wr in self.data.items(): + o = wr() + if o is not None: + L.append((key, o)) + return L + + def iteritems(self): + with _IterationGuard(self): + for wr in self.data.itervalues(): + value = wr() + if value is not None: + yield wr.key, value + + def iterkeys(self): + with _IterationGuard(self): + for k in self.data.iterkeys(): + yield k + + __iter__ = iterkeys + + def itervaluerefs(self): + 
"""Return an iterator that yields the weak references to the values. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the values around longer than needed. + + """ + with _IterationGuard(self): + for wr in self.data.itervalues(): + yield wr + + def itervalues(self): + with _IterationGuard(self): + for wr in self.data.itervalues(): + obj = wr() + if obj is not None: + yield obj + + def popitem(self): + if self._pending_removals: + self._commit_removals() + while 1: + key, wr = self.data.popitem() + o = wr() + if o is not None: + return key, o + + def pop(self, key, *args): + if self._pending_removals: + self._commit_removals() + try: + o = self.data.pop(key)() + except KeyError: + if args: + return args[0] + raise + if o is None: + raise KeyError, key + else: + return o + + def setdefault(self, key, default=None): + try: + wr = self.data[key] + except KeyError: + if self._pending_removals: + self._commit_removals() + self.data[key] = KeyedRef(default, self._remove, key) + return default + else: + return wr() + + def update(*args, **kwargs): + if not args: + raise TypeError("descriptor 'update' of 'WeakValueDictionary' " + "object needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + dict = args[0] if args else None + if self._pending_removals: + self._commit_removals() + d = self.data + if dict is not None: + if not hasattr(dict, "items"): + dict = type({})(dict) + for key, o in dict.items(): + d[key] = KeyedRef(o, self._remove, key) + if len(kwargs): + self.update(kwargs) + + def valuerefs(self): + """Return a list of weak references to the values. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the values around longer than needed. + + """ + return self.data.values() + + def values(self): + L = [] + for wr in self.data.values(): + o = wr() + if o is not None: + L.append(o) + return L + + +class KeyedRef(ref): + """Specialized reference that includes a key corresponding to the value. + + This is used in the WeakValueDictionary to avoid having to create + a function object for each key stored in the mapping. A shared + callback object can use the 'key' attribute of a KeyedRef instead + of getting a reference to the key from an enclosing scope. + + """ + + __slots__ = "key", + + def __new__(type, ob, callback, key): + self = ref.__new__(type, ob, callback) + self.key = key + return self + + def __init__(self, ob, callback, key): + super(KeyedRef, self).__init__(ob, callback) + + +class WeakKeyDictionary(UserDict.UserDict): + """ Mapping class that references keys weakly. + + Entries in the dictionary will be discarded when there is no + longer a strong reference to the key. This can be used to + associate additional data with an object owned by other parts of + an application without adding attributes to those objects. This + can be especially useful with objects that override attribute + accesses. 
+ """ + + def __init__(self, dict=None): + self.data = {} + def remove(k, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(k) + else: + del self.data[k] + self._remove = remove + # A list of dead weakrefs (keys to be removed) + self._pending_removals = [] + self._iterating = set() + if dict is not None: + self.update(dict) + + def _commit_removals(self): + # NOTE: We don't need to call this method before mutating the dict, + # because a dead weakref never compares equal to a live weakref, + # even if they happened to refer to equal objects. + # However, it means keys may already have been removed. + l = self._pending_removals + d = self.data + while l: + try: + del d[l.pop()] + except KeyError: + pass + + def __delitem__(self, key): + del self.data[ref(key)] + + def __getitem__(self, key): + return self.data[ref(key)] + + def __repr__(self): + return "" % id(self) + + def __setitem__(self, key, value): + self.data[ref(key, self._remove)] = value + + def copy(self): + new = WeakKeyDictionary() + for key, value in self.data.items(): + o = key() + if o is not None: + new[o] = value + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + for key, value in self.data.items(): + o = key() + if o is not None: + new[o] = deepcopy(value, memo) + return new + + def get(self, key, default=None): + return self.data.get(ref(key),default) + + def has_key(self, key): + try: + wr = ref(key) + except TypeError: + return 0 + return wr in self.data + + def __contains__(self, key): + try: + wr = ref(key) + except TypeError: + return 0 + return wr in self.data + + def items(self): + L = [] + for key, value in self.data.items(): + o = key() + if o is not None: + L.append((o, value)) + return L + + def iteritems(self): + with _IterationGuard(self): + for wr, value in self.data.iteritems(): + key = wr() + if key is not None: + yield key, value + + def iterkeyrefs(self): + """Return an iterator that yields the weak references to the keys. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the keys around longer than needed. + + """ + with _IterationGuard(self): + for wr in self.data.iterkeys(): + yield wr + + def iterkeys(self): + with _IterationGuard(self): + for wr in self.data.iterkeys(): + obj = wr() + if obj is not None: + yield obj + + __iter__ = iterkeys + + def itervalues(self): + with _IterationGuard(self): + for value in self.data.itervalues(): + yield value + + def keyrefs(self): + """Return a list of weak references to the keys. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the keys around longer than needed. 
+ + """ + return self.data.keys() + + def keys(self): + L = [] + for wr in self.data.keys(): + o = wr() + if o is not None: + L.append(o) + return L + + def popitem(self): + while 1: + key, value = self.data.popitem() + o = key() + if o is not None: + return o, value + + def pop(self, key, *args): + return self.data.pop(ref(key), *args) + + def setdefault(self, key, default=None): + return self.data.setdefault(ref(key, self._remove),default) + + def update(self, dict=None, **kwargs): + d = self.data + if dict is not None: + if not hasattr(dict, "items"): + dict = type({})(dict) + for key, value in dict.items(): + d[ref(key, self._remove)] = value + if len(kwargs): + self.update(kwargs) diff --git a/CVIssueCount/xml2py.py b/CVIssueCount/xml2py.py new file mode 100644 index 0000000..e108f0a --- /dev/null +++ b/CVIssueCount/xml2py.py @@ -0,0 +1,121 @@ +##################################################################################### +# +# Copyright (c) Harry Pierson. All rights reserved. +# +# This source code is subject to terms and conditions of the Microsoft Public License. +# A copy of the license can be found at http://opensource.org/licenses/ms-pl.html +# By using this source code in any fashion, you are agreeing to be bound +# by the terms of the Microsoft Public License. +# +# You must not remove this notice, or any other, from this software. +# +##################################################################################### + +import ipypulldom +from System.Xml import XmlNodeType + +class _type_factory(object): + class _type_node(object): + def __init__(self, node): + ty = type(node) + self.name = ty.__name__ + self.namespace = ty.xmlns + + def __init__(self): + self.types = {} + + def find_type(self, node, parent): + def create_type(node, parent): + return type(node.name, (parent,), {'xmlns':node.namespace}) + + if parent not in self.types: + self.types[parent] = {} + + tp = self.types[parent] + if node.name not in tp: + tp[node.name] = [create_type(node, parent)] + + tpn = tp[node.name] + + for t in tpn: + if t.xmlns == node.namespace: + return t + + #if there's no matching namespace type, create one and add it to the list + new_type = create_type(node, parent) + tpn.append(new_type) + return new_type + + def __call__(self, node, parent=object): + if isinstance(node, ipypulldom.XmlNode): + return self.find_type(node, parent) + return self.find_type(self._type_node(node), parent) + + +xtype = _type_factory() + + +def xml2py(nodelist): + + def children(nodelist): + while True: + child = xml2py(nodelist) + if child is None: + break + yield child + + def set_attribute(parent, child): + name = type(child).__name__ + if not hasattr(parent, name): + setattr(parent, name, child) + else: + val = getattr(parent, name) + if isinstance(val, list): + val.append(child) + else: + setattr(parent, name, [val, child]) + + node = nodelist.next() + if node.nodeType == XmlNodeType.EndElement: + return None + + elif node.nodeType == XmlNodeType.Text or node.nodeType == XmlNodeType.CDATA: + return node.value + + elif node.nodeType == XmlNodeType.Element: + + #create a new object type named for the element name + cur = xtype(node)() + cur._nodetype = XmlNodeType.Element + + #collect all the attributes and children in lists + attributes = [xtype(attr, str)(attr.value) for attr in node.attributes] + children = [child for child in children(nodelist)] + + if len(children) == 1 and isinstance(children[0], str): + #fold up elements with a single text node + cur = xtype(cur, str)(children[0]) + 
cur._nodetype = XmlNodeType.Element + else: + #otherwise, add child elements as properties on the current node + for child in children: + set_attribute(cur, child) + + for attr in attributes: + attr._nodetype = XmlNodeType.Attribute + set_attribute(cur, attr) + + return cur + + +def parse(xml): + return xml2py(ipypulldom.parse(xml)) + +def parseString(xml): + return xml2py(ipypulldom.parseString(xml)) + + +if __name__ == '__main__': + rss = parse('http://feeds.feedburner.com/Devhawk') + for item in rss.channel.item: + print item.title