From a765aa23f421637d02d2aec533ab995507458da9 Mon Sep 17 00:00:00 2001 From: John Peck Date: Sun, 7 Dec 2025 12:38:11 +0000 Subject: [PATCH] modules --- __init__.py | 1 + __pycache__/base.cpython-310.pyc | Bin 0 -> 1117 bytes __pycache__/gwt_rpc.cpython-310.pyc | Bin 0 -> 9596 bytes __pycache__/kubra.cpython-310.pyc | Bin 0 -> 5914 bytes __pycache__/nisc.cpython-310.pyc | Bin 0 -> 3179 bytes __pycache__/simple.cpython-310.pyc | Bin 0 -> 2536 bytes base.py | 23 ++ gwt_rpc.py | 255 ++++++++++++++ kubra.py | 173 ++++++++++ newpower.py | 322 +----------------- newpower2.py | 493 +--------------------------- nisc.py | 80 +++++ simple.py | 55 ++++ 13 files changed, 613 insertions(+), 789 deletions(-) create mode 100644 __init__.py create mode 100644 __pycache__/base.cpython-310.pyc create mode 100644 __pycache__/gwt_rpc.cpython-310.pyc create mode 100644 __pycache__/kubra.cpython-310.pyc create mode 100644 __pycache__/nisc.cpython-310.pyc create mode 100644 __pycache__/simple.cpython-310.pyc create mode 100644 base.py create mode 100644 gwt_rpc.py create mode 100644 kubra.py create mode 100644 nisc.py create mode 100644 simple.py diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..1584277 --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ +# This file makes the 'providers' directory a Python package. \ No newline at end of file diff --git a/__pycache__/base.cpython-310.pyc b/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f44103b6af42eac551cf391095fb0f9b7ff349b8 GIT binary patch literal 1117 zcmb7CJ#Q015Z%2`D{+i52?RxgD`;HAZU_<*LQY)JSx6DhHLLUO+BtE)HGAh+Ik^iz z1WNyF4|t~lWI;N-7Cal&az zCN!iIE3_#2K)A*2uY}uz-rJ!KzQbMcUE@34yC!Y_C;Um;mbNdi`nnMf^Fl>&F%iX$ z`e{jYTkB|CD!P1l|bRNW2ec^#as_E(jwF3p8EOZ_p68zN2Sz za!r=@Ks#A95!(Nhea_@OTRPf{Wj0F2+RsIvCoYIAvmgz5u%## z`E}O4jZ}9&pLcJHN!p#txlmnfxHG-eHO7)GDHwAhb>)Hbs7G70JlUn+*<{5OGB$!X z9zzJ3nYQV`z%HqJ1R-Nr>M^+59f=~o!R=VVVi3nFTYEs}da(4dF(l8JK45Gj`7A}d z&e)e(l&)`l#<+|bQ!PZo_f^kOA?+Q7zzX?c#tN}(#Gy56xeaRKF@*L66MJwJvTFO< z^w^iPthn25#GAiuL~Qc@dnFV;zVcCFi>iU@fr%l3JWc2c808th+ zPq&sSY+&#aO~xr0<>?<_sxhhO`#(QacnKGV0PeX$Z_swN9SviJW3Og~BQbY7G`A&m T?OiqX_qbv?K2DP~t>eSrSCs86 literal 0 HcmV?d00001 diff --git a/__pycache__/gwt_rpc.cpython-310.pyc b/__pycache__/gwt_rpc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05859e1b4de801f14efb379ca2ed54b7b69efa69 GIT binary patch literal 9596 zcmbVSTW}m_R_@#M^jtI=T`XC)lXRRSWu>A~d|k(Jymn;S@g^$;Nw#CvE}i!Ltx-!o z)8oH;bZL6BS!^HyTrBKz2}!CXs0B>%1WQr-vIGhO)Gk##vR%AU1q9v*MHLQf_|D%m zy4fjqcT|1)zhBSgJE!IM^yDS{J`GR0e?KNk|3sOczYH?x@dV!hVG@%)X+C|+^D^>^ zr__~srLNAab!}dgsZRAWb$wnJY0b;ljd??)WiL0MLw&}}*9-H7dU3ueOOCYBY8#Im=Q`7&|_%ORI@%1-gNHs6PmJS(82fRX}A`k6c{jTS$~%A~1LH8GeS zI-y&45`$>uH=HC#Ddu>gJt`+gi+fF*2M*GOIc_(C1)tX)KB^?em+ZiKjr%Jub9mBE zxe3432-mx1RhnhzPetZDp5Pe}N21f1SC~AnGKHykqynKCbZ05BIv)NfvV1~XKEZQ86{CeGQn4VvAtET5J zJEq+V{W0z|ZI@T7w65+?Ln4c30#EQhNG!*aCB+J;9LpP6FH<%tr;41qN%fjYYa*Rt z8I^pG(sMk;K|{0p(n3M0}*>C3}yRrJX%lfcBNFSq3x2tEf=RRB8k|gdD$yD?t}lDFvE1-i8%efqH!WORQVc@et+n8Kp~F9> zZ6*0u;Bd>XIgK##zvxG<=h@>YE6-xm$A^qiU4DiUo~bpamuRg=*DlhEZwOsg7N8!s_DKtQ%ryRo`EB zogm4q*j~#?bmrG>w~=T~J6yb{@B!?M4}ySA^s3)jaBE36aDu?~8;M$TLQdk!i$r=r zk^>j6&P`iaU%Pl=Zqk~Vo__V>^z|w0<>{+4vq^T*vBAwjqPLnP8oZBssDTrXYKay& z-U1(@frhCVv(d`#N&E?9?>|1iV)OCU)z$IEu;KAdSY0X+i*k6 zDxq~?g6Og?m*gV;blFhzvVuS32l-4FxywUqf}*#(EZWIXq&{z!GEF# zoFoMql6K-L@S3tIx52-BGOC2?2KW_uC03R)Oln~RNa_~5ZrtnT5eXlE;VwR6!xxPlqLGRAvNSxNwm*)+8b&6 zNJxTLuJ^T7nfmpiU;p}I+K;5f8$ca+J=WT4I!@FI2RHU%9BEVjlGe_&^>(&x#5(_e 
zZ18^+vlKeB6w_H2CD!vh@;lPcOA9h9vEDmqy)m>2S-UN_b2L(%W974;u@Prcr?9?F znf1S~wQ0xl#&Db`S=~O3js2VAJeh)C($dJ%fsKQk*lC=F{O)Vh{3u&G6c^Y4WVXNt z1>Gm;5af4Q$nUpflJpYIaa*DNv;A~>=@}BmX*{<6-?G$(wvT$Tx5wE?TzpUZ0Jli( zZ$SFn#W#!8HchR{tJ3I!sY^*2`Y^Oey;&&>Lvib2@a`u+Z|JG_R6`(TIs$IhRBK`?&z354msI_e+M*wX{BaDP=&9C8@))_{U}t_~mk5#Q zkd`EU5*!9`am8Qm`I6EmL6JA4P3diEMdnDy%GZ@!%4{n4bgCZ7I?Sn3c`lKc6M5wx zET^+p^;*nfLOXC1BPRK?%&ms7M$L22Bmap4a#)JaJVMOv^u*Jf>=>%Eo?o@S;9O;U zm?S&nEI8cZkx=eqLb;D>`~Xd--SpYIP+`0m`BV}1Q))|T@_oq8MT6p2(`n2F8^_7c zn4?dmT2pAe{nU-toAfL|&%1$72W*G@DJsjNt7lgoPN$ftt+1M?Fh`sYl#fuE4&z$` zPSA)G#Y&R=HB#u4ocla68qTWFnEWTGb>T|A>GLqH%XRCHB?Hjsq)$71B~B;lT_9^= zdEiwGEM+Gd*o}I?57J1v?UjNz&&heU(PHL^QoEiw+RWweJsCe|P%DP`y+ zD>5uh20I*H6;P+Mj%1+A_rBW*yM?lq^uT7h%<5W|PPS{&@@XyIQqn_9YHYPEV$T-L z4hcw*GO+BVoOGPFVbTf*9>FA35xfqPhgs2JQ1bGA`H(ya6Qe!oR|geEE-Pem3|V=g zXoGmuGlX`!YRLM7qCO-SkvbxmR9#m7O&d}EMH?wSIDpZNx0DCb(?1-yUaAtebbn-= zbUYJy0%%N9pVEfRQZtmM6<69+1PG!U7?!pcD||TCf)N5cAxUBG7VSb~)dazQcURFwt3QXu5w3-+od{g2++Q763NGC1zvNG$tE0i)? zgQyI!Y+iz#w}6fzk0yVb=6z}gaA}=%vJfeA)eRR-aH84eM5y{XC_KXrEKW^n%;{GW z>?XxULQ9YXl;}Zq(E%{k8%_wdm1tGJxt`)CtqG-^XtbfEKq$2pKx6>BUPMQJg@}M` zS1EOl2J5v_5huwPTT=YxL|Y0-WH8`;Gt-c5GsRT zwUmM2LF^&s8i^RS)l>ANya@vg?1Q(!KI&97oGz%tyF!->-oCHEmg(a9l3@#6#{q1> zapbF7L91T3`8t0Yi`>J2_tcc1u2a<<5plL#2b^RCS2j@d2{|AD@3?dV^ChrEIn|a2 z9(7g~(Q%i8qygx}=;VTbz*$D$4S@*h{9~CAE0b<3?*M`X<@ChTJ4x%>3d_(9#freT zD%0B<%eFJjXzK#~Ldo4Rpb)dM1{F$ZHzP1GP%qwjsKo*l-iztTHK|=-g?2G6hH|Xl zCe;78K&<488ff)r(PCU=#bK%4gVG*Kp(cl#pQ8K-P(ON?;-0vKQ8iv;Jt!%D?&0<; zJKFc6eXnT$mEG+z_SP(l(k{3A;=VYK*}espUDj}5SlvRbV)+@Mp`XJx$uqb21ED+#=Wv~tkU$C{K|||1CAX(e)Rcc&mTK_ z^3>_*(2~7kS3J8>t6YIY1q42R{KWCo$4;F-{``qVPc`PQgP{!=cm`;4feMVbp}J$Z z-Ao?tDrp~Ym(&wffWaHgS>vBa5debqUn<$Ogoq{eY!EE`x53CKe->^)VeIFMy0@C~!l%Kgoy?8AX`tO}nw4Wc^kM z2MW<2ajqe>G`73*3}TO6gX% z;^W;hXl8JyK(V+?eD&a-yhC>m?pg04-qqk-yB+Xglp4;4Nmg> zMyG@l_FC<@_)VNohmPPd?9<3%o=94&vqyS9W{WOvkXXhxi{L=K*sf)8*xJ1NVi9vxAtIC5t zI1~1RRKRv*8JaR@S>nqEkp4Ea(p0~TC!jG)z?nti$-|fbpS-Tfhvmm*?FZ5DZZ3Ot zz?j&K;N}D#P6!tIJ1SnHK#G|nXD;+w2R4_tKtV@E?$KhYTDV1n?Kb8@{Jury0Z7#G z9J&(l2pmlA2L-Ey9GIX$qsrIhX!&6@j8K6Z3_Jo2Lp}r=)&v^PF#R1VOryYAlGRx4 z;%S5q^x#A~0)-A@gbtViWSeC1#6g;!JD+i)NRx8h#TiLeyBa&7meY zA^}C`m;ub#%_E?&D_`7`@7a?tZRL0O>)k3(=P7UHcem@?Do@)X_v-&D|Iuss03EE5mJgPTh5VYe5e^N7RK-wi1qz~1NLK|z7m z&jDq>B(?X!3!rof>7jTZ%K`QOb4VeUeXMv;3JnBQdhRMf;!=A!9uoEcCF+MBs^8CY zaWO^pm_^48kOv|oT^7h5w+C!)5VGIMZ>H#e1bqryeaszwp4sYC*wg1gtZY!co zKl^8+deXH5)gMMep!%L|RKK7IOuqxYiv>aj6JqCICGs;wewN4{T}F%jD=I$)67{5* zds<+|@m(02z$PiM<9I|sMfh~^h2WOTD~O}-^yt$2p{YhYPMjRT0`yN`ox5;(l0Y6o zyX(_47t{5aXen;(Z)o(?nL8i}l-mK{Q&S&PBllmi2ADA=j0EycUhFIeG&_8>13C}zuhWj+Ch`p;e*}X6Lg-pl*g}LGMW}qo2MGEn z1x#CSicb#w1?mGl&)p{fH5!DB8$V0r*QqK;anVQO??b8a{S9imLF5;S{3eliL6Y8d z#a7_fEPS`}l2WIjgP!>&HT`WOABpxmPQ)VUoD(M{k_L{@T6AlRNtROhY(!w+=3!cx zOVdf4kxiTo+Z zckl!RkO`x2Bli6QyK6x2!0$lh<<1`kwdg@;3w4T2KAQFxFtdOmrfkY5loLSwozD#z z#rPDMK8w!~|7$44@9Rg>{^B#j6aSB_KlEQ2Bv2Ey;5R^^A$NV>raV3v5F#kkS5f@$ zgaT_V3#%aek0Gy)NXv3VQDDjZapX1BE1mi=d4To zaT+h_=^(_a<9S__DD>5zP!p2~Va0Fa=>&j*6H7+E{Guoy9UvrL!~bE_c$FHH&Prv8 zR7z4rq}+3FR)pWnGgPEE*YRCh?qTrW_L^O?-*+fEwi3(+4-e1%e3#$%`!txF^ECWD z$X||6oz}F!(Z}IG6CZEj$pi|nah7On)tjv`^{ubz>TRqU>TRx>c3g-v8?&|gTN*dH^@+x<$h~i@HSle72j5QQ;Cqg>H$11ICxu@OGu4<*VhvT5l&aKkE&$xYC6R+p?p2Hoi z<$kGgx3s?0zQ#PJRdaZ)w7GXu`%)`)^ccG|l6G~lj$68$HE|84mxWlT&g+wP8ebca z)+k+dwpyv}HOktU(OFC_H#X;X?lF!2F}KO*CUcw9xwiIEHFAElwYjh}X4q} zc>dtT-r*C|9FB1xdsip#ZMLnoTOB#yRoV>tQ62^(k;tVy0b&4n(%Bo72 zlO}0TdQ=9(P|#M7rpo%Hu4YX?)I1h_tBD^$nGc>`y%UPn?d|Q=jl7?%4hP$jm}q^t zBP}J^208ajC>pH6biJXwY>|1)vm5LLv)Q8AFdA$LHCJ!2CTi4wg0+}?PycUm@(CES 
zqSoyaBu)PuROnGi@(n6IZC5YZ9spP20|2!LoFMGTT0O!CfYwP3{e&Ap5^mkr#re{> zsNFn8ID#HE1K$g~c4=25fg-y2aY>k zoVL@Ewe>iOl&4866j7K70!eAyjv&J<2W*o854RxQ2Muo|kj#&IgQRx)tq6b&5FjR)&{r_dyIwR_bFNoCwMe z;GQTAuA>BF!jeX3pVU#Dc`W`+BHTHGuT?19%0@%LG~^klaSj=}{qSy33D2;7C@$I zB~xKwB{&ytwNlT$`&wQvb#9&1h$Ve)FdFSDWD;j_iU;f$#f6*xqX3K;=ubkRzY*Q_ zfg;(8|C3uF78FDkIIAeG;`eaEGFPMvE9Ep;!5bKL>AwPq>)9rIEDdJWzq2kDdUhEC=VbfxsE5Jg&P!c zR17j%Q@8a~P+3uN*Qs$nA>xp`9B7NYqxfDWWyMi-puIj;+MDoG;<1Xk5}K)qW1ggl z0lBa1#1boZ2u5dDvM1*hH7bZA?-NprITUTZ(*Y8Z4JtSi3shUA9;<>NgBYJlaa$~- zSz0Q%&&uFKbVYa+80m=3$6=_8co!#TjP*b#UGmB4A1SPX=K`K=4TfAY1j}#K)sP*0 zs-FZY#lQY30xd!+Oc2Bxl$*nBH`(M3qRo{FS9D#qnkenB>$|q1{UGubXhFuEO*?z} zhPI`b&YlC|n%q$a4Qh^^-`uU0HMF@H>y;kYMw+@rYCL9xwC{Q4Tn{KTX@j$Dy^h<|LH4!1IU@Xu z5WG&O1nqC`{24^P2_pYg4r0$+7`uSmEtGV}#j*)HPf&U6bD-Ldiq1DbZf+qX+{K&d zyh666&i;QMK76>3=bwb?=|-6HB;x)7%>+UAXPrwqv><&`Dv6;L<5(2$p-Eh!;wlv? z9+GKP2Kp4$l!3mBnmn%jdx8T2$aOFrAnOp0;j1h$3Ptr8n?gI4)D6_q?xFuqBHyf_1Bt9(b=LmUSpaZ?0ZB17VbG-ZYW(b@WV zvgc5^@RFApd5ev}O(EQ4C(K2I!uS&Y^?OT(`Oju+h1lyu^Y0TfbbuqXVm(5JC?=DK zE06jIDis+5&9UPtoz=-w6_6-eB)*Ge7&Wf-*jVR)loY;4KA4@20V+8|xB$hao?WZT z&p~H(%%P+fJ|$j(Hz673wXu%5H6*&!i(eSXpTt}65A=?iTGvy3^@?nj7Ojf1K|rR? zF^8UN_2!t}pVsG9omOp4$oK+iw_dvN(nWsks|EmXsUt`~;@4D!RFKUciI3Dkcu01Z zy^W&Ogk6FRfRnrACWVs|NdV-G4#u$xz6>=my2V| zADvt7pIheFmft?V{Im1RSC@;Tb+mE00No=*mTr~{Lhu&Y zvJDeyvf`N;fIY|uS6)p9-7v{sTLE#my5T5`3hPqm-1V!{`2d8U62p2|MB4$e*Ad8S zoWc{i2-SZ1|*yI#9h=CNFhZ3mg@IVNUtA>ZU}88k#r`Yh+hiu332)~j2uC-Lh4B}d-X25 z{)8u^(>B?e$E}LO_G3_c1aRomG+hDC8C0eh-PNm~_V5iRGiDdT4RJN2bHpLZ0f+`E zha_S~jQpmv%cjfxLEl1(t4?@-_eNPXow$bNol4M1h2jeo$m6R_q8la2G@DRi=rRo< z6BKWvpqQNz@pFl^+HT8r-P7*rmZ^H%O@Uvat8gnF`Xvf*QJI?(y}~P#G8;jP7jz$4 ygD@|vAFZlm6CqZqpjz^fSk!UoB+`AgN7jdx#uWRW`7ybv#RnPta literal 0 HcmV?d00001 diff --git a/__pycache__/nisc.cpython-310.pyc b/__pycache__/nisc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45bf0b9bc8497aeb404994c1b7ee64f681b62ef9 GIT binary patch literal 3179 zcmZuzU5^~a6|L&;nb}>hz_CpXNs|x4fY~JwP~;c~v9U2I8e86tEJi`Cc301AuY0=3 zRW%;ZsK<(ABzWN=&k$+$C6fOK&-nrMnwRhwY$fE}?j5{q=vLkO=&D=QRp*>r<@$P? z;d!Fp$Yul=Zn_ zeM|7ZEi7Sw#`=zMungBc*UMCL(%1Z*@LF7mJXK**PZDB@aX66WDC~W-yYt3=sdOsB zBrhi-ROLh_soDw?j+s_Z3kBNo9VGQHn2H^+B^UgVFZfZ}LfM*MM9n(j6&JQ}zO)v0 zWgpn;{E*#ZyR5R8&LLYkL$+`$=TGd@aN$)R{J2AQXrX>~;aBd`t$eh3hrG`HqdY)v z!Mi6~U%Q8#v%9a~{bl8nUx#UbHsEX#a5g`!gJny!spnTrbgJMh_78{p(;7VX3C?{H zJP3A=@+qIP?pn{Zo6U(PGB;~V;}oMomg`hreZ-A-FO}I~X4<#0gCdq$s?4cmqI8J^ zRo5rFS5A*L?56TwDvVRaqZEhYmlGWiF~n2N2d|mSxb-~7Ps-6aE@tyzU6Hbs;ULw? zepU?Y<2iX~`1;to7pXykndLg_0MK$7vztDV&yRUAdp6 zV@*B0yc`aJIya@cx-H|VG#?ln|1F=VzK%@Q*f<0dmMTgs4bW^zQ!%S2^(;S2i7Lp z`n6DMKe4{^f(?ESJ%Ybk|C^utA*8ozhdpd<5Jj}fQDoXtG%Cd;r}A19-JQhw(HB38 zM43d9>|*kIxyQ__)aJtkNXn}V4whE8{eXrxzSE=LggH*b&)I<2R1AoU!UZLv8p22J zeGYu~eb6&#=y|Q?+luP*K|92=q9`#k+^MO!Qpfprk0xPP!Rr|SeKOT+q^kVqIB)OB$((vY$o%%zvT05=ZMWXYdHuV5Tvxnew z1%@5g%2=_;s`%2b84eE1_vUNbIdH35aKLPvdfWnsYW8`vOJE;#bpE{dmw|A>z82W$ zRnCfiw;JX-4;&f;4x&WdqgvPRna)_22T}7g^VXFcyB}Ws-P>=yy9f1b#rZUzspvi! 
zay8C!h?OqGtjKg0=h-|BX=s`dylKBe`4jvRDB`Ng7)S?3s4sBQ(NASqo<%1~fx@&% z3nOwOU!=yD$oz~9k*Dj*pHuk^8IoTUtd=TYK^5}QilQWsl>)|y4YCH+TPV?YMFTgu z$J^YoEZ};Bze33V&iU{BJYoAJ9=2AiJt4p+4(y+B_{A{+o?k$%5nV(Bh^|#x5Z#xx zvvl!=WY9s2kE5_xLI^RomI1`ruAxgIE`U69;WtwJkRJ(aYj*SKXit+r5gd7JJw4iE z!iqyZez@IsPlj0SRY_=@d*QXcy&ES(3q>~iJJOzdtA+?JpZW{_1zWNT0p{=4U2C`D zEwIpz3StBuI|8Y-^``ta20r4=8GXO|rhO^TlqO-le33lUG-ufS!jEitEhbx+@-m5Y zbs2@#K)H!Q^Ha}=@)5ckJk%f}P{ay^)U(boy{*dkuLFH7q5y%?^DiKsJL6v$|O+Uo*LrZk< zu4#MOB43AGtbg5sc5L(vzi;@gYxR1@r%TuM!65q&qz!Pf^xRmPc+4*tuedD{slTOr zjq(3d+$qXwvDfXGc2*>~h!r{$#-Hdc&-BdLd8}a&Fk9m?EA%ieM|HfGh_+2UFN~YS z6P21yBGW0Pi_RKx7eaIojSvJKCHrwPOuOwyZQiD-Ti=V2q{A`mq&~E4I2es3Niw3b zPK*sPY{IvVrFMe`;skg39+}Hzu95i&rnzrMe)E=d-VGwk|G$MLC8IW zKumcXaN4xbA?}e(c~9mq{RmHvF)xs>!GnCA%mp%UkRiouf-FsB){j&%N%MTAj5R^u zrGB+W(sh6sphzA9hQR9toy`qN?cH_b(_IC@t=_0+XT0&O7V28Ko!huMOHvQ%!I&2A sLVvEWH_iHLI#O`29Uj|_r#cZ_YHkge2^MMtCSY_m*~Fe~TBl$5Kg|0dmH+?% literal 0 HcmV?d00001 diff --git a/__pycache__/simple.cpython-310.pyc b/__pycache__/simple.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c62992ba806c3b08f6a04db5112e966303a8f6e6 GIT binary patch literal 2536 zcmaJ@TW=dh6rS0;9j6H)y`WwQ;#Sj=gojF~Dpl1R0)eR-qzX%wwe^hMP1n25%sR%_ zx7U^T%qvg)1OfuSv+GbpTh^L0=giKWIXmY&-#A@dtTH@*Wapya z1IB(KWBzeqTtd?~Ap{dVW=-m?nD=;-bJFeD>e)@Z=QJJ8B%8X@+v83DV+m|-%ir`zU9{dWwtWoDC5Md^>s3+@|sa+1WyrI(FNJwQ?$hwjal5BJ^ z)}~eSj|JlrnqGyFtjPsyT7nDfA#2*g#!ikowiWgI@yyewI!*rZ#c__;fEb01?kucG&7eSqGAcTZHFxO+I|oijUc<}UelSmuv9oJ|7G z#;aK{t%xeke9Xi`9z140Iy9eR@Yq+#&Lp@WY|Z*1AF_I_VXE!oFk1r^o0`sW?yXJ~ zXHs2%%8j=xRn!@o>K#lHp^Bt7OYMQq(w!xtDQ^eT?5ud;KsO?Jr$ZDphM8nYHgmNq24EI=@!08ca_*QPqh;JNHa=0kLUSD@b>BDu zS!jIdgE-UX(CiyWt*%N3cpYW3XSjUI)H_(Te{y3tR2#$LaN};)i#Ph|P^t}GplH26 zGHww)%7*E6h#IePi!Vb4+(BC|WB|>AR{h=j%L@+iV;B#c#$!b$G!WL2h4V z5BLo8?&vZw4}JmWy{Rue_yXF6pF1zWymQ|vFps{#e9%y@Bbw`qa@C%%cH#|+`c*W2 z1fq+2zwJ->BQ|9@YW@dn$J#0n7$j9~C8_F#aWp2Hveq}%TUhgyt5fL81kl-=>V-Jc z8Dah6+psllbA*l0K7W2?vez%fX*-PdMJNXg6~&~8&*~L*m~@Iy9VKy$#A_sulc3b8 zlO(7v>TVeheX?33@dgPBqzrL`v;rdp9-3Bs`ZM&d@;RK&XmdX4WzcQ2ju8}R@9_sjpH^Wx5lE285JX3B4{RdBc^mJGB+rjw4pMFl28yI{kR?2q zdZo~5v^w*&FzajR%Q=fQM;v-m1EiN4|3U*H!$aFb)9*p#s2%}xT+MI?!H6S!4`*GNoK$4Z|{}imL>aZhQ z{ZMInh612S0)7O1GXCHNz5vu3HB zwv~k{`x3XWSu6*xC4x+~K%e2Z!-1A&xqNjiRrfGI8)&mqj(nbqs1wOzMZIRKQPRfE zon(yB{$&np{t1Ep9qbt*%2|^`tT|lGzK_M)ese?--Q6JHog|?Ez7F{3M za3KzL)@t7kldi1W#@3lC%c20jT>_tKQk^3~EMAHN?(f5-wCWWI5VX6-PXp?!1!;S~ zjZgmXF*i_Hb#h296s4#nU-CM)Q?X~U-C4XKVkk3-4QmUE#&FO1bYJ1#D`Dc?!9N^D vC8UU~O$E2#XT^n9ZdlYe(NwYgMSGdxLU})2Bn;~s1X2DnuUgC2>e0Ue5^Q&r literal 0 HcmV?d00001 diff --git a/base.py b/base.py new file mode 100644 index 0000000..a4b1d43 --- /dev/null +++ b/base.py @@ -0,0 +1,23 @@ +from abc import ABC, abstractmethod + +class BaseProvider(ABC): + """Abstract base class for point-based outage providers.""" + def __init__(self, config, session): + self.config = config + self.session = session + self.name = config.get('name', 'Unknown') + + @abstractmethod + def fetch(self): + pass + +class BaseCountyProvider(ABC): + """Abstract base class for county-based outage providers.""" + def __init__(self, config, session): + self.config = config + self.session = session + self.name = config.get('name', 'Unknown') + + @abstractmethod + def fetch(self): + pass \ No newline at end of file diff --git a/gwt_rpc.py b/gwt_rpc.py new file mode 100644 index 0000000..99b7958 --- /dev/null +++ b/gwt_rpc.py @@ -0,0 +1,255 @@ +import logging +import json +from datetime import datetime, timezone, timedelta +from urllib.parse import urlparse +from 
pyproj import Transformer + +import get_rpc_config_auto +from base import BaseProvider, BaseCountyProvider + +logger = logging.getLogger(__name__) + +class GwtRpcBaseProvider: + """Base class for GWT-RPC providers to share common logic like auto-repair.""" + def __init__(self, config, session): + self.config = config + self.session = session + self.name = config.get('name', 'Unknown') + self.map_url = config.get('map_url') + self.state_filter = config.get('state_filter') + self.AUTO_UPDATE_COOLDOWN_HOURS = 4 + + # Set up session headers and cookies from config + self.session.headers.update({ + 'User-Agent': config.get('user_agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'), + 'Accept': '*/*', + 'Sec-Fetch-Site': 'same-origin' + }) + if config.get('cookies'): + for cookie in config['cookies']: + self.session.cookies.set(cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path']) + + def attempt_auto_repair(self): + if not self.map_url: return False + last_update = self.config.get('last_auto_update') + if last_update: + try: + last_dt = datetime.fromisoformat(last_update) + if last_dt.tzinfo is None: last_dt = last_dt.replace(tzinfo=timezone.utc) + if datetime.now(timezone.utc) - last_dt < timedelta(hours=self.AUTO_UPDATE_COOLDOWN_HOURS): + logger.info(f"Skipping auto-repair for {self.name} (Cooldown active).") + return False + except ValueError: pass + + logger.info(f"Attempting Auto-Repair for {self.name}...") + try: + # This function needs to be defined in the main script context to save config + from newpower import update_provider_config as update_county_config + except ImportError: + from newpower2 import update_provider_config as update_point_config + update_county_config = update_point_config # Fallback + + try: + _, valid_headers, valid_cookies, valid_body = get_rpc_config_auto.fetch_live_data(self.map_url) + if valid_headers and valid_body: + logger.info(f"Repair successful! 
Updating {self.name}.") + excluded = {'content-length', 'host', 'connection', 'cookie', 'accept-encoding', 'sec-ch-ua', 'sec-ch-ua-mobile', 'sec-ch-ua-platform', 'origin'} + clean_headers = {k: v for k, v in valid_headers.items() if k.lower() not in excluded} + clean_headers['Referer'] = self.map_url + + new_settings = { + 'headers': clean_headers, 'body': valid_body, 'cookies': valid_cookies, + 'user_agent': valid_headers.get('user-agent'), + } + + # Update in-memory config for the current run + self.config.update(new_settings) + self.config['last_auto_update'] = datetime.now(timezone.utc).isoformat() + + # Update session for the current run + self.session.cookies.clear() + for cookie in valid_cookies: + self.session.cookies.set(cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path']) + + # Save to disk for next time + update_county_config(self.name, self.config) + return True + except Exception as e: + logger.error(f"Auto-repair failed: {e}") + return False + + def _fetch_rpc_data(self, is_retry=False): + url = self.config.get('url') + headers = self.config.get('headers', {}) + body = self.config.get('body') + if not url or not body: return None + + parsed_url = urlparse(url) + origin = f"{parsed_url.scheme}://{parsed_url.netloc}" + correct_referer = headers.get('Referer') or headers.get('x-gwt-module-base') or origin + + req_headers = headers.copy() + req_headers['Content-Type'] = 'text/x-gwt-rpc; charset=UTF-8' + req_headers['Referer'] = correct_referer + + resp = self.session.post(url, headers=req_headers, data=body, verify=False) + + if "//EX" in resp.text or resp.status_code == 500: + logger.error(f"GWT Failure for {self.name}.") + if is_retry: return None + if self.attempt_auto_repair(): + logger.info("Retrying fetch with new settings...") + return self._fetch_rpc_data(is_retry=True) + return None + + if not resp.ok: return None + return json.loads(resp.text.replace('//OK', '')) + + +class GwtRpcCountyProvider(GwtRpcBaseProvider, BaseCountyProvider): + def fetch(self): + try: + data = self._fetch_rpc_data() + if data: + return self._extract_county_summary(data) + return [] + except Exception as e: + logger.error(f"County fetch error for {self.name}: {e}") + return [] + + def _extract_county_summary(self, data_list): + try: + string_table = next((item for item in data_list if isinstance(item, list)), None) + if not string_table: return [] + + stream_raw = [item for item in data_list if not isinstance(item, list)] + stream = [int(token) for token in stream_raw if isinstance(token, (int, float, str)) and str(token).replace('.','',1).isdigit()] + + REGION_SIG = "cc.nisc.oms.clientandserver.v2.pojo.Region/3192921568" + INTEGER_SIG = "java.lang.Integer/3438268394" + CATEGORY_KEY = "County" + + def get_index(val): + try: return string_table.index(val) + 1 + except ValueError: return 0 + + region_type_id = get_index(REGION_SIG) + integer_type_id = get_index(INTEGER_SIG) + county_type_id = get_index(CATEGORY_KEY) + + if region_type_id == 0: return [] + + results = [] + i = 0 + while i < len(stream): + if stream[i] == region_type_id: + try: + p = i + 1 + served = stream[p] if stream[p+1] == integer_type_id else 0 + p += 2 if served > 0 else 1 + out = stream[p] if stream[p+1] == integer_type_id else 0 + p += 2 if out > 0 else 1 + name_idx, cat_idx = stream[p], stream[p+1] + + if cat_idx == county_type_id: + name = string_table[name_idx - 1] if 0 < name_idx <= len(string_table) else "Unknown" + results.append({'county': name, 'state': self.state_filter, 'company': 
self.name, 'outages': out, 'served': served}) + except IndexError: pass + i += 1 + return results + except Exception as e: + logger.error(f"Could not parse county summary for {self.name}: {e}") + return [] + + +class GwtRpcProvider(GwtRpcBaseProvider, BaseProvider): + def __init__(self, config, session): + super().__init__(config, session) + self.transformer = None + self.STATE_BOUNDS = { + 'WV': {'lat_min': 37.0, 'lat_max': 40.7, 'lon_min': -82.7, 'lon_max': -77.7}, + 'OH': {'lat_min': 38.4, 'lat_max': 42.0, 'lon_min': -84.9, 'lon_max': -80.5}, + 'KY': {'lat_min': 36.4, 'lat_max': 39.2, 'lon_min': -89.6, 'lon_max': -81.9}, + 'IA': {'lat_min': 40.3, 'lat_max': 43.6, 'lon_min': -96.7, 'lon_max': -90.1} + } + if config.get('epsg'): + try: + self.transformer = Transformer.from_crs(f"EPSG:{config['epsg']}", "EPSG:4326", always_xy=True) + except: logger.error(f"EPSG Error for {self.name}") + + def fetch(self): + try: + data = self._fetch_rpc_data() + if data: + return self._extract_outages(data) + return [] + except Exception as e: + logger.error(f"Fetch error {self.name}: {e}") + return [] + + def _extract_outages(self, data_list): + results = [] + try: + string_table = next((item for item in data_list if isinstance(item, list)), None) + if not string_table: return [] + + stream_raw = [item for item in data_list if not isinstance(item, list)] + stream = [int(token) for token in stream_raw if isinstance(token, (int, float))] + + OUTAGE_SIG_KEYWORD = ".pojo.Outage/" + outage_sig_full = next((s for s in string_table if OUTAGE_SIG_KEYWORD in s), None) + if not outage_sig_full: return [] + + outage_type_id = string_table.index(outage_sig_full) + 1 + + i = 0 + while i < len(stream): + if stream[i] == outage_type_id: + try: + p = i + 1 + outagen = stream[p]; p += 1 + crew_status_idx = stream[p]; p += 1 + cause_idx = stream[p]; p += 1 + etr_high = stream[p]; p += 1 + etr_low = stream[p]; p += 1; p += 1 + start_high = stream[p]; p += 1 + start_low = stream[p]; p += 1; p += 1 + coord_x = stream[p]; p += 1 + coord_y = stream[p]; p += 1 + + lat, lon = None, None + if self.transformer and coord_x and coord_y: + try: + lon, lat = self.transformer.transform(coord_x, coord_y) + if not self._is_valid(lat, lon): lat, lon = None, None + except: pass + + if lat and lon: + start_ms = (start_high << 32) | start_low + etr_ms = (etr_high << 32) | etr_low + start_time = datetime.fromtimestamp(start_ms / 1000, tz=timezone.utc) if start_ms > 0 else None + etr_time = datetime.fromtimestamp(etr_ms / 1000, tz=timezone.utc) if etr_ms > 0 else None + + cause = string_table[cause_idx - 1].strip() if 0 < cause_idx <= len(string_table) else "Unknown" + crew_status = string_table[crew_status_idx - 1].strip() if 0 < crew_status_idx <= len(string_table) else "Unknown" + + results.append({ + 'incidentid': f"{self.name}-{lat:.5f}-{lon:.5f}", 'utility': self.name, + 'lat': lat, 'lon': lon, 'pointgeom': f"{lat:.5f},{lon:.5f}", + 'start': start_time, 'etr': etr_time, 'outagen': outagen, + 'cause': cause, 'crew_status': crew_status, + 'last_change': datetime.now(timezone.utc) + }) + except (IndexError, TypeError): + pass + i += 1 + return results + except Exception as e: + logger.error(f"Could not parse point outages for {self.name}: {e}") + return [] + + def _is_valid(self, lat, lon): + if not self.state_filter: return True + b = self.STATE_BOUNDS.get(self.state_filter) + if not b: return True + return b['lat_min'] <= lat <= b['lat_max'] and b['lon_min'] <= lon <= b['lon_max'] \ No newline at end of file diff --git a/kubra.py 
b/kubra.py new file mode 100644 index 0000000..e267400 --- /dev/null +++ b/kubra.py @@ -0,0 +1,173 @@ +import requests +import json +import logging +import polyline +import mercantile +from datetime import datetime +from base import BaseProvider, BaseCountyProvider + +logger = logging.getLogger(__name__) + +class KubraCountyProvider(BaseCountyProvider): + def fetch(self): + meta_url = self.config.get('county_meta_url') + report_url_suffix = self.config.get('county_report_suffix') + + try: + # 1. Get hexes from meta_url + meta_resp = self.session.get(meta_url) + meta_data = meta_resp.json() + path = meta_data.get('data', {}).get('cluster_interval_generation_data') + if not path: return [] + + # 2. Construct final report URL + # The old script's logic reveals the path is composed of a base, + # the second hex from the metadata path, and the report suffix. + # Example path from meta: data/e2ae0326-9912-436a-9355-eb2687e798b1 + path_parts = path.split('/') # e.g., ['data', 'hex1', 'hex2', 'hex3'] + if len(path_parts) < 4: + logger.error(f"Invalid metadata path format for {self.name}: {path}") + return [] + + # This is the single, correct URL format used by the original script. + # It uses the fourth element (index 3) from the metadata path. + report_url = f"https://kubra.io/data/{path_parts[3]}{report_url_suffix}" + + # 3. Fetch and process report + report_resp = self.session.get(report_url) + if not report_resp.ok or not report_resp.text: + logger.info(f"No county report data available for {self.name} at this time.") + return [] + + report_data = report_resp.json() + return self._normalize(report_data) + except json.JSONDecodeError: + logger.warning(f"Could not decode JSON from county report for {self.name}. The report may be empty or invalid.") + return [] + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching Kubra county data for {self.name}: {e}") + return [] + + def _normalize(self, data): + results = [] + primary_areas = data.get("file_data", {}).get("areas", []) + if not primary_areas: return [] + + first_item_key = primary_areas[0].get("key") + + if first_item_key == "state": + for state_area in primary_areas: + for county in state_area.get("areas", []): + if county.get("key") == "county": + results.append(self._extract_info(county)) + elif first_item_key == "county": + for county in primary_areas: + if county.get("key") == "county": + results.append(self._extract_info(county)) + return results + + def _extract_info(self, county_item): + return { + 'outages': county_item.get('cust_a', {}).get('val'), + 'served': county_item.get('cust_s'), + 'county': county_item.get('name', '').capitalize(), + 'state': county_item.get('state') or self.config.get('state_filter'), + 'company': self.name + } + +class KubraProvider(BaseProvider): + def __init__(self, config, session): + super().__init__(config, session) + self.max_zoom = 14 + self.results = [] + self.base_url_template = 'https://kubra.io/cluster-data/' + + def fetch(self): + meta_url = self.config.get('meta_url') + if not meta_url: return [] + + # Fetch hexes ONCE per run, not in the recursive loop. + self.hex1, self.hex2 = self._get_hexes(meta_url) + if not self.hex1 or not self.hex2: + logger.error(f"[{self.name}] Could not get session hex keys. 
Aborting fetch for this provider.") + return [] + + quadkeys = self.config.get('quadkeys', []) + + self.results = [] + self._fetch_recursive(quadkeys, set(), zoom=len(quadkeys[0])) + return self.results + + def _get_hexes(self, url): + try: + resp = self.session.get(url) + path = resp.json().get('data', {}).get('cluster_interval_generation_data') + parts = path.split('/') + return parts[2], parts[3] + except: return None, None + + def _fetch_recursive(self, quadkeys, seen, zoom): + for q in quadkeys: + suffix = q[-3:][::-1] + url = f"{self.base_url_template}{suffix}/{self.hex1}/{self.hex2}/public/{self.config.get('layer')}/{q}.json" + try: + resp = self.session.get(url) + if not resp.ok: + continue + + file_data = resp.json().get('file_data', []) + for item in file_data: + desc = item.get('desc') + + # This mirrors the safe logic from the original power2.py's 'kubra' function. + # If 'desc' is missing, assume it's a cluster to be safe and drill down. + is_cluster = True if desc is None else desc.get('cluster', False) + + # If it's a cluster and we haven't hit max zoom, drill down. + if is_cluster and zoom + 1 <= self.max_zoom: + p_geom = item.get('geom', {}).get('p', []) + if p_geom: + next_key = self._get_quadkey_for_point(p_geom[0], zoom + 1) + self._fetch_recursive([next_key], seen, zoom + 1) + else: + # Otherwise, it's a final outage record. Process it. + self.results.append(self._normalize(item)) + except Exception as e: + logger.error(f"[{self.name}] Unhandled exception in _fetch_recursive for {q}: {e}", exc_info=True) + + def _normalize(self, item): + # Ensure 'desc' is a dictionary, even if it's missing from the item. This prevents the AttributeError. + desc = item.get('desc') or {} + geom = item.get('geom', {}) + p = geom.get('p', [None])[0] if geom.get('p') else None + if not p: + return {} + latlon = polyline.decode(p)[0] + def ts(s): + if not s or s=='ETR-NULL': return None + try: return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S%z") + except: + try: return datetime.strptime(s, "%Y-%m-%dT%H:%M%z") + except: return None + + cause_dict = desc.get('cause') + cause = cause_dict.get('EN-US', "Pending") if cause_dict else "Pending" + + crew_dict = desc.get('crew_status') + crew_status = crew_dict.get('EN-US') if crew_dict else None + + return { + 'incidentid': desc.get('inc_id'), 'utility': self.name, + 'lat': latlon[0], 'lon': latlon[1], 'pointgeom': p, 'areageom': geom.get('a'), + 'start': ts(desc.get('start_time')), 'etr': ts(desc.get('etr')), + 'outagen': desc.get('cust_a', {}).get('val', 0), 'cause': cause, + 'crew_status': crew_status, 'active': True + } + + def _get_quadkey_for_point(self, p, z): + ll = polyline.decode(p)[0] + return mercantile.quadkey(mercantile.tile(lng=ll[1], lat=ll[0], zoom=z)) + + def _get_neighbors(self, q): + t = mercantile.quadkey_to_tile(q) + return [mercantile.quadkey(n) for n in mercantile.neighbors(t)] \ No newline at end of file diff --git a/newpower.py b/newpower.py index ed36de8..fca17ee 100644 --- a/newpower.py +++ b/newpower.py @@ -1,17 +1,27 @@ +import os +import sys +# Add the script's directory to the Python path to ensure modules can be found +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + import requests import json import psycopg2 import logging -import os import re from datetime import datetime, timezone, timedelta -from abc import ABC, abstractmethod from urllib.parse import urlparse from requests.packages.urllib3.exceptions import InsecureRequestWarning # Import the helper module for auto-repair import 
get_rpc_config_auto +# Import provider classes +from base import BaseCountyProvider +from kubra import KubraCountyProvider +from simple import SimpleCountyJsonProvider +from nisc import NiscCountyProvider +from gwt_rpc import GwtRpcCountyProvider + requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # --- LOGGING --- @@ -134,314 +144,6 @@ class CountyPowerDB: # --- PROVIDERS --- -class BaseCountyProvider(ABC): - def __init__(self, config, session): - self.config = config - self.session = session - self.name = config.get('name', 'Unknown') - - @abstractmethod - def fetch(self): - pass - -class SimpleCountyJsonProvider(BaseCountyProvider): - def fetch(self): - url = self.config.get('county_url') - state = self.config.get('state_filter') - try: - resp = self.session.get(url, verify=False) - if not resp.ok: return [] - data = resp.json() - results = [] - for boundary_group in data: - for item in boundary_group.get('boundaries', []): - results.append({ - 'outages': item.get('customersOutNow'), - 'served': item.get('customersServed'), - 'county': item.get('name'), - 'state': state, - 'company': self.name - }) - return results - except Exception as e: - logger.error(f"Error fetching {self.name}: {e}") - return [] - -class KubraCountyProvider(BaseCountyProvider): - def fetch(self): - meta_url = self.config.get('county_meta_url') - report_url_suffix = self.config.get('county_report_suffix') - - try: - # 1. Get hexes from meta_url - meta_resp = self.session.get(meta_url) - meta_data = meta_resp.json() - path = meta_data.get('data', {}).get('cluster_interval_generation_data') - if not path: return [] - - # 2. Construct final report URL - # The old script's logic reveals the path is composed of a base, - # the second hex from the metadata path, and the report suffix. - # Example path from meta: data/e2ae0326-9912-436a-9355-eb2687e798b1 - path_parts = path.split('/') # e.g., ['data', 'hex1', 'hex2', 'hex3'] - if len(path_parts) < 4: - logger.error(f"Invalid metadata path format for {self.name}: {path}") - return [] - - # This is the single, correct URL format used by the original script. - # It uses the fourth element (index 3) from the metadata path. - report_url = f"https://kubra.io/data/{path_parts[3]}{report_url_suffix}" - - # 3. Fetch and process report - report_resp = self.session.get(report_url) - if not report_resp.ok or not report_resp.text: - logger.info(f"No county report data available for {self.name} at this time.") - return [] - - report_data = report_resp.json() - return self._normalize(report_data) - except json.JSONDecodeError: - logger.warning(f"Could not decode JSON from county report for {self.name}. 
The report may be empty or invalid.") - return [] - except requests.exceptions.RequestException as e: - logger.error(f"Error fetching Kubra county data for {self.name}: {e}") - return [] - - def _normalize(self, data): - results = [] - primary_areas = data.get("file_data", {}).get("areas", []) - if not primary_areas: return [] - - first_item_key = primary_areas[0].get("key") - - if first_item_key == "state": - for state_area in primary_areas: - for county in state_area.get("areas", []): - if county.get("key") == "county": - results.append(self._extract_info(county)) - elif first_item_key == "county": - for county in primary_areas: - if county.get("key") == "county": - results.append(self._extract_info(county)) - return results - - def _extract_info(self, county_item): - return { - 'outages': county_item.get('cust_a', {}).get('val'), - 'served': county_item.get('cust_s'), - 'county': county_item.get('name', '').capitalize(), - 'state': county_item.get('state') or self.config.get('state_filter'), - 'company': self.name - } - -class NiscCountyProvider(BaseCountyProvider): - """ Handles county data from NISC-hosted cloud sources. """ - def fetch(self): - url = self.config.get('county_url') - state = self.config.get('state_filter') - try: - resp = self.session.get(url, verify=False) - if not resp.ok: return [] - data = resp.json() - results = [] - # The structure is typically a list containing one object with a 'boundaries' key - for boundary_group in data: - for item in boundary_group.get('boundaries', []): - results.append({ - 'outages': item.get('customersOutNow'), - 'served': item.get('customersServed'), - 'county': item.get('name'), - 'state': state, - 'company': self.name - }) - return results - except Exception as e: - logger.error(f"Error fetching NISC county data for {self.name}: {e}") - return [] - -class GwtRpcCountyProvider(BaseCountyProvider): - """ Handles county data from GWT-RPC sources. """ - def __init__(self, config, session): - super().__init__(config, session) - self.state_filter = config.get('state_filter') - self.map_url = config.get('map_url') - - # Set up session headers and cookies from config - self.session.headers.update({ - 'User-Agent': config.get('user_agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'), - 'Accept': '*/*', - 'Sec-Fetch-Site': 'same-origin' - }) - if config.get('cookies'): - for cookie in config['cookies']: - self.session.cookies.set(cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path']) - - def attempt_auto_repair(self): - if not self.map_url: return False - last_update = self.config.get('last_auto_update') - if last_update: - try: - last_dt = datetime.fromisoformat(last_update) - if last_dt.tzinfo is None: last_dt = last_dt.replace(tzinfo=timezone.utc) - if datetime.now(timezone.utc) - last_dt < timedelta(hours=AUTO_UPDATE_COOLDOWN_HOURS): - logger.info(f"Skipping auto-repair for {self.name} (Cooldown active).") - return False - except ValueError: pass - - logger.info(f"Attempting Auto-Repair for {self.name}...") - try: - _, valid_headers, valid_cookies, valid_body = get_rpc_config_auto.fetch_live_data(self.map_url) - if valid_headers and valid_body: - logger.info(f"Repair successful! 
Updating {self.name}.") - excluded = {'content-length', 'host', 'connection', 'cookie', 'accept-encoding', 'sec-ch-ua', 'sec-ch-ua-mobile', 'sec-ch-ua-platform', 'origin'} - clean_headers = {k: v for k, v in valid_headers.items() if k.lower() not in excluded} - clean_headers['Referer'] = self.map_url - - # Update in-memory config for the current run - self.config.update({ - 'headers': clean_headers, 'body': valid_body, 'cookies': valid_cookies, - 'user_agent': valid_headers.get('user-agent'), - 'last_auto_update': datetime.now(timezone.utc).isoformat() - }) - # Update session for the current run - self.session.cookies.clear() - for cookie in valid_cookies: - self.session.cookies.set(cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path']) - - # Save to disk for next time - update_provider_config(self.name, self.config) - return True - except Exception as e: - logger.error(f"Auto-repair failed: {e}") - return False - - def fetch(self, is_retry=False): - url = self.config.get('url') - headers = self.config.get('headers', {}) - body = self.config.get('body') - if not url or not body: return [] - - try: - parsed_url = urlparse(url) - origin = f"{parsed_url.scheme}://{parsed_url.netloc}" - correct_referer = headers.get('Referer') or headers.get('x-gwt-module-base') or origin - - req_headers = headers.copy() - req_headers['Content-Type'] = 'text/x-gwt-rpc; charset=UTF-8' - req_headers['Referer'] = correct_referer - - resp = self.session.post(url, headers=req_headers, data=body, verify=False) - - if "//EX" in resp.text or resp.status_code == 500: - logger.error(f"GWT Failure for {self.name} (County Fetch).") - if is_retry: return [] - if self.attempt_auto_repair(): - logger.info("Retrying county fetch with new settings...") - # After repair, self.config is updated, so we can just call fetch again. - return self.fetch(is_retry=True) - return [] - - if not resp.ok: return [] - text = resp.text.replace('//OK', '') - return self._extract_county_summary(json.loads(text)) - except Exception as e: - logger.error(f"County fetch error for {self.name}: {e}") - return [] - - def _extract_county_summary(self, data_list): - """ - Decodes a GWT-RPC payload to extract outage data for Counties. - This logic is adapted from test.py. - """ - try: - # 1. Separate Stream and String Table - string_table = None - stream_raw = [] - for item in data_list: - if isinstance(item, list): - string_table = item - break - else: - stream_raw.append(item) - - if not string_table: - logger.error(f"String table not found in payload for {self.name}.") - return [] - - # 2. Normalize the Stream - stream = [] - for token in stream_raw: - if isinstance(token, int): - stream.append(token) - elif isinstance(token, float): - stream.append(int(token)) - elif isinstance(token, str): - try: - stream.append(int(float(token))) - except ValueError: - pass # Ignore non-numeric strings - - # 3. 
Decode Logic - REGION_SIG = "cc.nisc.oms.clientandserver.v2.pojo.Region/3192921568" - INTEGER_SIG = "java.lang.Integer/3438268394" - CATEGORY_KEY = "County" - - def get_index(val): - try: return string_table.index(val) + 1 - except ValueError: return 0 - - region_type_id = get_index(REGION_SIG) - integer_type_id = get_index(INTEGER_SIG) - county_type_id = get_index(CATEGORY_KEY) - - if region_type_id == 0: - logger.error(f"Region type signature not found for {self.name}.") - return [] - - results = [] - i = 0 - stream_len = len(stream) - - while i < stream_len: - if stream[i] == region_type_id: - try: - p = i + 1 - - served = 0 - val1 = stream[p] - p += 1 - if p < stream_len and stream[p] == integer_type_id: - served = val1 - p += 1 - - out = 0 - val2 = stream[p] - p += 1 - if p < stream_len and stream[p] == integer_type_id: - out = val2 - p += 1 - - name_idx = stream[p] - p += 1 - cat_idx = stream[p] - - if cat_idx == county_type_id: - name = "Unknown" - if 0 < name_idx <= len(string_table): - name = string_table[name_idx - 1] - - results.append({ - 'county': name, 'state': self.state_filter, - 'company': self.name, 'outages': out, 'served': served - }) - except IndexError: - pass - i += 1 - return results - except (ValueError, IndexError, TypeError) as e: - logger.error(f"Could not parse county summary for {self.name}: {e}") - return [] - # --- REGISTRY --- PROVIDER_REGISTRY = { 'kubra_county': KubraCountyProvider, diff --git a/newpower2.py b/newpower2.py index 2fd0b2b..93ff3b7 100644 --- a/newpower2.py +++ b/newpower2.py @@ -1,13 +1,16 @@ +import os +import sys +# Add the script's directory to the Python path to ensure modules can be found +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + import requests import polyline import json import psycopg2 import mercantile import logging -import os import re from datetime import datetime, timezone, timedelta -from abc import ABC, abstractmethod from urllib.parse import urlparse from pyproj import Transformer from requests.packages.urllib3.exceptions import InsecureRequestWarning @@ -15,6 +18,13 @@ from requests.packages.urllib3.exceptions import InsecureRequestWarning # Import the helper module for auto-repair import get_rpc_config_auto +# Import provider classes +from base import BaseProvider +from kubra import KubraProvider +from simple import SimpleJsonProvider +from gwt_rpc import GwtRpcProvider +from nisc import NiscHostedProvider + requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # --- LOGGING --- @@ -114,482 +124,6 @@ class PowerDB: cursor.execute("DELETE FROM newpower WHERE fetch_time < NOW() - INTERVAL '365 days'") logger.info("Post-processing complete.") -# --- PROVIDERS --- - -class BaseProvider(ABC): - def __init__(self, config, session): - self.config = config - self.session = session - self.name = config.get('name', 'Unknown') - - @abstractmethod - def fetch(self): - pass - -class SimpleJsonProvider(BaseProvider): - def fetch(self): - url = self.config.get('url') - try: - resp = self.session.get(url, verify=False) - if not resp.ok: return [] - data = resp.json() - results = [] - for item in data: - results.append(self._normalize(item)) - return results - except Exception as e: - logger.error(f"Error fetching {self.name}: {e}") - return [] - - def _normalize(self, item): - def safe_parse(ts): - if not ts: return None - try: return datetime.fromisoformat(ts.replace('Z', '+00:00')) - except: return None - return { - 'incidentid': str(item.get('outageRecID')), 'utility': self.name, - 'lat': 
item.get('outagePoint', {}).get('lat'), 'lon': item.get('outagePoint', {}).get('lng'), - 'pointgeom': f"{item.get('outagePoint', {}).get('lat')},{item.get('outagePoint', {}).get('lng')}", - 'areageom': None, 'start': safe_parse(item.get('outageStartTime')), - 'etr': safe_parse(item.get('outageEndTime')), 'outagen': item.get('customersOutNow'), - 'cause': item.get('cause'), 'crew_status': item.get('outageWorkStatus'), - 'last_change': safe_parse(item.get('outageModifiedTime')) - } - -class KubraProvider(BaseProvider): - def __init__(self, config, session): - super().__init__(config, session) - self.max_zoom = 14 - self.results = [] - self.base_url_template = 'https://kubra.io/cluster-data/' - - def fetch(self): - meta_url = self.config.get('meta_url') - if not meta_url: return [] - - # Fetch hexes ONCE per run, not in the recursive loop. - self.hex1, self.hex2 = self._get_hexes(meta_url) - if not self.hex1 or not self.hex2: - logger.error(f"[{self.name}] Could not get session hex keys. Aborting fetch for this provider.") - return [] - - quadkeys = self.config.get('quadkeys', []) - - self.results = [] - self._fetch_recursive(quadkeys, set(), zoom=len(quadkeys[0])) - return self.results - - def _get_hexes(self, url): - try: - resp = self.session.get(url) - path = resp.json().get('data', {}).get('cluster_interval_generation_data') - parts = path.split('/') - return parts[2], parts[3] - except: return None, None - - def _fetch_recursive(self, quadkeys, seen, zoom): - for q in quadkeys: - suffix = q[-3:][::-1] - url = f"{self.base_url_template}{suffix}/{self.hex1}/{self.hex2}/public/{self.config.get('layer')}/{q}.json" - try: - resp = self.session.get(url) - if not resp.ok: - continue - - file_data = resp.json().get('file_data', []) - for item in file_data: - desc = item.get('desc') - - # This mirrors the safe logic from the original power2.py's 'kubra' function. - # If 'desc' is missing, assume it's a cluster to be safe and drill down. - is_cluster = True if desc is None else desc.get('cluster', False) - - # If it's a cluster and we haven't hit max zoom, drill down. - if is_cluster and zoom + 1 <= self.max_zoom: - p_geom = item.get('geom', {}).get('p', []) - if p_geom: - next_key = self._get_quadkey_for_point(p_geom[0], zoom + 1) - self._fetch_recursive([next_key], seen, zoom + 1) - else: - # Otherwise, it's a final outage record. Process it. - self.results.append(self._normalize(item)) - except Exception as e: - logger.error(f"[{self.name}] Unhandled exception in _fetch_recursive for {q}: {e}", exc_info=True) - - def _normalize(self, item): - # Ensure 'desc' is a dictionary, even if it's missing from the item. This prevents the AttributeError. 
- desc = item.get('desc') or {} - geom = item.get('geom', {}) - p = geom.get('p', [None])[0] if geom.get('p') else None - if not p: - return {} - latlon = polyline.decode(p)[0] - def ts(s): - if not s or s=='ETR-NULL': return None - try: return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S%z") - except: - try: return datetime.strptime(s, "%Y-%m-%dT%H:%M%z") - except: return None - - cause_dict = desc.get('cause') - cause = cause_dict.get('EN-US', "Pending") if cause_dict else "Pending" - - crew_dict = desc.get('crew_status') - crew_status = crew_dict.get('EN-US') if crew_dict else None - - return { - 'incidentid': desc.get('inc_id'), 'utility': self.name, - 'lat': latlon[0], 'lon': latlon[1], 'pointgeom': p, 'areageom': geom.get('a'), - 'start': ts(desc.get('start_time')), 'etr': ts(desc.get('etr')), - 'outagen': desc.get('cust_a', {}).get('val', 0), 'cause': cause, - 'crew_status': crew_status, 'active': True - } - - def _get_quadkey_for_point(self, p, z): - ll = polyline.decode(p)[0] - return mercantile.quadkey(mercantile.tile(lng=ll[1], lat=ll[0], zoom=z)) - - def _get_neighbors(self, q): - t = mercantile.quadkey_to_tile(q) - return [mercantile.quadkey(n) for n in mercantile.neighbors(t)] - -class GwtRpcProvider(BaseProvider): - def __init__(self, config, session): - super().__init__(config, session) - self.transformer = None - self.state_filter = config.get('state_filter') - self.map_url = config.get('map_url') - - # 1. Base Headers - self.session.headers.update({ - 'User-Agent': config.get('user_agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'), - 'Accept': '*/*', - 'Sec-Fetch-Site': 'same-origin' - }) - - parsed_url = urlparse(config.get('url')) - self.session.headers.update({'Origin': f"{parsed_url.scheme}://{parsed_url.netloc}"}) - - # 2. Load Cookies - if config.get('cookies'): - for cookie in config['cookies']: - try: - self.session.cookies.set( - cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path'] - ) - except: pass - - self.STATE_BOUNDS = { - 'WV': {'lat_min': 37.0, 'lat_max': 40.7, 'lon_min': -82.7, 'lon_max': -77.7}, - 'OH': {'lat_min': 38.4, 'lat_max': 42.0, 'lon_min': -84.9, 'lon_max': -80.5}, - 'KY': {'lat_min': 36.4, 'lat_max': 39.2, 'lon_min': -89.6, 'lon_max': -81.9}, - 'IA': {'lat_min': 40.3, 'lat_max': 43.6, 'lon_min': -96.7, 'lon_max': -90.1} - } - - if config.get('epsg'): - try: - self.transformer = Transformer.from_crs(f"EPSG:{config['epsg']}", "EPSG:4326", always_xy=True) - except: logger.error(f"EPSG Error for {self.name}") - - def attempt_auto_repair(self): - if not self.map_url: return False - - # --- Cooldown Check --- - last_update = self.config.get('last_auto_update') - if last_update: - try: - last_dt = datetime.fromisoformat(last_update) - if last_dt.tzinfo is None: last_dt = last_dt.replace(tzinfo=timezone.utc) - if datetime.now(timezone.utc) - last_dt < timedelta(hours=AUTO_UPDATE_COOLDOWN_HOURS): - logger.info(f"Skipping auto-repair for {self.name} (Cooldown active).") - return False - except ValueError: pass - - logger.info(f"Attempting Auto-Repair for {self.name}...") - - try: - # Expecting 4 values: data, headers, cookies, body - new_data, valid_headers, valid_cookies, valid_body = get_rpc_config_auto.fetch_live_data(self.map_url) - - if valid_headers and valid_body: - logger.info(f"Repair successful! 
Updating {self.name}.") - - # Clean Headers (Blacklist approach) - excluded = {'content-length', 'host', 'connection', 'cookie', 'accept-encoding', 'sec-ch-ua', 'sec-ch-ua-mobile', 'sec-ch-ua-platform', 'origin'} - clean_headers = {k: v for k, v in valid_headers.items() if k.lower() not in excluded} - - # Ensure Referer is set correctly for next time - clean_headers['Referer'] = self.map_url - - # Update In-Memory Config - current_time = datetime.now(timezone.utc).isoformat() - self.config['headers'] = clean_headers - self.config['body'] = valid_body - self.config['cookies'] = valid_cookies - self.config['user_agent'] = valid_headers.get('user-agent') - self.config['last_auto_update'] = current_time - - # Update Session - self.session.cookies.clear() - for cookie in valid_cookies: - self.session.cookies.set(cookie['name'], cookie['value'], domain=cookie['domain'], path=cookie['path']) - if valid_headers.get('user_agent'): - self.session.headers.update({'User-Agent': valid_headers.get('user-agent')}) - - # Save to Disk - new_settings = { - 'headers': clean_headers, - 'body': valid_body, - 'cookies': valid_cookies, - 'user_agent': valid_headers.get('user-agent') - } - update_provider_config(self.name, new_settings) - return True - except Exception as e: - logger.error(f"Auto-repair failed: {e}") - - return False - - def fetch(self, is_retry=False): - url = self.config.get('url') - headers = self.config.get('headers', {}) - body = self.config.get('body') - - if not url: return [] - - try: - # 3. Dynamic Origin Update - parsed_url = urlparse(url) - origin = f"{parsed_url.scheme}://{parsed_url.netloc}" - - # Priority: Configured Referer > Module Base > Origin - correct_referer = headers.get('Referer') or headers.get('x-gwt-module-base') or origin - - ua = headers.get('User-Agent', self.session.headers['User-Agent']) - if "Headless" in ua: # Fallback safety - ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36' - - self.session.headers.update({ - 'Origin': origin, - 'Referer': correct_referer, - 'User-Agent': ua - }) - - # Prime if missing cookies or map_url is defined - if self.map_url and not self.config.get('cookies'): - try: self.session.get(correct_referer, verify=False, timeout=10) - except: pass - - req_headers = headers.copy() - req_headers['Content-Type'] = 'text/x-gwt-rpc; charset=UTF-8' - req_headers['Referer'] = correct_referer - req_headers['User-Agent'] = ua - - # Only fetch if we have a body - if body: - resp = self.session.post(url, headers=req_headers, data=body, verify=False) - else: - resp = type('obj', (object,), {'status_code': 500, 'text': 'No Body', 'ok': False})() - - # 5. Error Handling & Retry - failed = False - if "//EX" in resp.text: failed = True - if resp.status_code == 500: failed = True - - if failed: - logger.error(f"GWT Failure for {self.name} (Status: {resp.status_code}).") - - if is_retry: - logger.error(f"Retry failed for {self.name}. Aborting.") - return [] - - if self.attempt_auto_repair(): - logger.info("Retrying fetch with new settings...") - return self.fetch(is_retry=True) - else: - return [] - - if not resp.ok: return [] - text = resp.text - if text.startswith('//OK'): text = text[4:] - return self._extract_outages(json.loads(text)) - except Exception as e: - logger.error(f"Fetch error {self.name}: {e}") - return [] - - def _extract_outages(self, data_list): - """ - Decodes a GWT-RPC payload to extract detailed point outage information. 
- This is a more robust implementation that replaces the previous heuristic-based coordinate search. - """ - results = [] - try: - # 1. Separate Stream and String Table - string_table = next((item for item in data_list if isinstance(item, list)), None) - if not string_table: return [] - - stream_raw = [item for item in data_list if not isinstance(item, list)] - - # 2. Normalize the Stream - stream = [int(token) for token in stream_raw if isinstance(token, (int, float))] - - # 3. Define Signatures and Helper - # The signature can vary (e.g., cc.nisc... vs coop.nisc...). - # We search for a common, unique substring. - OUTAGE_SIG_KEYWORD = ".pojo.Outage/" - - outage_sig_full = next((s for s in string_table if OUTAGE_SIG_KEYWORD in s), None) - - if not outage_sig_full: - logger.error(f"Outage type signature not found for {self.name}.") - return [] - - # Get the 1-based index of the found signature - outage_type_id = string_table.index(outage_sig_full) + 1 - - # 4. Decode Logic - i = 0 - stream_len = len(stream) - while i < stream_len: - if stream[i] == outage_type_id: - try: - p = i + 1 - - # Field extraction based on observed GWT stream structure - outagen = stream[p] if p < stream_len else 0; p += 1 - crew_status_idx = stream[p] if p < stream_len else 0; p += 1 - cause_idx = stream[p] if p < stream_len else 0; p += 1 - - etr_high = stream[p] if p < stream_len else 0; p += 1 - etr_low = stream[p] if p < stream_len else 0; p += 1 - p += 1 # Skip long type ID - - start_high = stream[p] if p < stream_len else 0; p += 1 - start_low = stream[p] if p < stream_len else 0; p += 1 - p += 1 # Skip long type ID - - coord_x = stream[p] if p < stream_len else 0; p += 1 - coord_y = stream[p] if p < stream_len else 0; p += 1 - - # Process coordinates - lat, lon = None, None - if self.transformer and coord_x and coord_y: - try: - lon, lat = self.transformer.transform(coord_x, coord_y) - if not self._is_valid(lat, lon): lat, lon = None, None - except: pass - - if lat and lon: - # Process timestamps (GWT sends 64-bit longs as two 32-bit integers) - start_ms = (start_high << 32) | start_low - etr_ms = (etr_high << 32) | etr_low - start_time = datetime.fromtimestamp(start_ms / 1000, tz=timezone.utc) if start_ms > 0 else None - etr_time = datetime.fromtimestamp(etr_ms / 1000, tz=timezone.utc) if etr_ms > 0 else None - - # Resolve strings from table - cause = string_table[cause_idx - 1].strip() if 0 < cause_idx <= len(string_table) else "Unknown" - crew_status = string_table[crew_status_idx - 1].strip() if 0 < crew_status_idx <= len(string_table) else "Unknown" - - results.append({ - 'incidentid': f"{self.name}-{lat:.5f}-{lon:.5f}", - 'utility': self.name, - 'lat': lat, 'lon': lon, - 'pointgeom': f"{lat:.5f},{lon:.5f}", - 'areageom': None, - 'start': start_time, - 'etr': etr_time, - 'outagen': outagen, - 'cause': cause, - 'crew_status': crew_status, - 'active': True, - 'last_change': datetime.now(timezone.utc) - }) - except (IndexError, TypeError): - pass # Move to the next potential object - i += 1 - return results - except Exception as e: - logger.error(f"Could not parse point outages for {self.name}: {e}") - return [] - - def _is_valid(self, lat, lon): - if not self.state_filter: return True - b = self.STATE_BOUNDS.get(self.state_filter) - if not b: return True - return b['lat_min'] <= lat <= b['lat_max'] and b['lon_min'] <= lon <= b['lon_max'] - -class NiscHostedProvider(BaseProvider): - """ - Handles NISC Cloud Coop format (JSON with PROJ coordinate strings). 
- Example: Buckeye REC - """ - def __init__(self, config, session): - super().__init__(config, session) - self.transformer = None - proj_str = config.get('proj_string') - - if proj_str: - try: - # Create transformer from the custom PROJ string to WGS84 - # always_xy=True ensures we assume (Lon, Lat) output order - self.transformer = Transformer.from_proj(proj_str, "EPSG:4326", always_xy=True) - except Exception as e: - logger.error(f"Failed to initialize projection for {self.name}: {e}") - - def fetch(self, is_retry=False): - url = self.config.get('url') - try: - resp = self.session.get(url, verify=False) - if not resp.ok: - logger.error(f"{self.name} HTTP {resp.status_code}") - return [] - - data = resp.json() - outages_list = data.get('outages', []) - - results = [] - for item in outages_list: - results.append(self._normalize(item)) - return results - except Exception as e: - logger.error(f"Error fetching {self.name}: {e}") - return [] - - def _normalize(self, item): - # Coordinates - x, y = item.get('x'), item.get('y') - lat, lon = None, None - - if x is not None and y is not None and self.transformer: - try: - # Transform custom projection X/Y to Lon/Lat - lon, lat = self.transformer.transform(x, y) - except: pass - - # Timestamps (Epoch Milliseconds) - start_ts = None - time_off = item.get('timeOff') - if time_off: - try: - # Convert ms to seconds - start_ts = datetime.fromtimestamp(time_off / 1000, tz=timezone.utc) - except: pass - - return { - 'incidentid': str(item.get('id')), - 'utility': self.name, - 'lat': lat, - 'lon': lon, - 'pointgeom': f"{lat},{lon}" if lat else None, - 'areageom': None, - 'start': start_ts, - 'etr': None, - 'outagen': item.get('nbrOut', 1), - 'cause': "Unknown", - 'crew_status': "Unknown", - 'active': True, - 'last_change': datetime.now(timezone.utc) - } # --- REGISTRY --- PROVIDER_REGISTRY = { @@ -597,9 +131,10 @@ PROVIDER_REGISTRY = { 'simple_json': SimpleJsonProvider, 'gwt_rpc': GwtRpcProvider, 'nisc_hosted': NiscHostedProvider, - } + + # --- MAIN --- # --- MAIN (Point Scraper) --- def main(): diff --git a/nisc.py b/nisc.py new file mode 100644 index 0000000..3dca4d7 --- /dev/null +++ b/nisc.py @@ -0,0 +1,80 @@ +import logging +from datetime import datetime, timezone +from pyproj import Transformer +from base import BaseProvider, BaseCountyProvider + +logger = logging.getLogger(__name__) + +class NiscCountyProvider(BaseCountyProvider): + """ Handles county data from NISC-hosted cloud sources. """ + def fetch(self): + url = self.config.get('county_url') + state = self.config.get('state_filter') + try: + resp = self.session.get(url, verify=False) + if not resp.ok: return [] + data = resp.json() + results = [] + # The structure is typically a list containing one object with a 'boundaries' key + for boundary_group in data: + for item in boundary_group.get('boundaries', []): + results.append({ + 'outages': item.get('customersOutNow'), + 'served': item.get('customersServed'), + 'county': item.get('name'), + 'state': state, + 'company': self.name + }) + return results + except Exception as e: + logger.error(f"Error fetching NISC county data for {self.name}: {e}") + return [] + +class NiscHostedProvider(BaseProvider): + """ + Handles NISC Cloud Coop format (JSON with PROJ coordinate strings). 
+ Example: Buckeye REC + """ + def __init__(self, config, session): + super().__init__(config, session) + self.transformer = None + proj_str = config.get('proj_string') + + if proj_str: + try: + self.transformer = Transformer.from_proj(proj_str, "EPSG:4326", always_xy=True) + except Exception as e: + logger.error(f"Failed to initialize projection for {self.name}: {e}") + + def fetch(self): + url = self.config.get('url') + try: + resp = self.session.get(url, verify=False) + if not resp.ok: + logger.error(f"{self.name} HTTP {resp.status_code}") + return [] + + data = resp.json() + return [self._normalize(item) for item in data.get('outages', [])] + except Exception as e: + logger.error(f"Error fetching {self.name}: {e}") + return [] + + def _normalize(self, item): + x, y = item.get('x'), item.get('y') + lat, lon = None, None + if x is not None and y is not None and self.transformer: + try: + lon, lat = self.transformer.transform(x, y) + except: pass + + time_off = item.get('timeOff') + start_ts = datetime.fromtimestamp(time_off / 1000, tz=timezone.utc) if time_off else None + + return { + 'incidentid': str(item.get('id')), 'utility': self.name, + 'lat': lat, 'lon': lon, 'pointgeom': f"{lat},{lon}" if lat else None, + 'start': start_ts, 'outagen': item.get('nbrOut', 1), + 'cause': "Unknown", 'crew_status': "Unknown", 'active': True, + 'last_change': datetime.now(timezone.utc) + } \ No newline at end of file diff --git a/simple.py b/simple.py new file mode 100644 index 0000000..344d912 --- /dev/null +++ b/simple.py @@ -0,0 +1,55 @@ +import logging +from datetime import datetime +from base import BaseProvider, BaseCountyProvider + +logger = logging.getLogger(__name__) + +class SimpleCountyJsonProvider(BaseCountyProvider): + def fetch(self): + url = self.config.get('county_url') + state = self.config.get('state_filter') + try: + resp = self.session.get(url, verify=False) + if not resp.ok: return [] + data = resp.json() + results = [] + for boundary_group in data: + for item in boundary_group.get('boundaries', []): + results.append({ + 'outages': item.get('customersOutNow'), + 'served': item.get('customersServed'), + 'county': item.get('name'), + 'state': state, + 'company': self.name + }) + return results + except Exception as e: + logger.error(f"Error fetching {self.name}: {e}") + return [] + +class SimpleJsonProvider(BaseProvider): + def fetch(self): + url = self.config.get('url') + try: + resp = self.session.get(url, verify=False) + if not resp.ok: return [] + data = resp.json() + return [self._normalize(item) for item in data] + except Exception as e: + logger.error(f"Error fetching {self.name}: {e}") + return [] + + def _normalize(self, item): + def safe_parse(ts): + if not ts: return None + try: return datetime.fromisoformat(ts.replace('Z', '+00:00')) + except: return None + return { + 'incidentid': str(item.get('outageRecID')), 'utility': self.name, + 'lat': item.get('outagePoint', {}).get('lat'), 'lon': item.get('outagePoint', {}).get('lng'), + 'pointgeom': f"{item.get('outagePoint', {}).get('lat')},{item.get('outagePoint', {}).get('lng')}", + 'areageom': None, 'start': safe_parse(item.get('outageStartTime')), + 'etr': safe_parse(item.get('outageEndTime')), 'outagen': item.get('customersOutNow'), + 'cause': item.get('cause'), 'crew_status': item.get('outageWorkStatus'), + 'last_change': safe_parse(item.get('outageModifiedTime')) + } \ No newline at end of file
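
Usage sketch (assumed wiring, not part of the applied diff): the two driver scripts are
expected to look up a provider class in PROVIDER_REGISTRY by its "type" string, construct
it with (config, session), and call fetch() for a list of normalized records. The registry
keys and the fetch() contract come from the hunks above; the config dictionary below is a
hypothetical placeholder, not a real utility entry from the project's config file.

    import requests
    from simple import SimpleJsonProvider
    from gwt_rpc import GwtRpcProvider
    from nisc import NiscHostedProvider

    # Same shape as PROVIDER_REGISTRY in newpower2.py (the Kubra entry is omitted here
    # because its key sits outside the hunk context shown above).
    PROVIDER_REGISTRY = {
        'simple_json': SimpleJsonProvider,
        'gwt_rpc': GwtRpcProvider,
        'nisc_hosted': NiscHostedProvider,
    }

    def run_provider(config):
        # Instantiate the provider named by config['type'] and return its fetch() results.
        provider_cls = PROVIDER_REGISTRY.get(config.get('type'))
        if provider_cls is None:
            return []
        session = requests.Session()
        return provider_cls(config, session).fetch()

    # Hypothetical config entry; real entries carry provider-specific keys
    # ('meta_url'/'quadkeys' for Kubra, 'url'/'headers'/'body' for GWT-RPC, etc.).
    example = {'type': 'simple_json', 'name': 'Example Co-op',
               'url': 'https://example.com/outages.json'}
    records = run_provider(example)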