From cd436605a513080df79ed6635aced3493cbb5484 Mon Sep 17 00:00:00 2001 From: hofee Date: Mon, 19 May 2025 16:32:04 +0800 Subject: [PATCH] update pbnbv --- app_inference.py | 31 +- .../__pycache__/predict_result.cpython-39.pyc | Bin 6306 -> 6340 bytes beans/predict_result.py | 4 +- .../local/global_only_inference_config.yaml | 4 +- .../local/global_pts_and_local_pts_pose.yaml | 4 +- .../local/local_only_inference_config.yaml | 4 +- configs/local/mlp_inference_config.yaml | 130 +++ configs/local/pbnbv_evalutaion_config.yaml | 131 +++ ...=> real_global_only_inference_config.yaml} | 37 +- .../uncertainty_guide_evaluation_config.yaml | 9 +- ...ab_global_only_pts_pipeline.cpython-39.pyc | Bin 3220 -> 3220 bytes .../ab_mlp_pipeline.cpython-39.pyc | Bin 2933 -> 3001 bytes core/ab_mlp_pipeline.py | 2 + .../__pycache__/evaluate_pbnbv.cpython-39.pyc | Bin 0 -> 25946 bytes .../evaluate_uncertainty_guide.cpython-39.pyc | Bin 12622 -> 11859 bytes ...and_local_points_inferencer.cpython-39.pyc | Bin 12294 -> 12421 bytes .../global_points_inferencer.cpython-39.pyc | Bin 12035 -> 12162 bytes .../local_points_inferencer.cpython-39.pyc | Bin 12137 -> 12264 bytes runners/evaluate_pbnbv.py | 786 ++++++++++++++++++ runners/evaluate_uncertainty_guide.py | 150 ++-- runners/global_and_local_points_inferencer.py | 39 +- runners/global_points_inferencer.py | 37 +- runners/local_points_inferencer.py | 38 +- utils/__pycache__/data_load.cpython-39.pyc | Bin 11518 -> 11518 bytes utils/__pycache__/render.cpython-39.pyc | Bin 4798 -> 4798 bytes 25 files changed, 1228 insertions(+), 178 deletions(-) create mode 100644 configs/local/mlp_inference_config.yaml create mode 100644 configs/local/pbnbv_evalutaion_config.yaml rename configs/local/{global_points_and_pose_inference_config.yaml => real_global_only_inference_config.yaml} (78%) create mode 100644 runners/__pycache__/evaluate_pbnbv.cpython-39.pyc create mode 100644 runners/evaluate_pbnbv.py diff --git a/app_inference.py b/app_inference.py index bb3ccf7..76f51e0 100644 --- a/app_inference.py +++ b/app_inference.py @@ -4,6 +4,8 @@ from runners.global_and_local_points_inferencer import GlobalAndLocalPointsInfer from runners.local_points_inferencer import LocalPointsInferencer from runners.inference_server import InferencerServer from runners.evaluate_uncertainty_guide import EvaluateUncertaintyGuide +from runners.evaluate_pbnbv import EvaluatePBNBV + @PytorchBootApplication("global_points_inference") class GlobalPointsInferenceApp: @staticmethod @@ -46,6 +48,27 @@ class LocalPointsInferenceApp: ''' LocalPointsInferencer("./configs/local/local_only_inference_config.yaml").run() +@PytorchBootApplication("real_global_only_inference") +class RealGlobalOnlyInferenceApp: + @staticmethod + def start(): + ''' + call default or your custom runners here, code will be executed + automatically when type "pytorch-boot run" or "ptb run" in terminal + + example: + Trainer("path_to_your_train_config").run() + Evaluator("path_to_your_eval_config").run() + ''' + GlobalPointsInferencer("./configs/local/real_global_only_inference_config.yaml").run() + +@PytorchBootApplication("mlp_inference") +class MLPInferenceApp: + @staticmethod + def start(): + GlobalAndLocalPointsInferencer("./configs/local/mlp_inference_config.yaml").run() + + @PytorchBootApplication("server") class InferenceServerApp: @staticmethod @@ -72,4 +95,10 @@ class EvaluateUncertaintyGuideApp: Trainer("path_to_your_train_config").run() Evaluator("path_to_your_eval_config").run() ''' - 
EvaluateUncertaintyGuide("./configs/local/uncertainty_guide_evaluation_config.yaml").run()
\ No newline at end of file
+        EvaluateUncertaintyGuide("./configs/local/uncertainty_guide_evaluation_config.yaml").run()
+
+@PytorchBootApplication("evaluate_pbnbv")
+class EvaluatePBNBVApp:
+    @staticmethod
+    def start():
+        EvaluatePBNBV("./configs/local/pbnbv_evalutaion_config.yaml").run()
\ No newline at end of file
diff --git a/beans/__pycache__/predict_result.cpython-39.pyc b/beans/__pycache__/predict_result.cpython-39.pyc
index 457c10235bcef0e0c560da34b3f90d66ac22f315..1e1913d88f331e47e8b27dae2b85efaa294d0597 100644
Binary files a/beans/__pycache__/predict_result.cpython-39.pyc and b/beans/__pycache__/predict_result.cpython-39.pyc differ
diff --git a/core/__pycache__/ab_mlp_pipeline.cpython-39.pyc b/core/__pycache__/ab_mlp_pipeline.cpython-39.pyc
index 2db6c28e2bd6391ce3fb57b7d0e0165cde761fdf..26511bc31b11d04aec079626d7b20531ee041aa9 100644
Binary files a/core/__pycache__/ab_mlp_pipeline.cpython-39.pyc and b/core/__pycache__/ab_mlp_pipeline.cpython-39.pyc differ
diff --git a/runners/__pycache__/evaluate_pbnbv.cpython-39.pyc b/runners/__pycache__/evaluate_pbnbv.cpython-39.pyc
new file mode 100644
Binary files /dev/null and b/runners/__pycache__/evaluate_pbnbv.cpython-39.pyc differ
diff --git a/runners/__pycache__/evaluate_uncertainty_guide.cpython-39.pyc b/runners/__pycache__/evaluate_uncertainty_guide.cpython-39.pyc
Binary files a/runners/__pycache__/evaluate_uncertainty_guide.cpython-39.pyc and b/runners/__pycache__/evaluate_uncertainty_guide.cpython-39.pyc differ
diff --git a/runners/__pycache__/global_and_local_points_inferencer.cpython-39.pyc b/runners/__pycache__/global_and_local_points_inferencer.cpython-39.pyc
Binary files a/runners/__pycache__/global_and_local_points_inferencer.cpython-39.pyc and b/runners/__pycache__/global_and_local_points_inferencer.cpython-39.pyc differ
diff --git a/runners/__pycache__/global_points_inferencer.cpython-39.pyc b/runners/__pycache__/global_points_inferencer.cpython-39.pyc
index 00bb362edd678acea848b919763ef96794176349..2b28bb9f489823efeef24b1e8fb9a50ecca5e5c2 100644
Binary files a/runners/__pycache__/global_points_inferencer.cpython-39.pyc and b/runners/__pycache__/global_points_inferencer.cpython-39.pyc differ
diff --git a/runners/__pycache__/local_points_inferencer.cpython-39.pyc b/runners/__pycache__/local_points_inferencer.cpython-39.pyc
Binary files a/runners/__pycache__/local_points_inferencer.cpython-39.pyc and b/runners/__pycache__/local_points_inferencer.cpython-39.pyc differ
z_}neeFLsBQ2a(y2FoYlq>JTBCo`V}dm&j_O&nf2qvnPR?qNY2)ZWPv`MhW9O{UVyE z3MmP^PX2E9Zs*ZQjajSRDrhv7)pGmVVgB9Y$Ir~5jb{K9xe|sWrN^-istlVAEATK9 zEnLW__HD#|7P&)m&7f_%MC)jUeDbc(9$OCF@W z2m@%e?p!LPUCtZl^6NSEETwr$XZYWH0uhl$kwFaI^d#3Q#e)f-m3&ekALxz6Fh`~} zs~L&v1J%MjL$BgciW3d5a#e(7fx zC{DYx)wL;gxt#nT0|C3}?IB(}m}?SMbbuJ$Ll760m49<^oP_z?gTL{AHoBFr;cpIo F;eV_?`s4rr diff --git a/runners/evaluate_pbnbv.py b/runners/evaluate_pbnbv.py new file mode 100644 index 0000000..ab3cc3e --- /dev/null +++ b/runners/evaluate_pbnbv.py @@ -0,0 +1,786 @@ +import numpy as np + +from sklearn.mixture import GaussianMixture +from typing import List, Tuple, Dict +from enum import Enum + +class VoxelType(Enum): + NONE = 0 + OCCUPIED = 1 + EMPTY = 2 + UNKNOWN = 3 + FRONTIER = 4 + +class VoxelStruct: + def __init__(self, voxel_resolution=0.01, ray_trace_step=0.01, surrounding_radius=1, + num_parallels=10, viewpoints_per_parallel=10, camera_working_distance=0.5): + self.voxel_resolution = voxel_resolution + self.ray_trace_step = ray_trace_step + self.surrounding_radius = surrounding_radius + self.num_parallels = num_parallels + self.viewpoints_per_parallel = viewpoints_per_parallel + self.camera_working_distance = camera_working_distance + self.occupied_voxels = [] + self.empty_voxels = [] + self.unknown_voxels = [] + self.frontier_voxels = [] + self.bbx_min = None + self.bbx_max = None + self.voxel_types: Dict[Tuple[float, float, float], VoxelType] = {} + + def update_voxel_map(self, points: np.ndarray, + camera_pose: np.ndarray) -> Tuple[List[np.ndarray], List[np.ndarray]]: + points = self.transform_points(points, camera_pose) + new_occupied = self.voxelize_points(points) + self.occupied_voxels.extend(new_occupied) + self.update_bounding_box() + self.ray_tracing(camera_pose[:3, 3], camera_pose[:3, :3]) + self.update_frontier_voxels() + return self.frontier_voxels, self.occupied_voxels + + def ray_tracing(self, camera_position: np.ndarray, camera_rotation: np.ndarray): + if self.bbx_min is None or self.bbx_max is None: + return + + directions = self.generate_ray_directions() + for direction in directions: + direction_cam = camera_rotation @ direction + current_pos = camera_position.copy() + + cnt = 0 + while not self.is_in_bounding_box(current_pos): + current_pos -= direction_cam * self.ray_trace_step*2 + cnt += 1 + if cnt > 200: + break + + occupied_flag = False + maybe_unknown_voxels = [] + while self.is_in_bounding_box(current_pos): + voxel = self.get_voxel_coordinate(current_pos) + voxel_key = tuple(voxel) + + if self.is_occupied(voxel): + current_pos -= direction_cam * self.ray_trace_step + occupied_flag = True + continue + if not occupied_flag: + if voxel_key not in self.voxel_types or self.voxel_types[voxel_key] == VoxelType.NONE or self.voxel_types[voxel_key] == VoxelType.UNKNOWN: + maybe_unknown_voxels.append(voxel) + else: + if voxel_key not in self.voxel_types or self.voxel_types[voxel_key] == VoxelType.NONE: + self.voxel_types[voxel_key] = VoxelType.UNKNOWN + self.unknown_voxels.append(voxel) + current_pos -= direction_cam * self.ray_trace_step + if not occupied_flag: + for voxel in maybe_unknown_voxels: + self.voxel_types[tuple(voxel)] = VoxelType.UNKNOWN + self.unknown_voxels.append(voxel) + else: + for voxel in maybe_unknown_voxels: + voxel_key = tuple(voxel) + if voxel_key in self.voxel_types and self.voxel_types[voxel_key] == VoxelType.UNKNOWN: + self.unknown_voxels = [v for v in self.unknown_voxels if not np.array_equal(v, voxel)] + self.voxel_types[voxel_key] = VoxelType.EMPTY + self.empty_voxels.append(voxel) + + 
+    def generate_ray_directions(self):
+        directions = []
+        if self.bbx_min is not None and self.bbx_max is not None:
+            bbx_diagonal = np.linalg.norm(self.bbx_max - self.bbx_min)
+            hemisphere_radius = self.camera_working_distance + bbx_diagonal / 2
+        else:
+            hemisphere_radius = self.camera_working_distance
+
+        # use a denser sampling than the viewpoint grid
+        theta_step = np.pi / (6 * self.num_parallels)  # finer theta step
+        phi_step = np.pi / (6 * self.viewpoints_per_parallel)  # finer phi step
+
+        # sample from the top of the cap downwards
+        for theta in np.arange(0, np.pi/6 + theta_step, theta_step):
+            # sample along each parallel
+            for phi in np.arange(0, 2*np.pi, phi_step):
+                x = hemisphere_radius * np.sin(theta) * np.cos(phi)
+                y = hemisphere_radius * np.sin(theta) * np.sin(phi)
+                z = hemisphere_radius * np.cos(theta)
+                direction = np.array([-x, -y, -z])
+                direction = direction / np.linalg.norm(direction)
+                directions.append(direction)
+
+        return directions
+
+    def update_frontier_voxels(self):
+        # a frontier voxel is an unknown voxel adjacent to both empty and
+        # occupied space
+        self.frontier_voxels = []
+        remaining_unknown = []
+
+        for voxel in self.unknown_voxels:
+            neighbors = self.find_neighbors(voxel)
+            has_empty = any(self.voxel_types.get(tuple(n), VoxelType.NONE) == VoxelType.EMPTY for n in neighbors)
+            has_occupied = any(self.voxel_types.get(tuple(n), VoxelType.NONE) == VoxelType.OCCUPIED for n in neighbors)
+
+            if has_empty and has_occupied:
+                self.voxel_types[tuple(voxel)] = VoxelType.FRONTIER
+                self.frontier_voxels.append(voxel)
+            else:
+                remaining_unknown.append(voxel)
+        self.unknown_voxels = remaining_unknown
+
+    def is_in_bounding_box(self, point: np.ndarray) -> bool:
+        if self.bbx_min is None or self.bbx_max is None:
+            return False
+        return np.all(point >= self.bbx_min) and np.all(point <= self.bbx_max)
+
+    def get_voxel_coordinate(self, point: np.ndarray) -> np.ndarray:
+        return (point / self.voxel_resolution).astype(int) * self.voxel_resolution
+
+    def voxelize_points(self, points: np.ndarray) -> List[np.ndarray]:
+        voxel_coords = (points / self.voxel_resolution).astype(int)
+        unique_voxels = np.unique(voxel_coords, axis=0)
+        voxels = [voxel * self.voxel_resolution for voxel in unique_voxels]
+        for voxel in voxels:
+            self.voxel_types[tuple(voxel)] = VoxelType.OCCUPIED
+        return voxels
+
+    def is_occupied(self, voxel: np.ndarray) -> bool:
+        return self.voxel_types.get(tuple(voxel), VoxelType.NONE) == VoxelType.OCCUPIED
+
+    def find_neighbors(self, voxel: np.ndarray) -> List[np.ndarray]:
+        neighbors = []
+        for dx in [-1, 0, 1]:
+            for dy in [-1, 0, 1]:
+                for dz in [-1, 0, 1]:
+                    if dx == 0 and dy == 0 and dz == 0:
+                        continue
+                    neighbor = voxel + np.array([dx, dy, dz]) * self.voxel_resolution
+                    neighbors.append(neighbor)
+        return neighbors
+
+    def update_bounding_box(self):
+        if not self.occupied_voxels:
+            return
+
+        occupied_array = np.array(self.occupied_voxels)
+        self.bbx_min = occupied_array.min(axis=0) - 2 * self.voxel_resolution
+        self.bbx_max = occupied_array.max(axis=0) + 2 * self.voxel_resolution
+
+    def transform_points(self, points: np.ndarray, transform: np.ndarray) -> np.ndarray:
+        ones = np.ones((points.shape[0], 1))
+        points_homo = np.hstack((points, ones))
+        transformed = (transform @ points_homo.T).T
+        return transformed[:, :3]
+
+    def create_voxel_geometry(self, voxels, color, voxel_size):
+        # voxel_size is currently unused; voxels are drawn as colored points
+        import open3d as o3d
+        points = np.array(voxels)
+        if len(points) == 0:
+            return None
+
+        pcd = o3d.geometry.PointCloud()
+        pcd.points = o3d.utility.Vector3dVector(points)
+        pcd.colors = o3d.utility.Vector3dVector(np.tile(color, (len(points), 1)))
+        return pcd
+
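+    # The visualization helpers import Open3D lazily so headless evaluation
+    # does not require it. Hypothetical usage after a capture:
+    #   vs = VoxelStruct()
+    #   vs.update_voxel_map(pts, cam_pose)   # pts: (N, 3) camera-frame points
+    #   vs.visualize_voxel_struct(cam_pose)  # blue=occupied, green=empty,
+    #                                        # gray=unknown, red=frontier
+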
+    def create_ray_geometry(self, camera_pos, directions, camera_rot, length=1.0):
+        import open3d as o3d
+        lines = []
+        colors = []
+        for direction in directions:
+            # transform the direction vector into the camera frame
+            direction_cam = camera_rot @ direction
+            end_point = camera_pos - direction_cam * length
+            lines.append([camera_pos, end_point])
+            colors.append([0.5, 0.5, 0.5])  # gray rays
+
+        line_set = o3d.geometry.LineSet()
+        line_set.points = o3d.utility.Vector3dVector(np.array(lines).reshape(-1, 3))
+        line_set.lines = o3d.utility.Vector2iVector(np.array([[i*2, i*2+1] for i in range(len(lines))]))
+        line_set.colors = o3d.utility.Vector3dVector(colors)
+        return line_set
+
+    def visualize_voxel_struct(self, camera_pose: np.ndarray = None):
+        import open3d as o3d
+        vis = o3d.visualization.Visualizer()
+        vis.create_window()
+
+        coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])
+        vis.add_geometry(coordinate_frame)
+
+        # occupied voxels (blue)
+        occupied_voxels = self.create_voxel_geometry(
+            self.occupied_voxels,
+            [0, 0, 1],
+            self.voxel_resolution
+        )
+        if occupied_voxels:
+            vis.add_geometry(occupied_voxels)
+
+        # empty voxels (green)
+        empty_voxels = self.create_voxel_geometry(
+            self.empty_voxels,
+            [0, 1, 0],
+            self.voxel_resolution
+        )
+        if empty_voxels:
+            vis.add_geometry(empty_voxels)
+
+        # unknown voxels (gray)
+        unknown_voxels = self.create_voxel_geometry(
+            self.unknown_voxels,
+            [0.5, 0.5, 0.5],
+            self.voxel_resolution
+        )
+        if unknown_voxels:
+            vis.add_geometry(unknown_voxels)
+
+        # frontier voxels (red)
+        frontier_voxels = self.create_voxel_geometry(
+            self.frontier_voxels,
+            [1, 0, 0],
+            self.voxel_resolution
+        )
+        if frontier_voxels:
+            vis.add_geometry(frontier_voxels)
+
+        # camera rays
+        if camera_pose is not None:
+            directions = self.generate_ray_directions()
+            rays = self.create_ray_geometry(
+                camera_pose[:3, 3],
+                directions,
+                camera_pose[:3, :3],
+                length=0.5  # ray length
+            )
+            vis.add_geometry(rays)
+
+        opt = vis.get_render_option()
+        opt.background_color = np.asarray([0.8, 0.8, 0.8])
+        opt.point_size = 5.0
+
+        vis.run()
+        vis.destroy_window()
+
+class PBNBV:
+    def __init__(self, voxel_resolution=0.01, camera_intrinsic=None):
+        self.voxel_resolution = voxel_resolution
+        self.voxel_struct = VoxelStruct(voxel_resolution)
+        self.camera_intrinsic = camera_intrinsic or np.array([
+            [902.14, 0, 320],
+            [0, 902.14, 200],
+            [0, 0, 1]
+        ])
+        # average focal length, scaled down to scene units
+        self.focal_length = (self.camera_intrinsic[0,0] + self.camera_intrinsic[1,1]) / 2 / 1000
+        self.ellipsoids = []
+
+    def capture(self, point_cloud: np.ndarray, camera_pose: np.ndarray):
+        frontier_voxels, occupied_voxels = self.voxel_struct.update_voxel_map(point_cloud, camera_pose)
+        # self.voxel_struct.visualize_voxel_struct(camera_pose)
+        self.fit_ellipsoids(frontier_voxels, occupied_voxels)
+
+    def reset(self):
+        self.ellipsoids = []
+        self.voxel_struct = VoxelStruct(self.voxel_resolution)
+
+    def fit_ellipsoids(self, frontier_voxels: List[np.ndarray], occupied_voxels: List[np.ndarray],
+                       max_ellipsoids=10):
+        self.ellipsoids = []
+
+        if not frontier_voxels and not occupied_voxels:
+            return
+
+        if frontier_voxels:
+            frontier_gmm = self.fit_gmm(np.array(frontier_voxels), max_ellipsoids)
+            self.ellipsoids.extend(self.gmm_to_ellipsoids(frontier_gmm, "frontier"))
+
+        if occupied_voxels:
+            occupied_gmm = self.fit_gmm(np.array(occupied_voxels), max_ellipsoids)
+            self.ellipsoids.extend(self.gmm_to_ellipsoids(occupied_gmm, "occupied"))
+
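+    # fit_gmm() below selects the component count by BIC. A standalone sketch
+    # with hypothetical data:
+    #   data = np.random.rand(200, 3)
+    #   gmm = self.fit_gmm(data, max_components=10)
+    #   print(gmm.n_components, gmm.bic(data))
+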
+    def fit_gmm(self, data: np.ndarray, max_components: int) -> GaussianMixture:
+        best_gmm = None
+        best_bic = np.inf
+
+        for n in range(1, min(max_components, len(data)) + 1):
+            gmm = GaussianMixture(n_components=n, covariance_type='full')
+            gmm.fit(data)
+            bic = gmm.bic(data)
+
+            if bic < best_bic:
+                best_bic = bic
+                best_gmm = gmm
+
+        return best_gmm
+
+    def gmm_to_ellipsoids(self, gmm: GaussianMixture, ellipsoid_type: str) -> List[Dict]:
+        ellipsoids = []
+
+        for i in range(gmm.n_components):
+            mean = gmm.means_[i]
+            cov = gmm.covariances_[i]
+
+            eigvals, eigvecs = np.linalg.eigh(cov)
+            radii = np.sqrt(eigvals) * 3  # 3-sigma extent along each axis
+
+            rotation = eigvecs
+            pose = np.eye(4)
+            pose[:3, :3] = rotation
+            pose[:3, 3] = mean
+
+            ellipsoids.append({
+                "type": ellipsoid_type,
+                "pose": pose,
+                "radii": radii
+            })
+
+        return ellipsoids
+
+    def evaluate_viewpoint(self, viewpoint_pose: np.ndarray) -> float:
+        if not self.ellipsoids:
+            return 0.0
+
+        ellipsoid_weights = self.compute_ellipsoid_weights(viewpoint_pose)
+
+        projection_scores = []
+        for ellipsoid, weight in zip(self.ellipsoids, ellipsoid_weights):
+            score = self.project_ellipsoid(ellipsoid, viewpoint_pose) * weight
+            projection_scores.append((ellipsoid["type"], score))
+
+        frontier_score = sum(s for t, s in projection_scores if t == "frontier")
+        occupied_score = sum(s for t, s in projection_scores if t == "occupied")
+
+        return frontier_score - occupied_score
+
+    def compute_ellipsoid_weights(self, viewpoint_pose: np.ndarray) -> List[float]:
+        centers_world = np.array([e["pose"][:3, 3] for e in self.ellipsoids])
+        centers_homo = np.hstack((centers_world, np.ones((len(centers_world), 1))))
+        centers_cam = (np.linalg.inv(viewpoint_pose) @ centers_homo.T).T[:, :3]
+
+        # rank ellipsoids by camera-frame depth; weight halves with each rank
+        z_coords = centers_cam[:, 2]
+        sorted_indices = np.argsort(z_coords)
+
+        weights = np.zeros(len(self.ellipsoids))
+        for rank, idx in enumerate(sorted_indices):
+            weights[idx] = 0.5 ** rank
+
+        return weights.tolist()
+
+    def project_ellipsoid(self, ellipsoid: Dict, viewpoint_pose: np.ndarray) -> float:
+        ellipsoid_pose_cam = np.linalg.inv(viewpoint_pose) @ ellipsoid["pose"]
+
+        radii = ellipsoid["radii"]
+        rotation = ellipsoid_pose_cam[:3, :3]
+        scales = np.diag(radii)
+        transform = rotation @ scales
+
+        # approximate the projected area by the ellipse spanned by the two
+        # transformed principal axes
+        major_axis = np.linalg.norm(transform[:, 0])
+        minor_axis = np.linalg.norm(transform[:, 1])
+        area = np.pi * major_axis * minor_axis
+
+        return area
+
+    def generate_candidate_views(self, num_views=100, longitude_num=5) -> List[np.ndarray]:
+        if self.voxel_struct.bbx_min is None:
+            return []
+
+        center = (self.voxel_struct.bbx_min + self.voxel_struct.bbx_max) / 2
+        radius = np.linalg.norm(self.voxel_struct.bbx_max - self.voxel_struct.bbx_min) / 2 + self.focal_length
+
+        candidate_views = []
+
+        # latitude bands; views per band are allocated proportionally to the
+        # band's circumference
+        latitudes = np.linspace(np.deg2rad(40), np.deg2rad(90), longitude_num)
+
+        lengths = [2 * np.pi * np.sin(lat) * radius for lat in latitudes]
+        total_length = sum(lengths)
+        points_per_lat = [int(round(num_views * l / total_length)) for l in lengths]
+
+        for lat, n in zip(latitudes, points_per_lat):
+            if n == 0:
+                continue
+
+            longitudes = np.linspace(0, 2*np.pi, n, endpoint=False)
+            for lon in longitudes:
+                x = radius * np.sin(lat) * np.cos(lon)
+                y = radius * np.sin(lat) * np.sin(lon)
+                z = radius * np.cos(lat)
+                position = np.array([x, y, z]) + center
+
+                # build a look-at frame with +Z pointing at the scene center
+                z_axis = center - position
+                z_axis /= np.linalg.norm(z_axis)
+
+                x_axis = np.cross(z_axis, np.array([0, 0, 1]))
+                if np.linalg.norm(x_axis) < 1e-6:
+                    x_axis = np.array([1, 0, 0])
+                x_axis /= np.linalg.norm(x_axis)
+
+                y_axis = np.cross(z_axis, x_axis)
+                y_axis /= np.linalg.norm(y_axis)
+
+                rotation = np.column_stack((x_axis, y_axis, z_axis))
+
+                view_pose = np.eye(4)
+                view_pose[:3, :3] = rotation
+                view_pose[:3, 3] = position
+
+                candidate_views.append(view_pose)
+
+        return candidate_views
+
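+    # Selection sketch: score every candidate and take the arg max, which is
+    # what select_best_view() below does. Hypothetical standalone use:
+    #   pb = PBNBV(voxel_resolution=0.01)
+    #   pb.capture(pts, cam_pose)
+    #   views = pb.generate_candidate_views(num_views=100)
+    #   best = views[int(np.argmax([pb.evaluate_viewpoint(v) for v in views]))]
+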
+    def select_best_view(self) -> np.ndarray:
+        candidate_views = self.generate_candidate_views()
+        if not candidate_views:
+            return np.eye(4)
+
+        scores = [self.evaluate_viewpoint(view) for view in candidate_views]
+        best_idx = np.argmax(scores)
+
+        return candidate_views[best_idx]
+
+    def execute(self) -> Tuple[np.ndarray, bool]:
+        best_view = self.select_best_view()
+
+        # the scan is done once no frontier ellipsoids remain
+        has_frontier = any(e["type"] == "frontier" for e in self.ellipsoids)
+        done = not has_frontier
+
+        return best_view, done
+
+import os
+import json
+from utils.render import RenderUtil
+from utils.pose import PoseUtil
+from utils.pts import PtsUtil
+from utils.reconstruction import ReconstructionUtil
+from beans.predict_result import PredictResult
+
+from tqdm import tqdm
+import numpy as np
+import pickle
+
+from PytorchBoot.config import ConfigManager
+import PytorchBoot.namespace as namespace
+import PytorchBoot.stereotype as stereotype
+from PytorchBoot.factory import ComponentFactory
+
+from PytorchBoot.dataset import BaseDataset
+from PytorchBoot.runners.runner import Runner
+from PytorchBoot.utils import Log
+from PytorchBoot.status import status_manager
+from utils.data_load import DataLoadUtil
+
+@stereotype.runner("evaluate_pbnbv")
+class EvaluatePBNBV(Runner):
+    def __init__(self, config_path):
+        super().__init__(config_path)
+
+        self.script_path = ConfigManager.get(namespace.Stereotype.RUNNER, "blender_script_path")
+        self.output_dir = ConfigManager.get(namespace.Stereotype.RUNNER, "output_dir")
+        self.voxel_size = ConfigManager.get(namespace.Stereotype.RUNNER, "voxel_size")
+        self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
+        CM = 0.01
+        self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) ** 2
+        self.overlap_limit = ConfigManager.get(namespace.Stereotype.RUNNER, "overlap_limit")
+
+        self.pbnbv = PBNBV(self.voxel_size)
+        ''' Experiment '''
+        self.load_experiment("nbv_evaluator")
+        self.stat_result_path = os.path.join(self.output_dir, "stat.json")
+        if os.path.exists(self.stat_result_path):
+            with open(self.stat_result_path, "r") as f:
+                self.stat_result = json.load(f)
+        else:
+            self.stat_result = {}
+
+        ''' Test '''
+        self.test_config = ConfigManager.get(namespace.Stereotype.RUNNER, namespace.Mode.TEST)
+        self.test_dataset_name_list = self.test_config["dataset_list"]
+        self.test_set_list = []
+        self.test_writer_list = []
+        seen_name = set()
+        for test_dataset_name in self.test_dataset_name_list:
+            if test_dataset_name not in seen_name:
+                seen_name.add(test_dataset_name)
+            else:
+                raise ValueError("Duplicate test dataset name: {}".format(test_dataset_name))
+            test_set: BaseDataset = ComponentFactory.create(namespace.Stereotype.DATASET, test_dataset_name)
+            self.test_set_list.append(test_set)
+        self.print_info()
+
+    def run(self):
+        Log.info("Loading from epoch {}.".format(self.current_epoch))
+        self.inference()
+        Log.success("Inference finished.")
+
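+    # Entry-point sketch (mirrors app_inference.py in this patch):
+    #   EvaluatePBNBV("./configs/local/pbnbv_evalutaion_config.yaml").run()
+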
+    def inference(self):
+        test_set: BaseDataset
+        for dataset_idx, test_set in enumerate(self.test_set_list):
+            status_manager.set_progress("inference", "inferencer", "dataset", dataset_idx, len(self.test_set_list))
+            test_set_name = test_set.get_name()
+
+            total = int(len(test_set))
+            for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
+                try:
+                    self.pbnbv.reset()
+                    data = test_set.__getitem__(i)
+                    scene_name = data["scene_name"]
+                    inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
+
+                    if os.path.exists(inference_result_path):
+                        Log.info(f"Inference result already exists for scene: {scene_name}")
+                        continue
+
+                    status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
+                    output = self.predict_sequence(data)
+                    self.save_inference_result(test_set_name, data["scene_name"], output)
+                except Exception as e:
+                    print(e)
+                    Log.error(f"Error, {e}")
+                    continue
+
+        status_manager.set_progress("inference", "inferencer", "dataset", len(self.test_set_list), len(self.test_set_list))
+
+    def get_output_data(self):
+        pose_matrix, done = self.pbnbv.execute()
+
+        # flip the Y/Z camera axes to change camera conventions
+        offset = np.asarray([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
+        pose_matrix = pose_matrix @ offset
+        rot = pose_matrix[:3,:3]
+
+        pose_6d = PoseUtil.matrix_to_rotation_6d_numpy(rot)
+        translation = pose_matrix[:3, 3]
+
+        pose_9d = np.concatenate([pose_6d, translation], axis=0).reshape(1,9)
+        # replicate the single pose so downstream clustering sees a batch
+        pose_9d = pose_9d.repeat(50, axis=0)
+        return {"pred_pose_9d": pose_9d}
+
+    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry=10, max_success=3):
+        scene_name = data["scene_name"]
+        Log.info(f"Processing scene: {scene_name}")
+        status_manager.set_status("inference", "inferencer", "scene", scene_name)
+
+        ''' data for rendering '''
+        scene_path = data["scene_path"]
+        O_to_L_pose = data["O_to_L_pose"]
+        voxel_threshold = self.voxel_size
+        filter_degree = 75
+        down_sampled_model_pts = data["gt_pts"]
+
+        first_frame_to_world_9d = data["first_scanned_n_to_world_pose_9d"][0]
+        first_frame_to_world = np.eye(4)
+        first_frame_to_world[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(first_frame_to_world_9d[:6])
+        first_frame_to_world[:3,3] = first_frame_to_world_9d[6:]
+        self.pbnbv.capture(data["first_scanned_pts"][0], first_frame_to_world)
+
+        ''' data for inference '''
+        input_data = {}
+        input_data["combined_scanned_pts"] = np.array(data["first_scanned_pts"][0], dtype=np.float32)
+        input_data["scanned_pts"] = [np.array(data["first_scanned_pts"][0], dtype=np.float32)]
+        input_data["scanned_pts_mask"] = [np.zeros(input_data["combined_scanned_pts"].shape[0], dtype=np.bool_)]
+        input_data["scanned_n_to_world_pose_9d"] = [np.array(data["first_scanned_n_to_world_pose_9d"], dtype=np.float32)]
+        input_data["mode"] = namespace.Mode.TEST
+        input_pts_N = input_data["combined_scanned_pts"].shape[0]
+        root = os.path.dirname(scene_path)
+        display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
+        radius = display_table_info["radius"]
+        scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0, display_table_radius=radius))
+
+        first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
+        scanned_view_pts = [first_frame_target_pts]
+        history_indices = [first_frame_scan_points_indices]
+        last_pred_cr, added_pts_num = self.compute_coverage_rate(scanned_view_pts, None, down_sampled_model_pts, threshold=voxel_threshold)
+        retry_duplication_pose = []
+        retry_no_pts_pose = []
+        retry_overlap_pose = []
+        retry = 0
+        pred_cr_seq = [last_pred_cr]
+        success = 0
+        last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], voxel_threshold).shape[0]
+
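+        # Main NBV loop. It terminates when any of the following holds: the
+        # sequence reaches max_iter views, max_retry proposals failed
+        # (rendered no points, overlapped too little, or duplicated existing
+        # coverage), or max_success views confirmed near-maximal coverage.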
+        while len(pred_cr_seq) < max_iter and retry < max_retry and success < max_success:
+            Log.green(f"iter: {len(pred_cr_seq)}, retry: {retry}/{max_retry}, success: {success}/{max_success}")
+            combined_scanned_pts = np.vstack(scanned_view_pts)
+            voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
+
+            output = self.get_output_data()
+            pred_pose_9d = output["pred_pose_9d"]
+            pred_pose = np.eye(4)
+
+            predict_result = PredictResult(pred_pose_9d, input_pts=input_data["combined_scanned_pts"], cluster_params=dict(eps=0.25, min_samples=3))
+            # debug visualization, disabled so batch evaluation runs unattended:
+            # import ipdb; ipdb.set_trace()
+            # predict_result.visualize()
+            pred_pose_9d_candidates = predict_result.candidate_9d_poses
+            for pred_pose_9d in pred_pose_9d_candidates:
+                pred_pose_9d = np.array(pred_pose_9d, dtype=np.float32)
+                pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(pred_pose_9d[:6])
+                pred_pose[:3,3] = pred_pose_9d[6:]
+                try:
+                    new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
+                    # relax the overlap-area requirement when the table scan
+                    # points already overlap history
+                    if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
+                        curr_overlap_area_threshold = overlap_area_threshold
+                    else:
+                        curr_overlap_area_threshold = overlap_area_threshold * 0.5
+
+                    downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
+                    if self.overlap_limit:
+                        overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold=curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num=True)
+                        if not overlap:
+                            Log.yellow("no overlap!")
+                            retry += 1
+                            retry_overlap_pose.append(pred_pose.tolist())
+                            continue
+
+                    history_indices.append(new_scan_points_indices)
+                except Exception as e:
+                    Log.error(f"Error in scene {scene_path}, {e}")
+                    print("current pose: ", pred_pose)
+                    print("curr_pred_cr: ", last_pred_cr)
+                    retry_no_pts_pose.append(pred_pose.tolist())
+                    retry += 1
+                    continue
+
+                if new_target_pts.shape[0] == 0:
+                    Log.red("no pts in new target")
+                    retry_no_pts_pose.append(pred_pose.tolist())
+                    retry += 1
+                    continue
+
+                pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
+                Log.yellow(f"{pred_cr}, {last_pred_cr}, max: {data['seq_max_coverage_rate']}")
+                if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
+                    print("max coverage rate reached!: ", pred_cr)
+
+                pred_cr_seq.append(pred_cr)
+                scanned_view_pts.append(new_target_pts)
+
+                pred_pose_9d = pred_pose_9d.reshape(1, -1)
+                input_data["scanned_n_to_world_pose_9d"] = [np.concatenate([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], axis=0)]
+
+                combined_scanned_pts = np.vstack(scanned_view_pts)
+                voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, voxel_threshold)
+                random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
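+                # Re-capture with the merged cloud: voxel downsampling removes
+                # duplicate points, then random downsampling restores the fixed
+                # input size N so every capture sees a same-sized cloud.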
+                self.pbnbv.capture(np.array(random_downsampled_combined_scanned_pts_np, dtype=np.float32), pred_pose)
+                input_data["combined_scanned_pts"] = np.array(random_downsampled_combined_scanned_pts_np, dtype=np.float32)
+                input_data["scanned_pts"] = [np.concatenate([input_data["scanned_pts"][0], np.array(random_downsampled_combined_scanned_pts_np, dtype=np.float32)], axis=0)]
+
+                last_pred_cr = pred_cr
+                pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
+                Log.info(f"delta pts num: {pts_num - last_pts_num}, {pts_num}, {last_pts_num}")
+
+                if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
+                    retry += 1
+                    retry_duplication_pose.append(pred_pose.tolist())
+                    Log.red(f"delta pts num < {self.min_new_pts_num}: {pts_num}, {last_pts_num}")
+                elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2:
+                    success += 1
+                    Log.success(f"delta pts num < {self.min_new_pts_num}: {pts_num}, {last_pts_num}")
+
+                last_pts_num = pts_num
+
+        input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].tolist()
+        result = {
+            "pred_pose_9d_seq": input_data["scanned_n_to_world_pose_9d"],
+            "combined_scanned_pts": input_data["combined_scanned_pts"],
+            "target_pts_seq": scanned_view_pts,
+            "coverage_rate_seq": pred_cr_seq,
+            "max_coverage_rate": data["seq_max_coverage_rate"],
+            "pred_max_coverage_rate": max(pred_cr_seq),
+            "scene_name": scene_name,
+            "retry_no_pts_pose": retry_no_pts_pose,
+            "retry_duplication_pose": retry_duplication_pose,
+            "retry_overlap_pose": retry_overlap_pose,
+            "best_seq_len": data["best_seq_len"],
+        }
+        self.stat_result[scene_name] = {
+            "coverage_rate_seq": pred_cr_seq,
+            "pred_max_coverage_rate": max(pred_cr_seq),
+            "pred_seq_len": len(pred_cr_seq),
+        }
+        print('max coverage rate: ', max(pred_cr_seq))
+
+        return result
+
+    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
+        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
+        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
+        idx_sort = np.argsort(inverse)
+        idx_unique = idx_sort[np.cumsum(counts) - counts]  # first point of each voxel
+        downsampled_points = point_cloud[idx_unique]
+        return downsampled_points, inverse
+
+    def compute_coverage_rate(self, scanned_view_pts, new_pts, model_pts, threshold=0.005):
+        if new_pts is not None:
+            new_scanned_view_pts = scanned_view_pts + [new_pts]
+        else:
+            new_scanned_view_pts = scanned_view_pts
+        combined_point_cloud = np.vstack(new_scanned_view_pts)
+        down_sampled_combined_point_cloud = PtsUtil.voxel_downsample_point_cloud(combined_point_cloud, threshold)
+        return ReconstructionUtil.compute_coverage_rate(model_pts, down_sampled_combined_point_cloud, threshold)
+
+    def save_inference_result(self, dataset_name, scene_name, output):
+        dataset_dir = os.path.join(self.output_dir, dataset_name)
+        if not os.path.exists(dataset_dir):
+            os.makedirs(dataset_dir)
+        output_path = os.path.join(dataset_dir, f"{scene_name}.pkl")
+        pickle.dump(output, open(output_path, "wb"))
+        with open(self.stat_result_path, "w") as f:
+            json.dump(self.stat_result, f)
+
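+    # Reading a saved result back (sketch, using the layout written above):
+    #   with open(os.path.join(output_dir, dataset_name, f"{scene_name}.pkl"), "rb") as f:
+    #       result = pickle.load(f)
+    #   print(result["pred_max_coverage_rate"], result["best_seq_len"])
+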
"Epoch_{}.pth".format( + self.current_epoch if self.current_epoch != -1 and not is_last else "last")) + + def load_experiment(self, backup_name=None): + super().load_experiment(backup_name) + self.current_epoch = self.experiments_config["epoch"] + #self.load_checkpoint(is_last=(self.current_epoch == -1)) + + def create_experiment(self, backup_name=None): + super().create_experiment(backup_name) + + + def load(self, path): + # 如果仍然需要加载某些数据,可以使用numpy的load方法 + pass + + def print_info(self): + def print_dataset(dataset: BaseDataset): + config = dataset.get_config() + name = dataset.get_name() + Log.blue(f"Dataset: {name}") + for k,v in config.items(): + Log.blue(f"\t{k}: {v}") + + super().print_info() + table_size = 70 + Log.blue(f"{'+' + '-' * (table_size // 2)} Pipeline {'-' * (table_size // 2)}" + '+') + #Log.blue(self.pipeline) + Log.blue(f"{'+' + '-' * (table_size // 2)} Datasets {'-' * (table_size // 2)}" + '+') + for i, test_set in enumerate(self.test_set_list): + Log.blue(f"test dataset {i}: ") + print_dataset(test_set) + + Log.blue(f"{'+' + '-' * (table_size // 2)}----------{'-' * (table_size // 2)}" + '+') + diff --git a/runners/evaluate_uncertainty_guide.py b/runners/evaluate_uncertainty_guide.py index be858ff..c95f80e 100644 --- a/runners/evaluate_uncertainty_guide.py +++ b/runners/evaluate_uncertainty_guide.py @@ -6,7 +6,6 @@ from utils.pts import PtsUtil from utils.reconstruction import ReconstructionUtil from beans.predict_result import PredictResult -import torch from tqdm import tqdm import numpy as np import pickle @@ -34,8 +33,9 @@ class EvaluateUncertaintyGuide(Runner): self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area") CM = 0.01 self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) ** 2 + self.overlap_limit = ConfigManager.get(namespace.Stereotype.RUNNER, "overlap_limit") - + self.radius = 0.5 self.output_data_root = ConfigManager.get(namespace.Stereotype.RUNNER, "output_data_root") self.output_data = dict() for scene_name in os.listdir(self.output_data_root): @@ -75,38 +75,48 @@ class EvaluateUncertaintyGuide(Runner): def inference(self): #self.pipeline.eval() - with torch.no_grad(): - test_set: BaseDataset - for dataset_idx, test_set in enumerate(self.test_set_list): - status_manager.set_progress("inference", "inferencer", f"dataset", dataset_idx, len(self.test_set_list)) - test_set_name = test_set.get_name() + + test_set: BaseDataset + for dataset_idx, test_set in enumerate(self.test_set_list): + status_manager.set_progress("inference", "inferencer", f"dataset", dataset_idx, len(self.test_set_list)) + test_set_name = test_set.get_name() - total=int(len(test_set)) - for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100): - try: - data = test_set.__getitem__(i) - scene_name = data["scene_name"] - inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl") - - if os.path.exists(inference_result_path): - Log.info(f"Inference result already exists for scene: {scene_name}") - continue - - status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total) - output = self.predict_sequence(data) - self.save_inference_result(test_set_name, data["scene_name"], output) - except Exception as e: - print(e) - Log.error(f"Error, {e}") + total=int(len(test_set)) + for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100): + try: + data = test_set.__getitem__(i) + scene_name = data["scene_name"] + inference_result_path = os.path.join(self.output_dir, 
test_set_name, f"{scene_name}.pkl") + + if os.path.exists(inference_result_path): + Log.info(f"Inference result already exists for scene: {scene_name}") continue - status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list)) + status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total) + output = self.predict_sequence(data) + self.save_inference_result(test_set_name, data["scene_name"], output) + except Exception as e: + print(e) + Log.error(f"Error, {e}") + continue + + status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list)) def get_output_data(self, scene_name, idx): pose_matrix = self.output_data[scene_name][idx] - pose_6d = PoseUtil.matrix_to_rotation_6d_numpy(pose_matrix[:3,:3]) - pose_9d = np.concatenate([pose_6d, pose_matrix[:3,3]], axis=0).reshape(1,9) - import ipdb; ipdb.set_trace() + offset = np.asarray([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) + pose_matrix = pose_matrix @ offset + rot = pose_matrix[:3,:3] + + pose_6d = PoseUtil.matrix_to_rotation_6d_numpy(rot) + + # 计算相机在球面上的位置 + camera_direction = rot[:, 2] # 相机朝向球心 + translation = -self.radius * camera_direction # 相机位置在球面上 + + pose_9d = np.concatenate([pose_6d, translation], axis=0).reshape(1,9) + pose_9d = pose_9d.repeat(50, axis=0) + #import ipdb; ipdb.set_trace() return {"pred_pose_9d": pose_9d} def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 10, max_success=3): @@ -129,17 +139,17 @@ class EvaluateUncertaintyGuide(Runner): ''' data for inference ''' input_data = {} - input_data["combined_scanned_pts"] = torch.tensor(data["first_scanned_pts"][0], dtype=torch.float32).to(self.device).unsqueeze(0) - input_data["scanned_pts"] = [torch.tensor(data["first_scanned_pts"][0], dtype=torch.float32).to(self.device).unsqueeze(0)] - input_data["scanned_pts_mask"] = [torch.zeros(input_data["combined_scanned_pts"].shape[1], dtype=torch.bool).to(self.device).unsqueeze(0)] - input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)] + input_data["combined_scanned_pts"] = np.array(data["first_scanned_pts"][0], dtype=np.float32) + input_data["scanned_pts"] = [np.array(data["first_scanned_pts"][0], dtype=np.float32)] + input_data["scanned_pts_mask"] = [np.zeros(input_data["combined_scanned_pts"].shape[0], dtype=np.bool_)] + input_data["scanned_n_to_world_pose_9d"] = [np.array(data["first_scanned_n_to_world_pose_9d"], dtype=np.float32)] input_data["mode"] = namespace.Mode.TEST - input_pts_N = input_data["combined_scanned_pts"].shape[1] + input_pts_N = input_data["combined_scanned_pts"].shape[0] root = os.path.dirname(scene_path) display_table_info = DataLoadUtil.get_display_table_info(root, scene_name) radius = display_table_info["radius"] scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0,display_table_radius=radius)) - + first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose) scanned_view_pts = [first_frame_target_pts] history_indices = [first_frame_scan_points_indices] @@ -160,30 +170,20 @@ class EvaluateUncertaintyGuide(Runner): output = self.get_output_data(scene_name, i) 
pred_pose_9d = output["pred_pose_9d"] - import ipdb; ipdb.set_trace() - #pred_pose = torch.eye(4, device=pred_pose_9d.device) - # # save pred_pose_9d ------ - # root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result" - # scene_dir = os.path.join(root, scene_name) - # if not os.path.exists(scene_dir): - # os.makedirs(scene_dir) - # pred_9d_path = os.path.join(scene_dir,f"pred_pose_9d_{len(pred_cr_seq)}.npy") - # pts_path = os.path.join(scene_dir,f"combined_scanned_pts_{len(pred_cr_seq)}.txt") - # np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy() - # np.save(pred_9d_path, pred_pose_9d.cpu().numpy()) - # np.savetxt(pts_path, np_combined_scanned_pts) - # # ----- ----- ----- - predict_result = PredictResult(pred_pose_9d, input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3)) + pred_pose = np.eye(4) + + predict_result = PredictResult(pred_pose_9d, input_pts=input_data["combined_scanned_pts"], cluster_params=dict(eps=0.25, min_samples=3)) # ----------------------- # import ipdb; ipdb.set_trace() # predict_result.visualize() # ----------------------- pred_pose_9d_candidates = predict_result.candidate_9d_poses + #import ipdb; ipdb.set_trace() for pred_pose_9d in pred_pose_9d_candidates: #import ipdb; ipdb.set_trace() - pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0) - pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9d[:,:6])[0] - pred_pose[:3,3] = pred_pose_9d[0,6:] + pred_pose_9d = np.array(pred_pose_9d, dtype=np.float32) + pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(pred_pose_9d[:6]) + pred_pose[:3,3] = pred_pose_9d[6:] try: new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose) #import ipdb; ipdb.set_trace() @@ -193,25 +193,27 @@ class EvaluateUncertaintyGuide(Runner): curr_overlap_area_threshold = overlap_area_threshold * 0.5 downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold) - overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True) - if not overlap: - Log.yellow("no overlap!") - retry += 1 - retry_overlap_pose.append(pred_pose.cpu().numpy().tolist()) - continue + #import ipdb; ipdb.set_trace() + if self.overlap_limit: + overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True) + if not overlap: + Log.yellow("no overlap!") + retry += 1 + retry_overlap_pose.append(pred_pose.tolist()) + continue history_indices.append(new_scan_points_indices) except Exception as e: Log.error(f"Error in scene {scene_path}, {e}") print("current pose: ", pred_pose) print("curr_pred_cr: ", last_pred_cr) - retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist()) + retry_no_pts_pose.append(pred_pose.tolist()) retry += 1 continue if new_target_pts.shape[0] == 0: Log.red("no pts in new target") - retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist()) + retry_no_pts_pose.append(pred_pose.tolist()) retry += 1 continue @@ -225,13 +227,14 @@ class 
EvaluateUncertaintyGuide(Runner): pred_cr_seq.append(pred_cr) scanned_view_pts.append(new_target_pts) - input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)] + pred_pose_9d = pred_pose_9d.reshape(1, -1) + input_data["scanned_n_to_world_pose_9d"] = [np.concatenate([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], axis=0)] combined_scanned_pts = np.vstack(scanned_view_pts) voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, voxel_threshold) random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N) - input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device) - input_data["scanned_pts"] = [torch.cat([input_data["scanned_pts"][0], torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)], dim=0)] + input_data["combined_scanned_pts"] = np.array(random_downsampled_combined_scanned_pts_np, dtype=np.float32) + input_data["scanned_pts"] = [np.concatenate([input_data["scanned_pts"][0], np.array(random_downsampled_combined_scanned_pts_np, dtype=np.float32)], axis=0)] last_pred_cr = pred_cr pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0] @@ -239,7 +242,7 @@ class EvaluateUncertaintyGuide(Runner): if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2: retry += 1 - retry_duplication_pose.append(pred_pose.cpu().numpy().tolist()) + retry_duplication_pose.append(pred_pose.tolist()) Log.red(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}") elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2: success += 1 @@ -248,7 +251,7 @@ class EvaluateUncertaintyGuide(Runner): last_pts_num = pts_num - input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist() + input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].tolist() result = { "pred_pose_9d_seq": input_data["scanned_n_to_world_pose_9d"], "combined_scanned_pts": input_data["combined_scanned_pts"], @@ -311,21 +314,6 @@ class EvaluateUncertaintyGuide(Runner): "Epoch_{}.pth".format( self.current_epoch if self.current_epoch != -1 and not is_last else "last")) - def load_checkpoint(self, is_last=False): - self.load(self.get_checkpoint_path(is_last)) - Log.success(f"Loaded checkpoint from {self.get_checkpoint_path(is_last)}") - if is_last: - checkpoint_root = os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME) - meta_path = os.path.join(checkpoint_root, "meta.json") - if not os.path.exists(meta_path): - raise FileNotFoundError( - "No checkpoint meta.json file in the experiment {}".format(self.experiments_config["name"])) - file_path = os.path.join(checkpoint_root, "meta.json") - with open(file_path, "r") as f: - meta = json.load(f) - self.current_epoch = meta["last_epoch"] - self.current_iter = meta["last_iter"] - def load_experiment(self, backup_name=None): super().load_experiment(backup_name) self.current_epoch = self.experiments_config["epoch"] @@ -336,8 +324,8 @@ class EvaluateUncertaintyGuide(Runner): def load(self, path): - state_dict = torch.load(path) - self.pipeline.load_state_dict(state_dict) + # 如果仍然需要加载某些数据,可以使用numpy的load方法 + pass def print_info(self): def print_dataset(dataset: 
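Note: PredictResult condenses the network's batch of sampled 9D poses into a few candidates via density clustering; the cluster_params above (eps=0.25, min_samples=3) are DBSCAN's parameter names. A rough sketch of that idea with scikit-learn; the per-cluster averaging is an assumption about how PredictResult picks a representative, not its verified implementation:

    import numpy as np
    from sklearn.cluster import DBSCAN

    def candidate_poses(pose_9d_batch, eps=0.25, min_samples=3):
        """Cluster sampled 9D poses; return one representative per cluster."""
        labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(pose_9d_batch)
        candidates = []
        for label in sorted(set(labels) - {-1}):        # -1 marks DBSCAN noise
            # mean 9D vector; the 6D rotation part must be re-orthonormalized
            # downstream (see the Gram-Schmidt sketch above)
            candidates.append(pose_9d_batch[labels == label].mean(axis=0))
        if not candidates:                              # all noise: fall back to raw samples
            candidates = list(pose_9d_batch)
        return candidates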
diff --git a/runners/global_and_local_points_inferencer.py b/runners/global_and_local_points_inferencer.py
index 26ad563..05cad89 100644
--- a/runners/global_and_local_points_inferencer.py
+++ b/runners/global_and_local_points_inferencer.py
@@ -34,6 +34,8 @@ class GlobalAndLocalPointsInferencer(Runner):
         self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
         CM = 0.01
         self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) **2
+        self.overlap_limit = ConfigManager.get(namespace.Stereotype.RUNNER, "overlap_limit")
+        self.enable_cluster = ConfigManager.get(namespace.Stereotype.RUNNER, "enable_cluster")
         ''' Pipeline '''
         self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
@@ -149,27 +151,15 @@ class GlobalAndLocalPointsInferencer(Runner):
             Log.green(f"iter: {len(pred_cr_seq)}, retry: {retry}/{max_retry}, success: {success}/{max_success}")
             combined_scanned_pts = np.vstack(scanned_view_pts)
             voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
-
+            #import ipdb; ipdb.set_trace()
             output = self.pipeline(input_data)
             pred_pose_9d = output["pred_pose_9d"]
+            if not self.enable_cluster:
+                pred_pose_9d_candidates = [pred_pose_9d[0]]
+            else:
+                predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
+                pred_pose_9d_candidates = predict_result.candidate_9d_poses
             pred_pose = torch.eye(4, device=pred_pose_9d.device)
-            # # save pred_pose_9d ------
-            # root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result"
-            # scene_dir = os.path.join(root, scene_name)
-            # if not os.path.exists(scene_dir):
-            #     os.makedirs(scene_dir)
-            # pred_9d_path = os.path.join(scene_dir,f"pred_pose_9d_{len(pred_cr_seq)}.npy")
-            # pts_path = os.path.join(scene_dir,f"combined_scanned_pts_{len(pred_cr_seq)}.txt")
-            # np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy()
-            # np.save(pred_9d_path, pred_pose_9d.cpu().numpy())
-            # np.savetxt(pts_path, np_combined_scanned_pts)
-            # # ----- ----- -----
-            predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
-            # -----------------------
-            # import ipdb; ipdb.set_trace()
-            # predict_result.visualize()
-            # -----------------------
-            pred_pose_9d_candidates = predict_result.candidate_9d_poses
             for pred_pose_9d in pred_pose_9d_candidates:
                 #import ipdb; ipdb.set_trace()
                 pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0)
@@ -185,12 +175,13 @@ class GlobalAndLocalPointsInferencer(Runner):
                         curr_overlap_area_threshold = overlap_area_threshold * 0.5
 
                     downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
-                    overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
-                    if not overlap:
-                        Log.yellow("no overlap!")
-                        retry += 1
-                        retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
-                        continue
+                    if self.overlap_limit:
+                        overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
+                        if not overlap:
+                            Log.yellow("no overlap!")
+                            retry += 1
+                            retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
+                            continue
 
                     history_indices.append(new_scan_points_indices)
                 except Exception as e:
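Note: all three inferencers now gate the overlap test behind the new overlap_limit flag; when it is enabled, a candidate view is rejected unless its rendered points share enough voxelized surface with what has already been scanned. A minimal sketch of such a voxel-overlap test, assuming check_overlap counts shared occupied voxels and converts the count to an area in cm^2 (the repository's exact metric may differ):

    import numpy as np

    def voxel_overlap_area_cm2(new_pts, scanned_pts, voxel_size=0.003):
        """Count voxels occupied by both clouds; report their area in cm^2."""
        to_keys = lambda pts: {tuple(v) for v in np.floor(pts / voxel_size).astype(np.int32)}
        shared = len(to_keys(new_pts) & to_keys(scanned_pts))
        return shared * (voxel_size * 100.0) ** 2    # each voxel face ~ (0.3 cm)^2

    # the gate mirrors the runners: only reject candidates when the limit is active
    overlap_limit, overlap_area_threshold = True, 25
    new_pts = np.random.rand(1000, 3) * 0.1
    scanned = np.vstack([new_pts[:500], np.random.rand(500, 3) * 0.1])
    if overlap_limit and voxel_overlap_area_cm2(new_pts, scanned) < overlap_area_threshold:
        print("no overlap!")  # the candidate view would be retried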
diff --git a/runners/global_points_inferencer.py b/runners/global_points_inferencer.py
index a5d6403..f59c0d4 100644
--- a/runners/global_points_inferencer.py
+++ b/runners/global_points_inferencer.py
@@ -34,6 +34,8 @@ class GlobalPointsInferencer(Runner):
         self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
         CM = 0.01
         self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) **2
+        self.overlap_limit = ConfigManager.get(namespace.Stereotype.RUNNER, "overlap_limit")
+        self.enable_cluster = ConfigManager.get(namespace.Stereotype.RUNNER, "enable_cluster")
         ''' Pipeline '''
         self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
@@ -149,24 +151,12 @@ class GlobalPointsInferencer(Runner):
             voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
             output = self.pipeline(input_data)
             pred_pose_9d = output["pred_pose_9d"]
+            if not self.enable_cluster:
+                pred_pose_9d_candidates = [pred_pose_9d[0]]
+            else:
+                predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
+                pred_pose_9d_candidates = predict_result.candidate_9d_poses
             pred_pose = torch.eye(4, device=pred_pose_9d.device)
-            # # save pred_pose_9d ------
-            # root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result"
-            # scene_dir = os.path.join(root, scene_name)
-            # if not os.path.exists(scene_dir):
-            #     os.makedirs(scene_dir)
-            # pred_9d_path = os.path.join(scene_dir,f"pred_pose_9d_{len(pred_cr_seq)}.npy")
-            # pts_path = os.path.join(scene_dir,f"combined_scanned_pts_{len(pred_cr_seq)}.txt")
-            # np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy()
-            # np.save(pred_9d_path, pred_pose_9d.cpu().numpy())
-            # np.savetxt(pts_path, np_combined_scanned_pts)
-            # # ----- ----- -----
-            predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
-            # -----------------------
-            # import ipdb; ipdb.set_trace()
-            # predict_result.visualize()
-            # -----------------------
-            pred_pose_9d_candidates = predict_result.candidate_9d_poses
             for pred_pose_9d in pred_pose_9d_candidates:
                 #import ipdb; ipdb.set_trace()
                 pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0)
@@ -181,12 +171,13 @@ class GlobalPointsInferencer(Runner):
                         curr_overlap_area_threshold = overlap_area_threshold * 0.5
 
                     downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
-                    overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
-                    if not overlap:
-                        Log.yellow("no overlap!")
-                        retry += 1
-                        retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
-                        continue
+                    if self.overlap_limit:
+                        overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
+                        if not overlap:
+                            Log.yellow("no overlap!")
+                            retry += 1
+                            retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
+                            continue
 
                     history_indices.append(new_scan_points_indices)
                 except Exception as e:
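Note: voxel_downsample_with_mapping, called at the top of each runner's iteration, keeps one representative point per occupied voxel and also returns the inverse index that maps every original point back to its voxel's representative. A short usage sketch of that return contract in plain numpy, mirroring the helper defined in evaluate_pbnbv.py above:

    import numpy as np

    pts = np.array([[0.0,   0.0,   0.0],
                    [0.001, 0.001, 0.0],    # falls in the same 3 mm voxel as the first point
                    [0.01,  0.0,   0.0]])   # a different voxel
    voxel_size = 0.003

    voxel_indices = np.floor(pts / voxel_size).astype(np.int32)
    unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0,
                                               return_inverse=True, return_counts=True)
    idx_sort = np.argsort(inverse)
    idx_unique = idx_sort[np.cumsum(counts) - counts]   # first point of each voxel group
    downsampled = pts[idx_unique]                       # one representative per voxel

    assert downsampled.shape[0] == 2                    # two occupied voxels
    assert downsampled[inverse].shape == pts.shape      # inverse maps every point home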
diff --git a/runners/local_points_inferencer.py b/runners/local_points_inferencer.py
index b5c3f50..d7e4d7b 100644
--- a/runners/local_points_inferencer.py
+++ b/runners/local_points_inferencer.py
@@ -34,7 +34,8 @@ class LocalPointsInferencer(Runner):
         self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
         CM = 0.01
         self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) ** 2
-
+        self.overlap_limit = ConfigManager.get(namespace.Stereotype.RUNNER, "overlap_limit")
+        self.enable_cluster = ConfigManager.get(namespace.Stereotype.RUNNER, "enable_cluster")
         ''' Pipeline '''
         self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
@@ -151,24 +152,12 @@ class LocalPointsInferencer(Runner):
 
             output = self.pipeline(input_data)
             pred_pose_9d = output["pred_pose_9d"]
+            if not self.enable_cluster:
+                pred_pose_9d_candidates = [pred_pose_9d[0]]
+            else:
+                predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
+                pred_pose_9d_candidates = predict_result.candidate_9d_poses
             pred_pose = torch.eye(4, device=pred_pose_9d.device)
-            # # save pred_pose_9d ------
-            # root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result"
-            # scene_dir = os.path.join(root, scene_name)
-            # if not os.path.exists(scene_dir):
-            #     os.makedirs(scene_dir)
-            # pred_9d_path = os.path.join(scene_dir,f"pred_pose_9d_{len(pred_cr_seq)}.npy")
-            # pts_path = os.path.join(scene_dir,f"combined_scanned_pts_{len(pred_cr_seq)}.txt")
-            # np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy()
-            # np.save(pred_9d_path, pred_pose_9d.cpu().numpy())
-            # np.savetxt(pts_path, np_combined_scanned_pts)
-            # # ----- ----- -----
-            predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
-            # -----------------------
-            # import ipdb; ipdb.set_trace()
-            # predict_result.visualize()
-            # -----------------------
-            pred_pose_9d_candidates = predict_result.candidate_9d_poses
             for pred_pose_9d in pred_pose_9d_candidates:
                 #import ipdb; ipdb.set_trace()
                 pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0)
@@ -183,12 +172,13 @@ class LocalPointsInferencer(Runner):
                         curr_overlap_area_threshold = overlap_area_threshold * 0.5
 
                     downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
-                    overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
-                    if not overlap:
-                        Log.yellow("no overlap!")
-                        retry += 1
-                        retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
-                        continue
+                    if self.overlap_limit:
+                        overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
+                        if not overlap:
+                            Log.yellow("no overlap!")
+                            retry += 1
+                            retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
+                            continue
 
                     history_indices.append(new_scan_points_indices)
                 except Exception as e:
diff --git a/utils/__pycache__/data_load.cpython-39.pyc b/utils/__pycache__/data_load.cpython-39.pyc
index 6034bea7c38d2fe6f3f2de929f04c9a7844a8d47..ae12b38fe09165d8374fb3623fb9f5b171119bf7 100644
GIT binary patch
delta 20
acmewt`7e??k(ZZ?0SHd7QQpY?SqA_~y#}-Z

delta 20
acmewt`7e??k(ZZ?0SLCcNNnW(tOEc>iUutJ

diff --git a/utils/__pycache__/render.cpython-39.pyc b/utils/__pycache__/render.cpython-39.pyc
index 38f814ed4dcab61e92cc116af9326b1b98782e81..980c54ef656bfd1164c56292ee28eef95d4f64a4 100644
GIT binary patch
delta 20
acmdm|x=)omk(ZZ?0SIKaC~xH6ECc{Ex&+Gr

delta 20
acmdm|x=)omk(ZZ?0SIPsNp0lbECc{E0|b@;