From 376e7bc48823968bbbbb2ff607ce168b70681c48 Mon Sep 17 00:00:00 2001
From: Yakhyokhuja Valikhujaev
Date: Tue, 30 Dec 2025 19:29:39 +0900
Subject: [PATCH] docs: Add mkdocs material theme for documentation (#51)

* docs: Add mkdocs material theme for documentation

* chore: Add custom folder for rendering
---
 .github/workflows/docs.yml              |  34 ++
 .pre-commit-config.yaml                 |   1 +
 docs/api/reference.md                   | 200 ++++++++++++
 docs/assets/logo.png                    | Bin 0 -> 33472 bytes
 docs/assets/logo.webp                   | Bin 0 -> 33472 bytes
 docs/changelog.md                       |  46 +++
 docs/concepts/coordinate-systems.md     | 191 ++++++++++++
 docs/concepts/execution-providers.md    | 204 ++++++++++++
 docs/concepts/inputs-outputs.md         | 218 +++++++++++++
 docs/concepts/model-cache-offline.md    | 218 +++++++++++++
 docs/concepts/overview.md               | 195 ++++++++++++
 docs/concepts/thresholds-calibration.md | 234 ++++++++++++++
 docs/contributing.md                    |  72 +++++
 docs/faq.md                             | 138 +++++++++
 docs/index.md                           | 137 +++++++++
 docs/installation.md                    | 174 +++++++++++
 docs/license-attribution.md             |  43 +++
 docs/modules/attributes.md              | 279 +++++++++++++++++
 docs/modules/detection.md               | 251 +++++++++++++++
 docs/modules/gaze.md                    | 270 ++++++++++++++++
 docs/modules/landmarks.md               | 250 +++++++++++++++
 docs/modules/parsing.md                 | 265 ++++++++++++++++
 docs/modules/privacy.md                 | 277 +++++++++++++++++
 docs/modules/recognition.md             | 240 +++++++++++++++
 docs/modules/spoofing.md                | 266 ++++++++++++++++
 docs/quickstart.md                      | 362 ++++++++++++++++++++++
 docs/recipes/anonymize-stream.md        |  88 ++++++
 docs/recipes/batch-processing.md        | 353 +++++++++++++++++++++
 docs/recipes/custom-models.md           |  96 ++++++
 docs/recipes/face-search.md             | 340 ++++++++++++++++++++
 docs/recipes/image-pipeline.md          | 279 +++++++++++++++++
 docs/recipes/video-webcam.md            | 392 ++++++++++++++++++++++++
 docs/stylesheets/extra.css              |  43 +++
 docs/troubleshooting.md                 | 159 ++++++++++
 mkdocs.yml                              | 152 +++++++++
 35 files changed, 6467 insertions(+)
 create mode 100644 .github/workflows/docs.yml
 create mode 100644 docs/api/reference.md
 create mode 100644 docs/assets/logo.png
 create mode 100644 docs/assets/logo.webp
 create mode 100644 docs/changelog.md
 create mode 100644 docs/concepts/coordinate-systems.md
 create mode 100644 docs/concepts/execution-providers.md
 create mode 100644 docs/concepts/inputs-outputs.md
 create mode 100644 docs/concepts/model-cache-offline.md
 create mode 100644 docs/concepts/overview.md
 create mode 100644 docs/concepts/thresholds-calibration.md
 create mode 100644 docs/contributing.md
 create mode 100644 docs/faq.md
 create mode 100644 docs/index.md
 create mode 100644 docs/installation.md
 create mode 100644 docs/license-attribution.md
 create mode 100644 docs/modules/attributes.md
 create mode 100644 docs/modules/detection.md
 create mode 100644 docs/modules/gaze.md
 create mode 100644 docs/modules/landmarks.md
 create mode 100644 docs/modules/parsing.md
 create mode 100644 docs/modules/privacy.md
 create mode 100644 docs/modules/recognition.md
 create mode 100644 docs/modules/spoofing.md
 create mode 100644 docs/quickstart.md
 create mode 100644 docs/recipes/anonymize-stream.md
 create mode 100644 docs/recipes/batch-processing.md
 create mode 100644 docs/recipes/custom-models.md
 create mode 100644 docs/recipes/face-search.md
 create mode 100644 docs/recipes/image-pipeline.md
 create mode 100644 docs/recipes/video-webcam.md
 create mode 100644 docs/stylesheets/extra.css
 create mode 100644 docs/troubleshooting.md
 create mode 100644 mkdocs.yml

diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..256c185
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,34 @@
+name: Deploy docs
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+permissions:
+  contents: write
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install mkdocs-material pymdown-extensions
+
+      - name: Build docs
+        run: mkdocs build --strict
+
+      - name: Deploy to GitHub Pages
+        uses: peaceiris/actions-gh-pages@v4
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./site
+          destination_dir: docs
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c023331..84afcf8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,6 +10,7 @@ repos:
       - id: trailing-whitespace
       - id: end-of-file-fixer
      - id: check-yaml
+        exclude: ^mkdocs.yml$
      - id: check-toml
      - id: check-added-large-files
        args: ['--maxkb=1000']
diff --git a/docs/api/reference.md b/docs/api/reference.md
new file mode 100644
index 0000000..71eeeb3
--- /dev/null
+++ b/docs/api/reference.md
@@ -0,0 +1,200 @@
+# API Reference
+
+Quick reference for all UniFace classes and functions.
+
+---
+
+## Detection
+
+### RetinaFace
+
+```python
+from uniface import RetinaFace, RetinaFaceWeights
+
+detector = RetinaFace(
+    model_name=RetinaFaceWeights.MNET_V2,  # Model variant
+    confidence_threshold=0.5,              # Min confidence
+    nms_threshold=0.4,                     # NMS IoU threshold
+    input_size=(640, 640)                  # Input resolution
+)
+
+faces = detector.detect(image)  # Returns list[Face]
+```
+
+### SCRFD
+
+```python
+from uniface import SCRFD, SCRFDWeights
+
+detector = SCRFD(
+    model_name=SCRFDWeights.SCRFD_10G_KPS,
+    confidence_threshold=0.5,
+    nms_threshold=0.4,
+    input_size=(640, 640)
+)
+```
+
+### YOLOv5Face
+
+```python
+from uniface import YOLOv5Face, YOLOv5FaceWeights
+
+detector = YOLOv5Face(
+    model_name=YOLOv5FaceWeights.YOLOV5S,
+    confidence_threshold=0.6,
+    nms_threshold=0.5
+)
+```
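+
+All three detectors return the same `list[Face]`, so results can be inspected with plain OpenCV. A minimal visualization sketch (it uses only the documented `bbox` and `landmarks` fields; the bundled `draw_detections` utility listed below does the same job):
+
+```python
+import cv2
+
+for face in faces:
+    x1, y1, x2, y2 = face.bbox.astype(int)
+    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)        # box
+    for lx, ly in face.landmarks.astype(int):
+        cv2.circle(image, (int(lx), int(ly)), 2, (0, 0, 255), -1)   # keypoints
+
+cv2.imwrite("detections.jpg", image)
+```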
+
+---
+
+## Recognition
+
+### ArcFace
+
+```python
+from uniface import ArcFace, ArcFaceWeights
+
+recognizer = ArcFace(model_name=ArcFaceWeights.MNET)
+
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+# Returns: np.ndarray (1, 512)
+```
+
+### MobileFace / SphereFace
+
+```python
+from uniface import MobileFace, MobileFaceWeights, SphereFace, SphereFaceWeights
+
+recognizer = MobileFace(model_name=MobileFaceWeights.MNET_V2)
+recognizer = SphereFace(model_name=SphereFaceWeights.SPHERE20)  # alternative backbone
+```
+
+---
+
+## Landmarks
+
+```python
+from uniface import Landmark106
+
+landmarker = Landmark106()
+landmarks = landmarker.get_landmarks(image, bbox)
+# Returns: np.ndarray (106, 2)
+```
+
+---
+
+## Attributes
+
+### AgeGender
+
+```python
+from uniface import AgeGender
+
+predictor = AgeGender()
+result = predictor.predict(image, bbox)
+# Returns: AttributeResult(gender, age, sex)
+```
+
+### FairFace
+
+```python
+from uniface import FairFace
+
+predictor = FairFace()
+result = predictor.predict(image, bbox)
+# Returns: AttributeResult(gender, age_group, race, sex)
+```
+
+---
+
+## Gaze
+
+```python
+from uniface import MobileGaze, GazeWeights
+
+gaze = MobileGaze(model_name=GazeWeights.RESNET34)
+result = gaze.estimate(face_crop)
+# Returns: GazeResult(pitch, yaw) in radians
+```
+
+---
+
+## Parsing
+
+```python
+from uniface.parsing import BiSeNet, ParsingWeights
+
+parser = BiSeNet(model_name=ParsingWeights.RESNET18)
+mask = parser.parse(face_image)
+# Returns: np.ndarray (H, W) with values 0-18
+```
+
+---
+
+## Anti-Spoofing
+
+```python
+from uniface.spoofing import MiniFASNet, MiniFASNetWeights
+
+spoofer = MiniFASNet(model_name=MiniFASNetWeights.V2)
+result = spoofer.predict(image, bbox)
+# Returns: SpoofingResult(is_real, confidence)
+```
+
+---
+
+## Privacy
+
+```python
+from uniface.privacy import BlurFace, anonymize_faces
+
+# One-liner
+anonymized = anonymize_faces(image, method='pixelate')
+
+# Manual control
+blurrer = BlurFace(method='gaussian', blur_strength=3.0)
+anonymized = blurrer.anonymize(image, faces)
+```
+
+---
+
+## Types
+
+### Face
+
+```python
+@dataclass
+class Face:
+    bbox: np.ndarray       # [x1, y1, x2, y2]
+    confidence: float      # 0.0 to 1.0
+    landmarks: np.ndarray  # (5, 2)
+    embedding: np.ndarray | None = None
+    gender: int | None = None
+    age: int | None = None
+    age_group: str | None = None
+    race: str | None = None
+    emotion: str | None = None
+```
+
+### Result Types
+
+```python
+GazeResult(pitch: float, yaw: float)
+SpoofingResult(is_real: bool, confidence: float)
+AttributeResult(gender: int, age: int, age_group: str, race: str)
+EmotionResult(emotion: str, confidence: float)
+```
+
+---
+
+## Utilities
+
+```python
+from uniface import (
+    compute_similarity,    # Compare embeddings
+    face_alignment,        # Align face for recognition
+    draw_detections,       # Visualize detections
+    vis_parsing_maps,      # Visualize parsing
+    verify_model_weights,  # Download/verify models
+)
+```
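+
+A hedged usage sketch for `compute_similarity` (assumed here to take two embeddings from `get_normalized_embedding` and return a scalar score where higher means more likely the same person):
+
+```python
+# image_a/image_b and face_a/face_b are illustrative placeholders
+emb_a = recognizer.get_normalized_embedding(image_a, face_a.landmarks)
+emb_b = recognizer.get_normalized_embedding(image_b, face_b.landmarks)
+
+score = compute_similarity(emb_a, emb_b)  # scalar; higher = more similar
+print(f"Similarity: {score:.3f}")
+```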
diff --git a/docs/assets/logo.png b/docs/assets/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..534d9761c6ca1e5ea0a6611e2cfb2b970f94d96a
GIT binary patch
[33472-byte binary payload omitted]

diff --git a/docs/assets/logo.webp b/docs/assets/logo.webp
GIT binary patch
[33472-byte binary payload omitted -- byte-for-byte identical to logo.png]
[patch truncated: docs/changelog.md and the start of docs/concepts/coordinate-systems.md are not shown]

+```python
+def crop_face(image, bbox, margin=0.0):
+    """Crop a face region, optionally expanded by a relative margin."""
+    h, w = image.shape[:2]
+    x1, y1, x2, y2 = map(int, bbox)
+
+    if margin > 0:
+        bw, bh = x2 - x1, y2 - y1
+        x1 = max(0, x1 - int(bw * margin))
+        y1 = max(0, y1 - int(bh * margin))
+        x2 = min(w, x2 + int(bw * margin))
+        y2 = min(h, y2 + int(bh * margin))
+
+    return image[y1:y2, x1:x2]
+
+# Usage
+face_crop = crop_face(image, face.bbox, margin=0.1)
+```
+
+---
+
+## Gaze Angles
+
+Gaze estimation returns pitch and yaw in **radians**:
+
+```python
+result = gaze_estimator.estimate(face_crop)
+
+# Angles in radians
+pitch = result.pitch  # Vertical: + = up, - = down
+yaw = result.yaw      # Horizontal: + = right, - = left
+
+# Convert to degrees
+import numpy as np
+pitch_deg = np.degrees(pitch)
+yaw_deg = np.degrees(yaw)
+```
+
+**Angle Reference:**
+
+```
+               pitch = +90° (up)
+                     │
+                     │
+yaw = -90° ──────────┼────────── yaw = +90°
+(left)               │           (right)
+                     │
+               pitch = -90° (down)
+```
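+
+A small helper for turning the raw radians into a coarse direction label (sign conventions as above; the 10° dead-zone threshold is an arbitrary choice):
+
+```python
+def gaze_direction(pitch, yaw, thresh_deg=10.0):
+    """Map pitch/yaw in radians to a label like "up-left" or "center"."""
+    p, y = np.degrees(pitch), np.degrees(yaw)
+    vert = "up" if p > thresh_deg else "down" if p < -thresh_deg else ""
+    horiz = "right" if y > thresh_deg else "left" if y < -thresh_deg else ""
+    return f"{vert}-{horiz}".strip("-") or "center"
+
+print(gaze_direction(result.pitch, result.yaw))  # e.g. "up-left" or "center"
+```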
+ = up, - = down +yaw = result.yaw # Horizontal: + = right, - = left + +# Convert to degrees +import numpy as np +pitch_deg = np.degrees(pitch) +yaw_deg = np.degrees(yaw) +``` + +**Angle Reference:** + +``` + pitch = +90° (up) + │ + │ +yaw = -90° ────┼──── yaw = +90° +(left) │ (right) + │ + pitch = -90° (down) +``` + +--- + +## Face Alignment + +Face alignment uses 5-point landmarks to normalize face orientation: + +```python +from uniface import face_alignment + +# Align face to standard template +aligned_face = face_alignment(image, face.landmarks) +# Output: 112x112 aligned face image +``` + +The alignment transforms faces to a canonical pose for better recognition accuracy. + +--- + +## Next Steps + +- [Inputs & Outputs](inputs-outputs.md) - Data types reference +- [Recognition Module](../modules/recognition.md) - Face recognition details diff --git a/docs/concepts/execution-providers.md b/docs/concepts/execution-providers.md new file mode 100644 index 0000000..0b30751 --- /dev/null +++ b/docs/concepts/execution-providers.md @@ -0,0 +1,204 @@ +# Execution Providers + +UniFace uses ONNX Runtime for model inference, which supports multiple hardware acceleration backends. + +--- + +## Automatic Provider Selection + +UniFace automatically selects the optimal execution provider based on available hardware: + +```python +from uniface import RetinaFace + +# Automatically uses best available provider +detector = RetinaFace() +``` + +**Priority order:** + +1. **CUDAExecutionProvider** - NVIDIA GPU +2. **CoreMLExecutionProvider** - Apple Silicon +3. **CPUExecutionProvider** - Fallback + +--- + +## Check Available Providers + +```python +import onnxruntime as ort + +providers = ort.get_available_providers() +print("Available providers:", providers) +``` + +**Example outputs:** + +=== "macOS (Apple Silicon)" + + ``` + ['CoreMLExecutionProvider', 'CPUExecutionProvider'] + ``` + +=== "Linux (NVIDIA GPU)" + + ``` + ['CUDAExecutionProvider', 'CPUExecutionProvider'] + ``` + +=== "Windows (CPU)" + + ``` + ['CPUExecutionProvider'] + ``` + +--- + +## Platform-Specific Setup + +### Apple Silicon (M1/M2/M3/M4) + +No additional setup required. ARM64 optimizations are built into `onnxruntime`: + +```bash +pip install uniface +``` + +Verify ARM64: + +```bash +python -c "import platform; print(platform.machine())" +# Should show: arm64 +``` + +!!! tip "Performance" + Apple Silicon Macs use CoreML acceleration automatically, providing excellent performance for face analysis tasks. + +--- + +### NVIDIA GPU (CUDA) + +Install with GPU support: + +```bash +pip install uniface[gpu] +``` + +**Requirements:** + +- CUDA 11.x or 12.x +- cuDNN 8.x +- Compatible NVIDIA driver + +Verify CUDA: + +```python +import onnxruntime as ort + +if 'CUDAExecutionProvider' in ort.get_available_providers(): + print("CUDA is available!") +else: + print("CUDA not available, using CPU") +``` + +--- + +### CPU Fallback + +CPU execution is always available: + +```bash +pip install uniface +``` + +Works on all platforms without additional configuration. + +--- + +## Internal API + +For advanced use cases, you can access the provider utilities: + +```python +from uniface.onnx_utils import get_available_providers, create_onnx_session + +# Check available providers +providers = get_available_providers() +print(f"Available: {providers}") + +# Models use create_onnx_session() internally +# which auto-selects the best provider +``` + +--- + +## Performance Tips + +### 1. 
Use GPU When Available
+
+For batch processing or real-time applications, GPU acceleration provides significant speedups:
+
+```bash
+pip install uniface[gpu]
+```
+
+### 2. Optimize Input Size
+
+Smaller input sizes are faster but may reduce accuracy:
+
+```python
+from uniface import RetinaFace
+
+# Faster, lower accuracy
+detector = RetinaFace(input_size=(320, 320))
+
+# Balanced (default)
+detector = RetinaFace(input_size=(640, 640))
+```
+
+### 3. Batch Processing
+
+When processing many images, initialize the detector once and reuse it for the whole batch; this amortizes model loading and keeps the hardware busy:
+
+```python
+import cv2
+
+# One detector session, many images
+for image_path in image_paths:
+    image = cv2.imread(image_path)
+    faces = detector.detect(image)
+    # ...
+```
+
+---
+
+## Troubleshooting
+
+### CUDA Not Detected
+
+1. Verify CUDA installation:
+   ```bash
+   nvidia-smi
+   ```
+
+2. Check CUDA version compatibility with ONNX Runtime
+
+3. Reinstall with GPU support:
+   ```bash
+   pip uninstall onnxruntime onnxruntime-gpu
+   pip install uniface[gpu]
+   ```
+
+### Slow Performance on Mac
+
+Verify you're using ARM64 Python (not Rosetta):
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64 (not x86_64)
+```
+
+---
+
+## Next Steps
+
+- [Model Cache & Offline](model-cache-offline.md) - Model management
+- [Thresholds & Calibration](thresholds-calibration.md) - Tuning parameters
diff --git a/docs/concepts/inputs-outputs.md b/docs/concepts/inputs-outputs.md
new file mode 100644
index 0000000..7a2f68b
--- /dev/null
+++ b/docs/concepts/inputs-outputs.md
@@ -0,0 +1,218 @@
+# Inputs & Outputs
+
+This page describes the data types used throughout UniFace.
+
+---
+
+## Input: Images
+
+All models accept NumPy arrays in **BGR format** (OpenCV default):
+
+```python
+import cv2
+
+# Load image (BGR format)
+image = cv2.imread("photo.jpg")
+print(f"Shape: {image.shape}")  # (H, W, 3)
+print(f"Dtype: {image.dtype}")  # uint8
+```
+
+!!! warning "Color Format"
+    UniFace expects **BGR** format (OpenCV default). If using PIL or other libraries, convert first:
+
+    ```python
+    from PIL import Image
+    import numpy as np
+
+    pil_image = Image.open("photo.jpg")
+    bgr_image = np.array(pil_image)[:, :, ::-1]  # RGB → BGR
+    ```
+
+---
+
+## Output: Face Dataclass
+
+Detection returns a list of `Face` objects:
+
+```python
+from dataclasses import dataclass
+import numpy as np
+
+@dataclass
+class Face:
+    # Required (from detection)
+    bbox: np.ndarray       # [x1, y1, x2, y2]
+    confidence: float      # 0.0 to 1.0
+    landmarks: np.ndarray  # (5, 2) or (106, 2)
+
+    # Optional (enriched by analyzers)
+    embedding: np.ndarray | None = None
+    gender: int | None = None     # 0=Female, 1=Male
+    age: int | None = None        # Years
+    age_group: str | None = None  # "20-29", etc.
+    race: str | None = None       # "East Asian", etc.
+    emotion: str | None = None    # "Happy", etc.
+ emotion_confidence: float | None = None +``` + +### Properties + +```python +face = faces[0] + +# Bounding box formats +face.bbox_xyxy # [x1, y1, x2, y2] - same as bbox +face.bbox_xywh # [x1, y1, width, height] + +# Gender as string +face.sex # "Female" or "Male" (None if not predicted) +``` + +### Methods + +```python +# Compute similarity with another face +similarity = face1.compute_similarity(face2) + +# Convert to dictionary +face_dict = face.to_dict() +``` + +--- + +## Result Types + +### GazeResult + +```python +from dataclasses import dataclass + +@dataclass(frozen=True) +class GazeResult: + pitch: float # Vertical angle (radians), + = up + yaw: float # Horizontal angle (radians), + = right +``` + +**Usage:** + +```python +import numpy as np + +result = gaze_estimator.estimate(face_crop) +print(f"Pitch: {np.degrees(result.pitch):.1f}°") +print(f"Yaw: {np.degrees(result.yaw):.1f}°") +``` + +--- + +### SpoofingResult + +```python +@dataclass(frozen=True) +class SpoofingResult: + is_real: bool # True = real, False = fake + confidence: float # 0.0 to 1.0 +``` + +**Usage:** + +```python +result = spoofer.predict(image, face.bbox) +label = "Real" if result.is_real else "Fake" +print(f"{label}: {result.confidence:.1%}") +``` + +--- + +### AttributeResult + +```python +@dataclass(frozen=True) +class AttributeResult: + gender: int # 0=Female, 1=Male + age: int | None # Years (AgeGender model) + age_group: str | None # "20-29" (FairFace model) + race: str | None # Race label (FairFace model) + + @property + def sex(self) -> str: + return "Female" if self.gender == 0 else "Male" +``` + +**Usage:** + +```python +# AgeGender model +result = age_gender.predict(image, face.bbox) +print(f"{result.sex}, {result.age} years old") + +# FairFace model +result = fairface.predict(image, face.bbox) +print(f"{result.sex}, {result.age_group}, {result.race}") +``` + +--- + +### EmotionResult + +```python +@dataclass(frozen=True) +class EmotionResult: + emotion: str # "Happy", "Sad", etc. + confidence: float # 0.0 to 1.0 +``` + +--- + +## Embeddings + +Face recognition models return normalized 512-dimensional embeddings: + +```python +embedding = recognizer.get_normalized_embedding(image, landmarks) +print(f"Shape: {embedding.shape}") # (1, 512) +print(f"Norm: {np.linalg.norm(embedding):.4f}") # ~1.0 +``` + +### Similarity Computation + +```python +from uniface import compute_similarity + +similarity = compute_similarity(embedding1, embedding2) +# Returns: float between -1 and 1 (cosine similarity) +``` + +--- + +## Parsing Masks + +Face parsing returns a segmentation mask: + +```python +mask = parser.parse(face_image) +print(f"Shape: {mask.shape}") # (H, W) +print(f"Classes: {np.unique(mask)}") # [0, 1, 2, ...] 
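+
+# Binary mask for a single component (hair = 18 in the class table below;
+# shown for illustration)
+hair_mask = (mask == 18).astype(np.uint8) * 255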
+``` + +**19 Classes:** + +| ID | Class | ID | Class | +|----|-------|----|-------| +| 0 | Background | 10 | Ear Ring | +| 1 | Skin | 11 | Nose | +| 2 | Left Eyebrow | 12 | Mouth | +| 3 | Right Eyebrow | 13 | Upper Lip | +| 4 | Left Eye | 14 | Lower Lip | +| 5 | Right Eye | 15 | Neck | +| 6 | Eye Glasses | 16 | Neck Lace | +| 7 | Left Ear | 17 | Cloth | +| 8 | Right Ear | 18 | Hair | +| 9 | Hat | | | + +--- + +## Next Steps + +- [Coordinate Systems](coordinate-systems.md) - Bbox and landmark formats +- [Thresholds & Calibration](thresholds-calibration.md) - Tuning confidence thresholds diff --git a/docs/concepts/model-cache-offline.md b/docs/concepts/model-cache-offline.md new file mode 100644 index 0000000..eb05c64 --- /dev/null +++ b/docs/concepts/model-cache-offline.md @@ -0,0 +1,218 @@ +# Model Cache & Offline Use + +UniFace automatically downloads and caches models. This page explains how model management works. + +--- + +## Automatic Download + +Models are downloaded on first use: + +```python +from uniface import RetinaFace + +# First run: downloads model to cache +detector = RetinaFace() # ~3.5 MB download + +# Subsequent runs: loads from cache +detector = RetinaFace() # Instant +``` + +--- + +## Cache Location + +Default cache directory: + +``` +~/.uniface/models/ +``` + +**Example structure:** + +``` +~/.uniface/models/ +├── retinaface_mv2.onnx +├── w600k_mbf.onnx +├── 2d106det.onnx +├── gaze_resnet34.onnx +├── parsing_resnet18.onnx +└── ... +``` + +--- + +## Custom Cache Directory + +Specify a custom cache location: + +```python +from uniface.model_store import verify_model_weights +from uniface.constants import RetinaFaceWeights + +# Download to custom directory +model_path = verify_model_weights( + RetinaFaceWeights.MNET_V2, + root='./my_models' +) +print(f"Model at: {model_path}") +``` + +--- + +## Pre-Download Models + +Download models before deployment: + +```python +from uniface.model_store import verify_model_weights +from uniface.constants import ( + RetinaFaceWeights, + ArcFaceWeights, + AgeGenderWeights, +) + +# Download all needed models +models = [ + RetinaFaceWeights.MNET_V2, + ArcFaceWeights.MNET, + AgeGenderWeights.DEFAULT, +] + +for model in models: + path = verify_model_weights(model) + print(f"Downloaded: {path}") +``` + +Or use the CLI tool: + +```bash +python tools/download_model.py +``` + +--- + +## Offline Use + +For air-gapped or offline environments: + +### 1. Pre-download models + +On a connected machine: + +```python +from uniface.model_store import verify_model_weights +from uniface.constants import RetinaFaceWeights + +path = verify_model_weights(RetinaFaceWeights.MNET_V2) +print(f"Copy from: {path}") +``` + +### 2. Copy to target machine + +```bash +# Copy the entire cache directory +scp -r ~/.uniface/models/ user@offline-machine:~/.uniface/models/ +``` + +### 3. Use normally + +```python +# Models load from local cache +from uniface import RetinaFace +detector = RetinaFace() # No network required +``` + +--- + +## Model Verification + +Models are verified with SHA-256 checksums: + +```python +from uniface.constants import MODEL_SHA256, RetinaFaceWeights + +# Check expected checksum +expected = MODEL_SHA256[RetinaFaceWeights.MNET_V2] +print(f"Expected SHA256: {expected}") +``` + +If a model fails verification, it's re-downloaded automatically. 
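+
+To spot-check a cached file yourself, recompute the digest with Python's built-in `hashlib` (a minimal sketch using the constants above; the path comes from `verify_model_weights`):
+
+```python
+import hashlib
+
+from uniface.constants import MODEL_SHA256, RetinaFaceWeights
+from uniface.model_store import verify_model_weights
+
+# Resolve (downloading if necessary) the cached model file
+model_path = verify_model_weights(RetinaFaceWeights.MNET_V2)
+
+# Recompute the checksum of the cached file
+with open(model_path, "rb") as f:
+    digest = hashlib.sha256(f.read()).hexdigest()
+
+print(digest == MODEL_SHA256[RetinaFaceWeights.MNET_V2])  # True for an intact file
+```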
+ +--- + +## Available Models + +### Detection Models + +| Model | Size | Download | +|-------|------|----------| +| RetinaFace MNET_025 | 1.7 MB | ✅ | +| RetinaFace MNET_V2 | 3.5 MB | ✅ | +| RetinaFace RESNET34 | 56 MB | ✅ | +| SCRFD 500M | 2.5 MB | ✅ | +| SCRFD 10G | 17 MB | ✅ | +| YOLOv5n-Face | 11 MB | ✅ | +| YOLOv5s-Face | 28 MB | ✅ | +| YOLOv5m-Face | 82 MB | ✅ | + +### Recognition Models + +| Model | Size | Download | +|-------|------|----------| +| ArcFace MNET | 8 MB | ✅ | +| ArcFace RESNET | 166 MB | ✅ | +| MobileFace MNET_V2 | 4 MB | ✅ | +| SphereFace SPHERE20 | 50 MB | ✅ | + +### Other Models + +| Model | Size | Download | +|-------|------|----------| +| Landmark106 | 14 MB | ✅ | +| AgeGender | 8 MB | ✅ | +| FairFace | 44 MB | ✅ | +| Gaze ResNet34 | 82 MB | ✅ | +| BiSeNet ResNet18 | 51 MB | ✅ | +| MiniFASNet V2 | 1.2 MB | ✅ | + +--- + +## Clear Cache + +Remove cached models: + +```bash +# Remove all cached models +rm -rf ~/.uniface/models/ + +# Remove specific model +rm ~/.uniface/models/retinaface_mv2.onnx +``` + +Models will be re-downloaded on next use. + +--- + +## Environment Variables + +Set custom cache location via environment variable: + +```bash +export UNIFACE_CACHE_DIR=/path/to/custom/cache +``` + +```python +import os +os.environ['UNIFACE_CACHE_DIR'] = '/path/to/custom/cache' + +from uniface import RetinaFace +detector = RetinaFace() # Uses custom cache +``` + +--- + +## Next Steps + +- [Thresholds & Calibration](thresholds-calibration.md) - Tune model parameters +- [Detection Module](../modules/detection.md) - Detection model details diff --git a/docs/concepts/overview.md b/docs/concepts/overview.md new file mode 100644 index 0000000..747a940 --- /dev/null +++ b/docs/concepts/overview.md @@ -0,0 +1,195 @@ +# Overview + +UniFace is designed as a modular, production-ready face analysis library. This page explains the architecture and design principles. + +--- + +## Architecture + +UniFace follows a modular architecture where each face analysis task is handled by a dedicated module: + +```mermaid +graph TB + subgraph Input + IMG[Image/Frame] + end + + subgraph Detection + DET[RetinaFace / SCRFD / YOLOv5Face] + end + + subgraph Analysis + REC[Recognition] + LMK[Landmarks] + ATTR[Attributes] + GAZE[Gaze] + PARSE[Parsing] + SPOOF[Anti-Spoofing] + PRIV[Privacy] + end + + subgraph Output + FACE[Face Objects] + end + + IMG --> DET + DET --> REC + DET --> LMK + DET --> ATTR + DET --> GAZE + DET --> PARSE + DET --> SPOOF + DET --> PRIV + REC --> FACE + LMK --> FACE + ATTR --> FACE +``` + +--- + +## Design Principles + +### 1. ONNX-First + +All models use ONNX Runtime for inference: + +- **Cross-platform**: Same models work on macOS, Linux, Windows +- **Hardware acceleration**: Automatic selection of optimal provider +- **Production-ready**: No Python-only dependencies for inference + +### 2. Minimal Dependencies + +Core dependencies are kept minimal: + +``` +numpy # Array operations +opencv-python # Image processing +onnxruntime # Model inference +requests # Model download +tqdm # Progress bars +``` + +### 3. Simple API + +Factory functions and direct instantiation: + +```python +# Factory function +detector = create_detector('retinaface') + +# Direct instantiation (recommended) +from uniface import RetinaFace +detector = RetinaFace() +``` + +### 4. Type Safety + +Full type hints throughout: + +```python +def detect(self, image: np.ndarray) -> list[Face]: + ... 
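+
+# Analyzer methods are annotated the same way; this signature is illustrative
+# (it mirrors the predict() calls shown throughout these docs)
+def predict(self, image: np.ndarray, bbox: np.ndarray) -> AttributeResult:
+    ...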
+``` + +--- + +## Module Structure + +``` +uniface/ +├── detection/ # Face detection (RetinaFace, SCRFD, YOLOv5Face) +├── recognition/ # Face recognition (ArcFace, MobileFace, SphereFace) +├── landmark/ # 106-point landmarks +├── attribute/ # Age, gender, emotion, race +├── parsing/ # Face semantic segmentation +├── gaze/ # Gaze estimation +├── spoofing/ # Anti-spoofing +├── privacy/ # Face anonymization +├── types.py # Dataclasses (Face, GazeResult, etc.) +├── constants.py # Model weights and URLs +├── model_store.py # Model download and caching +├── onnx_utils.py # ONNX Runtime utilities +└── visualization.py # Drawing utilities +``` + +--- + +## Workflow + +A typical face analysis workflow: + +```python +import cv2 +from uniface import RetinaFace, ArcFace, AgeGender + +# 1. Initialize models +detector = RetinaFace() +recognizer = ArcFace() +age_gender = AgeGender() + +# 2. Load image +image = cv2.imread("photo.jpg") + +# 3. Detect faces +faces = detector.detect(image) + +# 4. Analyze each face +for face in faces: + # Recognition embedding + embedding = recognizer.get_normalized_embedding(image, face.landmarks) + + # Attributes + attrs = age_gender.predict(image, face.bbox) + + print(f"Face: {attrs.sex}, {attrs.age} years") +``` + +--- + +## FaceAnalyzer + +For convenience, `FaceAnalyzer` combines multiple modules: + +```python +from uniface import FaceAnalyzer + +analyzer = FaceAnalyzer( + detect=True, + recognize=True, + attributes=True +) + +faces = analyzer.analyze(image) +for face in faces: + print(f"Age: {face.age}, Gender: {face.sex}") + print(f"Embedding: {face.embedding.shape}") +``` + +--- + +## Model Lifecycle + +1. **First use**: Model is downloaded from GitHub releases +2. **Cached**: Stored in `~/.uniface/models/` +3. **Verified**: SHA-256 checksum validation +4. **Loaded**: ONNX Runtime session created +5. **Inference**: Hardware-accelerated execution + +```python +# Models auto-download on first use +detector = RetinaFace() # Downloads if not cached + +# Or manually pre-download +from uniface.model_store import verify_model_weights +from uniface.constants import RetinaFaceWeights + +path = verify_model_weights(RetinaFaceWeights.MNET_V2) +``` + +--- + +## Next Steps + +- [Inputs & Outputs](inputs-outputs.md) - Understand data types +- [Execution Providers](execution-providers.md) - Hardware acceleration +- [Detection Module](../modules/detection.md) - Start with face detection diff --git a/docs/concepts/thresholds-calibration.md b/docs/concepts/thresholds-calibration.md new file mode 100644 index 0000000..1917d94 --- /dev/null +++ b/docs/concepts/thresholds-calibration.md @@ -0,0 +1,234 @@ +# Thresholds & Calibration + +This page explains how to tune detection and recognition thresholds for your use case. 
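+
+A quick way to feel out a threshold is to sweep it on a representative image and watch the detection count change (a minimal sketch; `image` is any BGR array as in the examples below, and re-creating the detector per threshold just keeps the sketch simple):
+
+```python
+from uniface import RetinaFace
+
+for t in (0.3, 0.5, 0.7, 0.9):
+    detector = RetinaFace(confidence_threshold=t)
+    print(f"threshold={t}: {len(detector.detect(image))} faces")
+```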
+
+---
+
+## Detection Thresholds
+
+### Confidence Threshold
+
+Controls minimum confidence for face detection:
+
+```python
+from uniface import RetinaFace
+
+# Default (balanced)
+detector = RetinaFace(confidence_threshold=0.5)
+
+# High precision (fewer false positives)
+detector = RetinaFace(confidence_threshold=0.8)
+
+# High recall (catch more faces)
+detector = RetinaFace(confidence_threshold=0.3)
+```
+
+**Guidelines:**
+
+| Threshold | Use Case |
+|-----------|----------|
+| 0.3 - 0.4 | Maximum recall (research, analysis) |
+| 0.5 - 0.6 | Balanced (default, general use) |
+| 0.7 - 0.9 | High precision (production, security) |
+
+---
+
+### NMS Threshold
+
+Non-Maximum Suppression removes overlapping detections:
+
+```python
+# Default
+detector = RetinaFace(nms_threshold=0.4)
+
+# Stricter (fewer overlapping boxes)
+detector = RetinaFace(nms_threshold=0.3)
+
+# Looser (for crowded scenes)
+detector = RetinaFace(nms_threshold=0.5)
+```
+
+---
+
+### Input Size
+
+Affects detection accuracy and speed:
+
+```python
+# Faster, lower accuracy
+detector = RetinaFace(input_size=(320, 320))
+
+# Balanced (default)
+detector = RetinaFace(input_size=(640, 640))
+
+# Higher accuracy, slower
+detector = RetinaFace(input_size=(1280, 1280))
+```
+
+!!! tip "Dynamic Size"
+    For RetinaFace, enable dynamic input for variable image sizes:
+    ```python
+    detector = RetinaFace(dynamic_size=True)
+    ```
+
+---
+
+## Recognition Thresholds
+
+### Similarity Threshold
+
+For identity verification (same person check):
+
+```python
+from uniface import compute_similarity
+
+similarity = compute_similarity(embedding1, embedding2)
+
+# Threshold interpretation
+if similarity > 0.6:
+    print("Same person (high confidence)")
+elif similarity > 0.4:
+    print("Uncertain (manual review)")
+else:
+    print("Different people")
+```
+
+**Recommended thresholds:**
+
+| Threshold | Decision | False Accept Rate |
+|-----------|----------|-------------------|
+| 0.4 | Low security | Higher FAR |
+| 0.5 | Balanced | Moderate FAR |
+| 0.6 | High security | Lower FAR |
+| 0.7 | Very strict | Very low FAR |
+
+---
+
+### Calibration for Your Dataset
+
+Test on your data to find optimal thresholds:
+
+```python
+import cv2
+import numpy as np
+
+def calibrate_threshold(same_pairs, diff_pairs, recognizer, detector):
+    """Find optimal threshold for your dataset."""
+    same_scores = []
+    diff_scores = []
+
+    # Compute similarities for same-person pairs
+    for img1_path, img2_path in same_pairs:
+        img1 = cv2.imread(img1_path)
+        img2 = cv2.imread(img2_path)
+
+        faces1 = detector.detect(img1)
+        faces2 = detector.detect(img2)
+
+        if faces1 and faces2:
+            emb1 = recognizer.get_normalized_embedding(img1, faces1[0].landmarks)
+            emb2 = recognizer.get_normalized_embedding(img2, faces2[0].landmarks)
+            same_scores.append(np.dot(emb1, emb2.T)[0][0])
+
+    # Compute similarities for different-person pairs
+    for img1_path, img2_path in diff_pairs:
+        # ...
similar process, mirroring the same-person loop
+        img1 = cv2.imread(img1_path)
+        img2 = cv2.imread(img2_path)
+
+        faces1 = detector.detect(img1)
+        faces2 = detector.detect(img2)
+
+        if faces1 and faces2:
+            emb1 = recognizer.get_normalized_embedding(img1, faces1[0].landmarks)
+            emb2 = recognizer.get_normalized_embedding(img2, faces2[0].landmarks)
+            diff_scores.append(np.dot(emb1, emb2.T)[0][0])
+
+    # Find optimal threshold
+    thresholds = np.arange(0.3, 0.8, 0.05)
+    best_threshold = 0.5
+    best_accuracy = 0
+
+    for thresh in thresholds:
+        tp = sum(1 for s in same_scores if s >= thresh)
+        tn = sum(1 for s in diff_scores if s < thresh)
+        accuracy = (tp + tn) / (len(same_scores) + len(diff_scores))
+
+        if accuracy > best_accuracy:
+            best_accuracy = accuracy
+            best_threshold = thresh
+
+    return best_threshold, best_accuracy
+```
+
+---
+
+## Anti-Spoofing Thresholds
+
+The MiniFASNet model returns a confidence score:
+
+```python
+from uniface.spoofing import MiniFASNet
+
+spoofer = MiniFASNet()
+result = spoofer.predict(image, face.bbox)
+
+# Default threshold (0.5)
+if result.is_real:  # confidence > 0.5
+    print("Real face")
+
+# Custom threshold for high security
+SPOOF_THRESHOLD = 0.7
+if result.confidence > SPOOF_THRESHOLD:
+    print("Real face (high confidence)")
+else:
+    print("Potentially fake")
+```
+
+---
+
+## Attribute Model Confidence
+
+### Emotion
+
+```python
+result = emotion_predictor.predict(image, landmarks)
+
+# Filter low-confidence predictions
+if result.confidence > 0.6:
+    print(f"Emotion: {result.emotion}")
+else:
+    print("Uncertain emotion")
+```
+
+---
+
+## Visualization Threshold
+
+For drawing detections, filter by confidence:
+
+```python
+from uniface.visualization import draw_detections
+
+# Only draw high-confidence detections
+bboxes = [f.bbox for f in faces if f.confidence > 0.7]
+scores = [f.confidence for f in faces if f.confidence > 0.7]
+landmarks = [f.landmarks for f in faces if f.confidence > 0.7]
+
+draw_detections(
+    image=image,
+    bboxes=bboxes,
+    scores=scores,
+    landmarks=landmarks,
+    vis_threshold=0.6  # Additional visualization filter
+)
+```
+
+---
+
+## Summary
+
+| Parameter | Default | Range | Lower = | Higher = |
+|-----------|---------|-------|---------|----------|
+| `confidence_threshold` | 0.5 | 0.1-0.9 | More detections | Fewer false positives |
+| `nms_threshold` | 0.4 | 0.1-0.7 | Fewer overlaps | More overlapping boxes |
+| Similarity threshold | 0.6 | 0.3-0.8 | More matches (FAR↑) | Fewer matches (FRR↑) |
+| Spoof confidence | 0.5 | 0.3-0.9 | More "real" | Stricter liveness |
+
+---
+
+## Next Steps
+
+- [Detection Module](../modules/detection.md) - Detection model options
+- [Recognition Module](../modules/recognition.md) - Recognition model options
diff --git a/docs/contributing.md b/docs/contributing.md
new file mode 100644
index 0000000..f4900a7
--- /dev/null
+++ b/docs/contributing.md
@@ -0,0 +1,72 @@
+# Contributing
+
+Thank you for contributing to UniFace!
+
+---
+
+## Quick Start
+
+```bash
+# Clone
+git clone https://github.com/yakhyo/uniface.git
+cd uniface
+
+# Install dev dependencies
+pip install -e ".[dev]"
+
+# Run tests
+pytest
+```
+
+---
+
+## Code Style
+
+We use [Ruff](https://docs.astral.sh/ruff/) for formatting:
+
+```bash
+ruff format .
+ruff check . --fix
+```
+
+**Guidelines:**
+
+- Line length: 120
+- Python 3.11+ type hints
+- Google-style docstrings
+
+---
+
+## Pre-commit Hooks
+
+```bash
+pip install pre-commit
+pre-commit install
+pre-commit run --all-files
+```
+
+---
+
+## Pull Request Process
+
+1. Fork the repository
2. Create a feature branch
+3. Write tests for new features
+4. Ensure tests pass
+5. Submit PR with clear description
+
+---
+
+## Adding New Models
+
+1. Create model class in appropriate submodule
+2. Add weight constants to `uniface/constants.py`
+3. Export in `__init__.py` files
+4. Write tests in `tests/`
+5. 
Add example in `tools/` or notebooks + +--- + +## Questions? + +Open an issue on [GitHub](https://github.com/yakhyo/uniface/issues). diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 0000000..e7689f3 --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,138 @@ +# FAQ + +Frequently asked questions. + +--- + +## General + +### What is UniFace? + +A Python library for face analysis: detection, recognition, landmarks, attributes, parsing, gaze estimation, anti-spoofing, and privacy protection. + +### What are the requirements? + +- Python 3.11+ +- Works on macOS, Linux, Windows + +### Is GPU required? + +No. CPU works fine. GPU (CUDA) provides faster inference. + +--- + +## Models + +### Where are models stored? + +``` +~/.uniface/models/ +``` + +### How to use offline? + +Pre-download models: + +```python +from uniface.model_store import verify_model_weights +from uniface.constants import RetinaFaceWeights + +verify_model_weights(RetinaFaceWeights.MNET_V2) +``` + +### Which detection model is best? + +| Use Case | Model | +|----------|-------| +| Balanced | RetinaFace MNET_V2 | +| Accuracy | SCRFD 10G | +| Speed | YOLOv5n-Face | + +--- + +## Usage + +### What image format? + +BGR (OpenCV default): + +```python +image = cv2.imread("photo.jpg") # BGR +``` + +### How to compare faces? + +```python +from uniface import compute_similarity + +similarity = compute_similarity(emb1, emb2) +if similarity > 0.6: + print("Same person") +``` + +### How to get age and gender? + +```python +from uniface import AgeGender + +predictor = AgeGender() +result = predictor.predict(image, face.bbox) +print(f"{result.sex}, {result.age}") +``` + +--- + +## Performance + +### How to speed up detection? + +1. Use smaller input: + ```python + detector = RetinaFace(input_size=(320, 320)) + ``` + +2. Skip frames in video: + ```python + if frame_count % 3 == 0: + faces = detector.detect(frame) + ``` + +3. Use GPU: + ```bash + pip install uniface[gpu] + ``` + +--- + +## Accuracy + +### Detection threshold? + +Default: 0.5 + +- Higher (0.7+): Fewer false positives +- Lower (0.3): More detections + +### Similarity threshold? + +| Threshold | Meaning | +|-----------|---------| +| > 0.6 | Same person | +| 0.4-0.6 | Uncertain | +| < 0.4 | Different | + +--- + +## Privacy + +### How to blur faces? + +```python +from uniface.privacy import anonymize_faces + +result = anonymize_faces(image, method='pixelate') +``` + +### Available blur methods? + +`pixelate`, `gaussian`, `blackout`, `elliptical`, `median` diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..87e692f --- /dev/null +++ b/docs/index.md @@ -0,0 +1,137 @@ +--- +hide: + - toc + - navigation +--- + +
+ +# :material-face-recognition: UniFace { .hero-title } + +A lightweight, production-ready face analysis library built on ONNX Runtime. + +[![PyPI](https://img.shields.io/pypi/v/uniface.svg)](https://pypi.org/project/uniface/) +[![Python](https://img.shields.io/badge/Python-3.11%2B-blue)](https://www.python.org/) +[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +[![Downloads](https://static.pepy.tech/badge/uniface)](https://pepy.tech/project/uniface) + +![UniFace](assets/logo.webp){ width="60%" } + +[Get Started](quickstart.md){ .md-button .md-button--primary } +[View on GitHub](https://github.com/yakhyo/uniface){ .md-button } + +
+ +--- + +## Features + +
+ +
+### :material-face-recognition: Face Detection +ONNX-optimized RetinaFace, SCRFD, and YOLOv5-Face models with 5-point landmarks. +
+ +
+### :material-account-check: Face Recognition +ArcFace, MobileFace, and SphereFace embeddings for identity verification. +
+ +
+### :material-map-marker: Landmarks +Accurate 106-point facial landmark localization for detailed face analysis. +
+ +
+### :material-account-details: Attributes +Age, gender, race (FairFace), and emotion detection from faces. +
+ +
+### :material-face-man-shimmer: Face Parsing +BiSeNet semantic segmentation with 19 facial component classes. +
+ +
+### :material-eye: Gaze Estimation +Real-time gaze direction prediction with MobileGaze models. +
+ +
+### :material-shield-check: Anti-Spoofing +Face liveness detection with MiniFASNet to prevent fraud. +
+ +
+### :material-blur: Privacy +Face anonymization with 5 blur methods for privacy protection. +
+ +
+ +--- + +## Installation + +=== "Standard" + + ```bash + pip install uniface + ``` + +=== "GPU (CUDA)" + + ```bash + pip install uniface[gpu] + ``` + +=== "From Source" + + ```bash + git clone https://github.com/yakhyo/uniface.git + cd uniface + pip install -e . + ``` + +--- + +## Next Steps + +
+ +
+### :material-rocket-launch: Quickstart +Get up and running in 5 minutes with common use cases. + +[Quickstart Guide →](quickstart.md) +
+ +
+### :material-book-open-variant: Concepts +Learn about the architecture and design principles. + +[Read Concepts →](concepts/overview.md) +
+ +
+### :material-puzzle: Modules +Explore individual modules and their APIs. + +[Browse Modules →](modules/detection.md) +
+ +
+### :material-chef-hat: Recipes +Complete examples for common workflows. + +[View Recipes →](recipes/image-pipeline.md) +
+ +
+ +--- + +## License + +UniFace is released under the [MIT License](https://opensource.org/licenses/MIT). diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 0000000..f4b277a --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,174 @@ +# Installation + +This guide covers all installation options for UniFace. + +--- + +## Requirements + +- **Python**: 3.11 or higher +- **Operating Systems**: macOS, Linux, Windows + +--- + +## Quick Install + +The simplest way to install UniFace: + +```bash +pip install uniface +``` + +This installs the CPU version with all core dependencies. + +--- + +## Platform-Specific Installation + +### macOS (Apple Silicon - M1/M2/M3/M4) + +For Apple Silicon Macs, the standard installation automatically includes ARM64 optimizations: + +```bash +pip install uniface +``` + +!!! tip "Native Performance" + The base `onnxruntime` package has native Apple Silicon support with ARM64 optimizations built-in since version 1.13+. No additional configuration needed. + +Verify ARM64 installation: + +```bash +python -c "import platform; print(platform.machine())" +# Should show: arm64 +``` + +--- + +### Linux/Windows with NVIDIA GPU + +For CUDA acceleration on NVIDIA GPUs: + +```bash +pip install uniface[gpu] +``` + +**Requirements:** + +- CUDA 11.x or 12.x +- cuDNN 8.x + +!!! info "CUDA Compatibility" + See [ONNX Runtime GPU requirements](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) for detailed compatibility matrix. + +Verify GPU installation: + +```python +import onnxruntime as ort +print("Available providers:", ort.get_available_providers()) +# Should include: 'CUDAExecutionProvider' +``` + +--- + +### CPU-Only (All Platforms) + +```bash +pip install uniface +``` + +Works on all platforms with automatic CPU fallback. + +--- + +## Install from Source + +For development or the latest features: + +```bash +git clone https://github.com/yakhyo/uniface.git +cd uniface +pip install -e . +``` + +With development dependencies: + +```bash +pip install -e ".[dev]" +``` + +--- + +## Dependencies + +UniFace has minimal dependencies: + +| Package | Purpose | +|---------|---------| +| `numpy` | Array operations | +| `opencv-python` | Image processing | +| `onnxruntime` | Model inference | +| `requests` | Model download | +| `tqdm` | Progress bars | + +--- + +## Verify Installation + +Test your installation: + +```python +import uniface +print(f"UniFace version: {uniface.__version__}") + +# Check available ONNX providers +import onnxruntime as ort +print(f"Available providers: {ort.get_available_providers()}") + +# Quick test +from uniface import RetinaFace +detector = RetinaFace() +print("Installation successful!") +``` + +--- + +## Troubleshooting + +### Import Errors + +If you encounter import errors, ensure you're using Python 3.11+: + +```bash +python --version +# Should show: Python 3.11.x or higher +``` + +### Model Download Issues + +Models are automatically downloaded on first use. 
If downloads fail: + +```python +from uniface.model_store import verify_model_weights +from uniface.constants import RetinaFaceWeights + +# Manually download a model +model_path = verify_model_weights(RetinaFaceWeights.MNET_V2) +print(f"Model downloaded to: {model_path}") +``` + +### Performance Issues on Mac + +Verify you're using the ARM64 build (not x86_64 via Rosetta): + +```bash +python -c "import platform; print(platform.machine())" +# Should show: arm64 (not x86_64) +``` + +--- + +## Next Steps + +- [Quickstart](quickstart.md) - Get started with common use cases +- [Concepts Overview](concepts/overview.md) - Understand the architecture diff --git a/docs/license-attribution.md b/docs/license-attribution.md new file mode 100644 index 0000000..73919cf --- /dev/null +++ b/docs/license-attribution.md @@ -0,0 +1,43 @@ +# Licenses & Attribution + +## UniFace License + +UniFace is released under the [MIT License](https://opensource.org/licenses/MIT). + +--- + +## Model Credits + +| Model | Source | License | +|-------|--------|---------| +| RetinaFace | [yakhyo/retinaface-pytorch](https://github.com/yakhyo/retinaface-pytorch) | MIT | +| SCRFD | [InsightFace](https://github.com/deepinsight/insightface) | MIT | +| YOLOv5-Face | [deepcam-cn/yolov5-face](https://github.com/deepcam-cn/yolov5-face) | GPL-3.0 | +| ArcFace | [InsightFace](https://github.com/deepinsight/insightface) | MIT | +| MobileFace | [yakhyo/face-recognition](https://github.com/yakhyo/face-recognition) | MIT | +| SphereFace | [yakhyo/face-recognition](https://github.com/yakhyo/face-recognition) | MIT | +| BiSeNet | [yakhyo/face-parsing](https://github.com/yakhyo/face-parsing) | MIT | +| MobileGaze | [yakhyo/gaze-estimation](https://github.com/yakhyo/gaze-estimation) | MIT | +| MiniFASNet | [minivision-ai/Silent-Face-Anti-Spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing) | Apache-2.0 | +| FairFace | [yakhyo/fairface-onnx](https://github.com/yakhyo/fairface-onnx) | CC BY 4.0 | + +--- + +## Papers + +- **RetinaFace**: [arXiv:1905.00641](https://arxiv.org/abs/1905.00641) +- **SCRFD**: [arXiv:2105.04714](https://arxiv.org/abs/2105.04714) +- **YOLOv5-Face**: [arXiv:2105.12931](https://arxiv.org/abs/2105.12931) +- **ArcFace**: [arXiv:1801.07698](https://arxiv.org/abs/1801.07698) +- **SphereFace**: [arXiv:1704.08063](https://arxiv.org/abs/1704.08063) +- **BiSeNet**: [arXiv:1808.00897](https://arxiv.org/abs/1808.00897) + +--- + +## Third-Party Libraries + +| Library | License | +|---------|---------| +| ONNX Runtime | MIT | +| OpenCV | Apache-2.0 | +| NumPy | BSD-3-Clause | diff --git a/docs/modules/attributes.md b/docs/modules/attributes.md new file mode 100644 index 0000000..87dfd76 --- /dev/null +++ b/docs/modules/attributes.md @@ -0,0 +1,279 @@ +# Attributes + +Facial attribute analysis for age, gender, race, and emotion detection. + +--- + +## Available Models + +| Model | Attributes | Size | Notes | +|-------|------------|------|-------| +| **AgeGender** | Age, Gender | 8 MB | Exact age prediction | +| **FairFace** | Gender, Age Group, Race | 44 MB | Balanced demographics | +| **Emotion** | 7-8 emotions | 2 MB | Requires PyTorch | + +--- + +## AgeGender + +Predicts exact age and binary gender. 
+ +### Basic Usage + +```python +from uniface import RetinaFace, AgeGender + +detector = RetinaFace() +age_gender = AgeGender() + +faces = detector.detect(image) + +for face in faces: + result = age_gender.predict(image, face.bbox) + print(f"Gender: {result.sex}") # "Female" or "Male" + print(f"Age: {result.age} years") +``` + +### Output + +```python +# AttributeResult fields +result.gender # 0=Female, 1=Male +result.sex # "Female" or "Male" (property) +result.age # int, age in years +result.age_group # None (not provided by this model) +result.race # None (not provided by this model) +``` + +--- + +## FairFace + +Predicts gender, age group, and race with balanced demographics. + +### Basic Usage + +```python +from uniface import RetinaFace, FairFace + +detector = RetinaFace() +fairface = FairFace() + +faces = detector.detect(image) + +for face in faces: + result = fairface.predict(image, face.bbox) + print(f"Gender: {result.sex}") + print(f"Age Group: {result.age_group}") + print(f"Race: {result.race}") +``` + +### Output + +```python +# AttributeResult fields +result.gender # 0=Female, 1=Male +result.sex # "Female" or "Male" +result.age # None (not provided by this model) +result.age_group # "20-29", "30-39", etc. +result.race # Race/ethnicity label +``` + +### Race Categories + +| Label | +|-------| +| White | +| Black | +| Latino Hispanic | +| East Asian | +| Southeast Asian | +| Indian | +| Middle Eastern | + +### Age Groups + +| Group | +|-------| +| 0-2 | +| 3-9 | +| 10-19 | +| 20-29 | +| 30-39 | +| 40-49 | +| 50-59 | +| 60-69 | +| 70+ | + +--- + +## Emotion + +Predicts facial emotions. Requires PyTorch. + +!!! warning "Optional Dependency" + Emotion detection requires PyTorch. Install with: + ```bash + pip install torch + ``` + +### Basic Usage + +```python +from uniface import RetinaFace +from uniface.attribute import Emotion +from uniface.constants import DDAMFNWeights + +detector = RetinaFace() +emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7) + +faces = detector.detect(image) + +for face in faces: + result = emotion.predict(image, face.landmarks) + print(f"Emotion: {result.emotion}") + print(f"Confidence: {result.confidence:.2%}") +``` + +### Emotion Classes + +=== "7-Class (AFFECNET7)" + + | Label | + |-------| + | Neutral | + | Happy | + | Sad | + | Surprise | + | Fear | + | Disgust | + | Anger | + +=== "8-Class (AFFECNET8)" + + | Label | + |-------| + | Neutral | + | Happy | + | Sad | + | Surprise | + | Fear | + | Disgust | + | Anger | + | Contempt | + +### Model Variants + +```python +from uniface.attribute import Emotion +from uniface.constants import DDAMFNWeights + +# 7-class emotion +emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7) + +# 8-class emotion +emotion = Emotion(model_name=DDAMFNWeights.AFFECNET8) +``` + +--- + +## Combining Models + +### Full Attribute Analysis + +```python +from uniface import RetinaFace, AgeGender, FairFace + +detector = RetinaFace() +age_gender = AgeGender() +fairface = FairFace() + +faces = detector.detect(image) + +for face in faces: + # Get exact age from AgeGender + ag_result = age_gender.predict(image, face.bbox) + + # Get race from FairFace + ff_result = fairface.predict(image, face.bbox) + + print(f"Gender: {ag_result.sex}") + print(f"Exact Age: {ag_result.age}") + print(f"Age Group: {ff_result.age_group}") + print(f"Race: {ff_result.race}") +``` + +### Using FaceAnalyzer + +```python +from uniface import FaceAnalyzer + +analyzer = FaceAnalyzer( + detect=True, + recognize=False, + attributes=True # Uses AgeGender +) + 
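+# Disabled stages (here: recognize=False) leave the matching Face fields
+# as None, so check face.embedding before using it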
+faces = analyzer.analyze(image) + +for face in faces: + print(f"Age: {face.age}, Gender: {face.sex}") +``` + +--- + +## Visualization + +```python +import cv2 + +def draw_attributes(image, face, result): + """Draw attributes on image.""" + x1, y1, x2, y2 = map(int, face.bbox) + + # Draw bounding box + cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2) + + # Build label + label = f"{result.sex}" + if result.age: + label += f", {result.age}y" + if result.age_group: + label += f", {result.age_group}" + if result.race: + label += f", {result.race}" + + # Draw label + cv2.putText( + image, label, (x1, y1 - 10), + cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2 + ) + + return image + +# Usage +for face in faces: + result = age_gender.predict(image, face.bbox) + image = draw_attributes(image, face, result) + +cv2.imwrite("attributes.jpg", image) +``` + +--- + +## Accuracy Notes + +!!! note "Model Limitations" + - **AgeGender**: Trained on CelebA; accuracy varies by demographic + - **FairFace**: Trained for balanced demographics; better cross-racial accuracy + - **Emotion**: Accuracy depends on facial expression clarity + + Always test on your specific use case and consider cultural context. + +--- + +## Next Steps + +- [Parsing](parsing.md) - Face semantic segmentation +- [Gaze](gaze.md) - Gaze estimation +- [Image Pipeline Recipe](../recipes/image-pipeline.md) - Complete workflow diff --git a/docs/modules/detection.md b/docs/modules/detection.md new file mode 100644 index 0000000..3d49b3b --- /dev/null +++ b/docs/modules/detection.md @@ -0,0 +1,251 @@ +# Detection + +Face detection is the first step in any face analysis pipeline. UniFace provides three detection models. + +--- + +## Available Models + +| Model | Backbone | Size | WIDER FACE (Easy/Medium/Hard) | Best For | +|-------|----------|------|-------------------------------|----------| +| **RetinaFace** | MobileNet V2 | 3.5 MB | 91.7% / 91.0% / 86.6% | Balanced (recommended) | +| **SCRFD** | SCRFD-10G | 17 MB | 95.2% / 93.9% / 83.1% | High accuracy | +| **YOLOv5-Face** | YOLOv5s | 28 MB | 94.3% / 92.6% / 83.2% | Real-time | + +--- + +## RetinaFace + +The recommended detector for most use cases. 
+ +### Basic Usage + +```python +from uniface import RetinaFace + +detector = RetinaFace() +faces = detector.detect(image) + +for face in faces: + print(f"Confidence: {face.confidence:.2f}") + print(f"BBox: {face.bbox}") + print(f"Landmarks: {face.landmarks.shape}") # (5, 2) +``` + +### Model Variants + +```python +from uniface import RetinaFace +from uniface.constants import RetinaFaceWeights + +# Lightweight (mobile/edge) +detector = RetinaFace(model_name=RetinaFaceWeights.MNET_025) + +# Balanced (default) +detector = RetinaFace(model_name=RetinaFaceWeights.MNET_V2) + +# High accuracy +detector = RetinaFace(model_name=RetinaFaceWeights.RESNET34) +``` + +| Variant | Params | Size | Easy | Medium | Hard | +|---------|--------|------|------|--------|------| +| MNET_025 | 0.4M | 1.7 MB | 88.5% | 87.0% | 80.6% | +| MNET_050 | 1.0M | 2.6 MB | 89.4% | 88.0% | 82.4% | +| MNET_V1 | 3.5M | 3.8 MB | 90.6% | 89.1% | 84.1% | +| **MNET_V2** ⭐ | 3.2M | 3.5 MB | 91.7% | 91.0% | 86.6% | +| RESNET18 | 11.7M | 27 MB | 92.5% | 91.0% | 86.6% | +| RESNET34 | 24.8M | 56 MB | 94.2% | 93.1% | 88.9% | + +### Configuration + +```python +detector = RetinaFace( + model_name=RetinaFaceWeights.MNET_V2, + confidence_threshold=0.5, # Min confidence + nms_threshold=0.4, # NMS IoU threshold + input_size=(640, 640), # Input resolution + dynamic_size=False # Enable dynamic input size +) +``` + +--- + +## SCRFD + +State-of-the-art detection with excellent accuracy-speed tradeoff. + +### Basic Usage + +```python +from uniface import SCRFD + +detector = SCRFD() +faces = detector.detect(image) +``` + +### Model Variants + +```python +from uniface import SCRFD +from uniface.constants import SCRFDWeights + +# Real-time (lightweight) +detector = SCRFD(model_name=SCRFDWeights.SCRFD_500M_KPS) + +# High accuracy (default) +detector = SCRFD(model_name=SCRFDWeights.SCRFD_10G_KPS) +``` + +| Variant | Params | Size | Easy | Medium | Hard | +|---------|--------|------|------|--------|------| +| SCRFD_500M_KPS | 0.6M | 2.5 MB | 90.6% | 88.1% | 68.5% | +| **SCRFD_10G_KPS** ⭐ | 4.2M | 17 MB | 95.2% | 93.9% | 83.1% | + +### Configuration + +```python +detector = SCRFD( + model_name=SCRFDWeights.SCRFD_10G_KPS, + confidence_threshold=0.5, + nms_threshold=0.4, + input_size=(640, 640) +) +``` + +--- + +## YOLOv5-Face + +YOLO-based detection optimized for faces. + +### Basic Usage + +```python +from uniface import YOLOv5Face + +detector = YOLOv5Face() +faces = detector.detect(image) +``` + +### Model Variants + +```python +from uniface import YOLOv5Face +from uniface.constants import YOLOv5FaceWeights + +# Lightweight +detector = YOLOv5Face(model_name=YOLOv5FaceWeights.YOLOV5N) + +# Balanced (default) +detector = YOLOv5Face(model_name=YOLOv5FaceWeights.YOLOV5S) + +# High accuracy +detector = YOLOv5Face(model_name=YOLOv5FaceWeights.YOLOV5M) +``` + +| Variant | Size | Easy | Medium | Hard | +|---------|------|------|--------|------| +| YOLOV5N | 11 MB | 93.6% | 91.5% | 80.5% | +| **YOLOV5S** ⭐ | 28 MB | 94.3% | 92.6% | 83.2% | +| YOLOV5M | 82 MB | 95.3% | 93.8% | 85.3% | + +!!! note "Fixed Input Size" + YOLOv5-Face uses a fixed input size of 640×640. 
+ +### Configuration + +```python +detector = YOLOv5Face( + model_name=YOLOv5FaceWeights.YOLOV5S, + confidence_threshold=0.6, + nms_threshold=0.5 +) +``` + +--- + +## Factory Function + +Create detectors dynamically: + +```python +from uniface import create_detector + +detector = create_detector('retinaface') +# or +detector = create_detector('scrfd') +# or +detector = create_detector('yolov5face') +``` + +--- + +## High-Level API + +One-line detection: + +```python +from uniface import detect_faces + +faces = detect_faces( + image, + method='retinaface', + confidence_threshold=0.5 +) +``` + +--- + +## Output Format + +All detectors return `list[Face]`: + +```python +for face in faces: + # Bounding box [x1, y1, x2, y2] + bbox = face.bbox + + # Detection confidence (0-1) + confidence = face.confidence + + # 5-point landmarks (5, 2) + landmarks = face.landmarks + # [left_eye, right_eye, nose, left_mouth, right_mouth] +``` + +--- + +## Visualization + +```python +from uniface.visualization import draw_detections + +draw_detections( + image=image, + bboxes=[f.bbox for f in faces], + scores=[f.confidence for f in faces], + landmarks=[f.landmarks for f in faces], + vis_threshold=0.6 +) + +cv2.imwrite("result.jpg", image) +``` + +--- + +## Performance Comparison + +Benchmark on your hardware: + +```bash +python tools/detection.py --source image.jpg --iterations 100 +``` + +--- + +## Next Steps + +- [Recognition](recognition.md) - Extract face embeddings +- [Landmarks](landmarks.md) - 106-point landmarks +- [Image Pipeline Recipe](../recipes/image-pipeline.md) - Complete workflow diff --git a/docs/modules/gaze.md b/docs/modules/gaze.md new file mode 100644 index 0000000..c55737a --- /dev/null +++ b/docs/modules/gaze.md @@ -0,0 +1,270 @@ +# Gaze Estimation + +Gaze estimation predicts where a person is looking (pitch and yaw angles). 
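+
+If you need a 3D direction rather than angles, the spherical-to-Cartesian conversion below matches the sign convention used by the arrow-drawing code later on this page (a minimal sketch; the helper name is illustrative, not part of the API):
+
+```python
+import numpy as np
+
+def gaze_to_vector(pitch: float, yaw: float) -> np.ndarray:
+    """Pitch/yaw in radians to a unit gaze vector.
+
+    The x/y components use the same signs as the gaze arrows drawn below;
+    z completes the unit vector.
+    """
+    x = -np.cos(pitch) * np.sin(yaw)
+    y = -np.sin(pitch)
+    z = -np.cos(pitch) * np.cos(yaw)
+    return np.array([x, y, z])
+```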
+ +--- + +## Available Models + +| Model | Backbone | Size | MAE* | Best For | +|-------|----------|------|------|----------| +| ResNet18 | ResNet18 | 43 MB | 12.84° | Balanced | +| **ResNet34** ⭐ | ResNet34 | 82 MB | 11.33° | Recommended | +| ResNet50 | ResNet50 | 91 MB | 11.34° | High accuracy | +| MobileNetV2 | MobileNetV2 | 9.6 MB | 13.07° | Mobile | +| MobileOne-S0 | MobileOne | 4.8 MB | 12.58° | Lightweight | + +*MAE = Mean Absolute Error on Gaze360 test set (lower is better) + +--- + +## Basic Usage + +```python +import cv2 +import numpy as np +from uniface import RetinaFace, MobileGaze + +detector = RetinaFace() +gaze_estimator = MobileGaze() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +for face in faces: + # Crop face + x1, y1, x2, y2 = map(int, face.bbox) + face_crop = image[y1:y2, x1:x2] + + if face_crop.size > 0: + # Estimate gaze + result = gaze_estimator.estimate(face_crop) + + # Convert to degrees + pitch_deg = np.degrees(result.pitch) + yaw_deg = np.degrees(result.yaw) + + print(f"Pitch: {pitch_deg:.1f}°, Yaw: {yaw_deg:.1f}°") +``` + +--- + +## Model Variants + +```python +from uniface import MobileGaze +from uniface.constants import GazeWeights + +# Default (ResNet34, recommended) +gaze = MobileGaze() + +# Lightweight for mobile/edge +gaze = MobileGaze(model_name=GazeWeights.MOBILEONE_S0) + +# Higher accuracy +gaze = MobileGaze(model_name=GazeWeights.RESNET50) +``` + +--- + +## Output Format + +```python +result = gaze_estimator.estimate(face_crop) + +# GazeResult dataclass +result.pitch # Vertical angle in radians +result.yaw # Horizontal angle in radians +``` + +### Angle Convention + +``` + pitch = +90° (looking up) + │ + │ +yaw = -90° ────┼──── yaw = +90° +(looking left) │ (looking right) + │ + pitch = -90° (looking down) +``` + +- **Pitch**: Vertical gaze angle + - Positive = looking up + - Negative = looking down + +- **Yaw**: Horizontal gaze angle + - Positive = looking right + - Negative = looking left + +--- + +## Visualization + +```python +from uniface.visualization import draw_gaze + +# Detect faces +faces = detector.detect(image) + +for face in faces: + x1, y1, x2, y2 = map(int, face.bbox) + face_crop = image[y1:y2, x1:x2] + + if face_crop.size > 0: + result = gaze_estimator.estimate(face_crop) + + # Draw gaze arrow on image + draw_gaze(image, face.bbox, result.pitch, result.yaw) + +cv2.imwrite("gaze_output.jpg", image) +``` + +### Custom Visualization + +```python +import cv2 +import numpy as np + +def draw_gaze_custom(image, bbox, pitch, yaw, length=100, color=(0, 255, 0)): + """Draw custom gaze arrow.""" + x1, y1, x2, y2 = map(int, bbox) + + # Face center + cx = (x1 + x2) // 2 + cy = (y1 + y2) // 2 + + # Calculate endpoint + dx = -length * np.sin(yaw) * np.cos(pitch) + dy = -length * np.sin(pitch) + + # Draw arrow + end_x = int(cx + dx) + end_y = int(cy + dy) + + cv2.arrowedLine(image, (cx, cy), (end_x, end_y), color, 2, tipLength=0.3) + + return image +``` + +--- + +## Real-Time Gaze Tracking + +```python +import cv2 +import numpy as np +from uniface import RetinaFace, MobileGaze +from uniface.visualization import draw_gaze + +detector = RetinaFace() +gaze_estimator = MobileGaze() + +cap = cv2.VideoCapture(0) + +while True: + ret, frame = cap.read() + if not ret: + break + + faces = detector.detect(frame) + + for face in faces: + x1, y1, x2, y2 = map(int, face.bbox) + face_crop = frame[y1:y2, x1:x2] + + if face_crop.size > 0: + result = gaze_estimator.estimate(face_crop) + + # Draw bounding box + cv2.rectangle(frame, (x1, y1), (x2, 
y2), (0, 255, 0), 2) + + # Draw gaze + draw_gaze(frame, face.bbox, result.pitch, result.yaw) + + # Display angles + pitch_deg = np.degrees(result.pitch) + yaw_deg = np.degrees(result.yaw) + label = f"P:{pitch_deg:.0f} Y:{yaw_deg:.0f}" + cv2.putText(frame, label, (x1, y1 - 10), + cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) + + cv2.imshow("Gaze Estimation", frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +cap.release() +cv2.destroyAllWindows() +``` + +--- + +## Use Cases + +### Attention Detection + +```python +def is_looking_at_camera(result, threshold=15): + """Check if person is looking at camera.""" + pitch_deg = abs(np.degrees(result.pitch)) + yaw_deg = abs(np.degrees(result.yaw)) + + return pitch_deg < threshold and yaw_deg < threshold + +# Usage +result = gaze_estimator.estimate(face_crop) +if is_looking_at_camera(result): + print("Looking at camera") +else: + print("Looking away") +``` + +### Gaze Direction Classification + +```python +def classify_gaze_direction(result, threshold=20): + """Classify gaze into directions.""" + pitch_deg = np.degrees(result.pitch) + yaw_deg = np.degrees(result.yaw) + + directions = [] + + if pitch_deg > threshold: + directions.append("up") + elif pitch_deg < -threshold: + directions.append("down") + + if yaw_deg > threshold: + directions.append("right") + elif yaw_deg < -threshold: + directions.append("left") + + if not directions: + return "center" + + return " ".join(directions) + +# Usage +result = gaze_estimator.estimate(face_crop) +direction = classify_gaze_direction(result) +print(f"Looking: {direction}") +``` + +--- + +## Factory Function + +```python +from uniface import create_gaze_estimator + +gaze = create_gaze_estimator() # Returns MobileGaze +``` + +--- + +## Next Steps + +- [Anti-Spoofing](spoofing.md) - Face liveness detection +- [Privacy](privacy.md) - Face anonymization +- [Video Recipe](../recipes/video-webcam.md) - Real-time processing diff --git a/docs/modules/landmarks.md b/docs/modules/landmarks.md new file mode 100644 index 0000000..bfe5407 --- /dev/null +++ b/docs/modules/landmarks.md @@ -0,0 +1,250 @@ +# Landmarks + +Facial landmark detection provides precise localization of facial features. + +--- + +## Available Models + +| Model | Points | Size | Use Case | +|-------|--------|------|----------| +| **Landmark106** | 106 | 14 MB | Detailed face analysis | + +!!! info "5-Point Landmarks" + Basic 5-point landmarks are included with all detection models (RetinaFace, SCRFD, YOLOv5-Face). 
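+
+The five points are already enough for simple geometry; for example, estimating in-plane head tilt (roll) from the two eye centers (a minimal sketch):
+
+```python
+import numpy as np
+
+# landmarks[0] = left eye, landmarks[1] = right eye
+left_eye, right_eye = face.landmarks[0], face.landmarks[1]
+dx, dy = right_eye - left_eye
+roll_deg = float(np.degrees(np.arctan2(dy, dx)))
+print(f"Roll: {roll_deg:.1f} deg")
+```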
+ +--- + +## 106-Point Landmarks + +### Basic Usage + +```python +from uniface import RetinaFace, Landmark106 + +detector = RetinaFace() +landmarker = Landmark106() + +# Detect face +faces = detector.detect(image) + +# Get detailed landmarks +if faces: + landmarks = landmarker.get_landmarks(image, faces[0].bbox) + print(f"Landmarks shape: {landmarks.shape}") # (106, 2) +``` + +### Landmark Groups + +| Range | Group | Points | +|-------|-------|--------| +| 0-32 | Face Contour | 33 | +| 33-50 | Eyebrows | 18 | +| 51-62 | Nose | 12 | +| 63-86 | Eyes | 24 | +| 87-105 | Mouth | 19 | + +### Extract Specific Features + +```python +landmarks = landmarker.get_landmarks(image, face.bbox) + +# Face contour +contour = landmarks[0:33] + +# Left eyebrow +left_eyebrow = landmarks[33:42] + +# Right eyebrow +right_eyebrow = landmarks[42:51] + +# Nose +nose = landmarks[51:63] + +# Left eye +left_eye = landmarks[63:72] + +# Right eye +right_eye = landmarks[76:84] + +# Mouth +mouth = landmarks[87:106] +``` + +--- + +## 5-Point Landmarks (Detection) + +All detection models provide 5-point landmarks: + +```python +from uniface import RetinaFace + +detector = RetinaFace() +faces = detector.detect(image) + +if faces: + landmarks_5 = faces[0].landmarks + print(f"Shape: {landmarks_5.shape}") # (5, 2) + + left_eye = landmarks_5[0] + right_eye = landmarks_5[1] + nose = landmarks_5[2] + left_mouth = landmarks_5[3] + right_mouth = landmarks_5[4] +``` + +--- + +## Visualization + +### Draw 106 Landmarks + +```python +import cv2 + +def draw_landmarks(image, landmarks, color=(0, 255, 0), radius=2): + """Draw landmarks on image.""" + for x, y in landmarks.astype(int): + cv2.circle(image, (x, y), radius, color, -1) + return image + +# Usage +landmarks = landmarker.get_landmarks(image, face.bbox) +image_with_landmarks = draw_landmarks(image.copy(), landmarks) +cv2.imwrite("landmarks.jpg", image_with_landmarks) +``` + +### Draw with Connections + +```python +def draw_landmarks_with_connections(image, landmarks): + """Draw landmarks with facial feature connections.""" + landmarks = landmarks.astype(int) + + # Face contour (0-32) + for i in range(32): + cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (255, 255, 0), 1) + + # Left eyebrow (33-41) + for i in range(33, 41): + cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (0, 255, 0), 1) + + # Right eyebrow (42-50) + for i in range(42, 50): + cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (0, 255, 0), 1) + + # Nose (51-62) + for i in range(51, 62): + cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (0, 0, 255), 1) + + # Draw points + for x, y in landmarks: + cv2.circle(image, (x, y), 2, (0, 255, 255), -1) + + return image +``` + +--- + +## Use Cases + +### Face Alignment + +```python +from uniface import face_alignment + +# Align face using 5-point landmarks +aligned = face_alignment(image, faces[0].landmarks) +# Returns: 112x112 aligned face +``` + +### Eye Aspect Ratio (Blink Detection) + +```python +import numpy as np + +def eye_aspect_ratio(eye_landmarks): + """Calculate eye aspect ratio for blink detection.""" + # Vertical distances + v1 = np.linalg.norm(eye_landmarks[1] - eye_landmarks[5]) + v2 = np.linalg.norm(eye_landmarks[2] - eye_landmarks[4]) + + # Horizontal distance + h = np.linalg.norm(eye_landmarks[0] - eye_landmarks[3]) + + ear = (v1 + v2) / (2.0 * h) + return ear + +# Usage with 106-point landmarks +left_eye = landmarks[63:72] # Approximate eye points +ear = eye_aspect_ratio(left_eye) + +if ear < 0.2: + print("Eye 
closed (blink detected)") +``` + +### Head Pose Estimation + +```python +import cv2 +import numpy as np + +def estimate_head_pose(landmarks, image_shape): + """Estimate head pose from facial landmarks.""" + # 3D model points (generic face model) + model_points = np.array([ + (0.0, 0.0, 0.0), # Nose tip + (0.0, -330.0, -65.0), # Chin + (-225.0, 170.0, -135.0), # Left eye corner + (225.0, 170.0, -135.0), # Right eye corner + (-150.0, -150.0, -125.0), # Left mouth corner + (150.0, -150.0, -125.0) # Right mouth corner + ], dtype=np.float64) + + # 2D image points (from 106 landmarks) + image_points = np.array([ + landmarks[51], # Nose tip + landmarks[16], # Chin + landmarks[63], # Left eye corner + landmarks[76], # Right eye corner + landmarks[87], # Left mouth corner + landmarks[93] # Right mouth corner + ], dtype=np.float64) + + # Camera matrix + h, w = image_shape[:2] + focal_length = w + center = (w / 2, h / 2) + camera_matrix = np.array([ + [focal_length, 0, center[0]], + [0, focal_length, center[1]], + [0, 0, 1] + ], dtype=np.float64) + + # Solve PnP + dist_coeffs = np.zeros((4, 1)) + success, rotation_vector, translation_vector = cv2.solvePnP( + model_points, image_points, camera_matrix, dist_coeffs + ) + + return rotation_vector, translation_vector +``` + +--- + +## Factory Function + +```python +from uniface import create_landmarker + +landmarker = create_landmarker() # Returns Landmark106 +``` + +--- + +## Next Steps + +- [Attributes](attributes.md) - Age, gender, emotion +- [Gaze](gaze.md) - Gaze estimation +- [Detection](detection.md) - Face detection with 5-point landmarks diff --git a/docs/modules/parsing.md b/docs/modules/parsing.md new file mode 100644 index 0000000..46c7dd3 --- /dev/null +++ b/docs/modules/parsing.md @@ -0,0 +1,265 @@ +# Parsing + +Face parsing segments faces into semantic components (skin, eyes, nose, mouth, hair, etc.). 
+ +--- + +## Available Models + +| Model | Backbone | Size | Classes | Best For | +|-------|----------|------|---------|----------| +| **BiSeNet ResNet18** ⭐ | ResNet18 | 51 MB | 19 | Balanced (recommended) | +| **BiSeNet ResNet34** | ResNet34 | 89 MB | 19 | Higher accuracy | + +--- + +## Basic Usage + +```python +import cv2 +from uniface.parsing import BiSeNet +from uniface.visualization import vis_parsing_maps + +# Initialize parser +parser = BiSeNet() + +# Load face image (cropped) +face_image = cv2.imread("face.jpg") + +# Parse face +mask = parser.parse(face_image) +print(f"Mask shape: {mask.shape}") # (H, W) + +# Visualize +face_rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB) +vis_result = vis_parsing_maps(face_rgb, mask, save_image=False) + +# Save result +vis_bgr = cv2.cvtColor(vis_result, cv2.COLOR_RGB2BGR) +cv2.imwrite("parsed.jpg", vis_bgr) +``` + +--- + +## 19 Facial Component Classes + +| ID | Class | ID | Class | +|----|-------|----|-------| +| 0 | Background | 10 | Ear Ring | +| 1 | Skin | 11 | Nose | +| 2 | Left Eyebrow | 12 | Mouth | +| 3 | Right Eyebrow | 13 | Upper Lip | +| 4 | Left Eye | 14 | Lower Lip | +| 5 | Right Eye | 15 | Neck | +| 6 | Eye Glasses | 16 | Neck Lace | +| 7 | Left Ear | 17 | Cloth | +| 8 | Right Ear | 18 | Hair | +| 9 | Hat | | | + +--- + +## Model Variants + +```python +from uniface.parsing import BiSeNet +from uniface.constants import ParsingWeights + +# Default (ResNet18) +parser = BiSeNet() + +# Higher accuracy (ResNet34) +parser = BiSeNet(model_name=ParsingWeights.RESNET34) +``` + +| Variant | Params | Size | Notes | +|---------|--------|------|-------| +| **RESNET18** ⭐ | 13.3M | 51 MB | Recommended | +| RESNET34 | 24.1M | 89 MB | Higher accuracy | + +--- + +## Full Pipeline + +### With Face Detection + +```python +import cv2 +from uniface import RetinaFace +from uniface.parsing import BiSeNet +from uniface.visualization import vis_parsing_maps + +detector = RetinaFace() +parser = BiSeNet() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +for i, face in enumerate(faces): + # Crop face + x1, y1, x2, y2 = map(int, face.bbox) + face_crop = image[y1:y2, x1:x2] + + # Parse + mask = parser.parse(face_crop) + + # Visualize + face_rgb = cv2.cvtColor(face_crop, cv2.COLOR_BGR2RGB) + vis_result = vis_parsing_maps(face_rgb, mask, save_image=False) + + # Save + vis_bgr = cv2.cvtColor(vis_result, cv2.COLOR_RGB2BGR) + cv2.imwrite(f"face_{i}_parsed.jpg", vis_bgr) +``` + +--- + +## Extract Specific Components + +### Get Single Component Mask + +```python +import numpy as np + +# Parse face +mask = parser.parse(face_image) + +# Extract specific component +SKIN = 1 +HAIR = 18 +LEFT_EYE = 4 +RIGHT_EYE = 5 + +# Binary mask for skin +skin_mask = (mask == SKIN).astype(np.uint8) * 255 + +# Binary mask for hair +hair_mask = (mask == HAIR).astype(np.uint8) * 255 + +# Binary mask for eyes +eyes_mask = ((mask == LEFT_EYE) | (mask == RIGHT_EYE)).astype(np.uint8) * 255 +``` + +### Count Pixels per Component + +```python +import numpy as np + +mask = parser.parse(face_image) + +component_names = { + 0: 'Background', 1: 'Skin', 2: 'L-Eyebrow', 3: 'R-Eyebrow', + 4: 'L-Eye', 5: 'R-Eye', 6: 'Glasses', 7: 'L-Ear', 8: 'R-Ear', + 9: 'Hat', 10: 'Earring', 11: 'Nose', 12: 'Mouth', + 13: 'U-Lip', 14: 'L-Lip', 15: 'Neck', 16: 'Necklace', + 17: 'Cloth', 18: 'Hair' +} + +for class_id in np.unique(mask): + pixel_count = np.sum(mask == class_id) + name = component_names.get(class_id, f'Class {class_id}') + print(f"{name}: {pixel_count} pixels") +``` + +--- + +## 
Applications
+
+### Face Makeup
+
+Apply virtual makeup using component masks:
+
+```python
+import cv2
+import numpy as np
+
+def apply_lip_color(image, mask, color=(180, 50, 50)):
+    """Apply lip color using parsing mask."""
+    result = image.copy()
+
+    # Get lip mask (upper + lower lip)
+    lip_mask = ((mask == 13) | (mask == 14)).astype(np.uint8)
+
+    # Create color overlay
+    overlay = np.zeros_like(image)
+    overlay[:] = color
+
+    # Alpha-blend the overlay over the whole frame, then keep the blended
+    # pixels only where the parsing mask marks lips
+    alpha = 0.4
+    blended = cv2.addWeighted(image, 1 - alpha, overlay, alpha, 0)
+    result[lip_mask == 1] = blended[lip_mask == 1]
+
+    return result
+```
+
+### Background Replacement
+
+```python
+def replace_background(image, mask, background):
+    """Replace background using parsing mask."""
+    # Create foreground mask (everything except background)
+    foreground_mask = (mask != 0).astype(np.uint8)
+
+    # Resize background to match image
+    background = cv2.resize(background, (image.shape[1], image.shape[0]))
+
+    # Combine
+    result = image.copy()
+    result[foreground_mask == 0] = background[foreground_mask == 0]
+
+    return result
+```
+
+### Hair Segmentation
+
+```python
+def get_hair_mask(mask):
+    """Extract clean hair mask."""
+    hair_mask = (mask == 18).astype(np.uint8) * 255
+
+    # Clean up with morphological operations
+    kernel = np.ones((5, 5), np.uint8)
+    hair_mask = cv2.morphologyEx(hair_mask, cv2.MORPH_CLOSE, kernel)
+    hair_mask = cv2.morphologyEx(hair_mask, cv2.MORPH_OPEN, kernel)
+
+    return hair_mask
+```
+
+---
+
+## Visualization Options
+
+```python
+from uniface.visualization import vis_parsing_maps
+
+# Default visualization
+vis_result = vis_parsing_maps(face_rgb, mask)
+
+# With explicit parameters
+vis_result = vis_parsing_maps(
+    face_rgb,
+    mask,
+    save_image=False,  # Don't save to file
+)
+```
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_face_parser
+
+parser = create_face_parser()  # Returns BiSeNet
+```
+
+---
+
+## Next Steps
+
+- [Gaze](gaze.md) - Gaze estimation
+- [Privacy](privacy.md) - Face anonymization
+- [Detection](detection.md) - Face detection
diff --git a/docs/modules/privacy.md b/docs/modules/privacy.md
new file mode 100644
index 0000000..fc8d820
--- /dev/null
+++ b/docs/modules/privacy.md
@@ -0,0 +1,277 @@
+# Privacy
+
+Face anonymization protects privacy by blurring or obscuring faces in images and videos. 
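+
+Anonymization is only as strong as the detection step: a face the detector misses stays identifiable. For privacy workloads it can therefore make sense to trade a little precision for recall by lowering the detector's confidence threshold; a small sketch using the documented `confidence_threshold` parameter:
+
+```python
+from uniface import RetinaFace
+
+# Lower threshold => fewer missed faces, at the cost of an occasional false blur
+detector = RetinaFace(confidence_threshold=0.35)
+```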
+ +--- + +## Available Methods + +| Method | Description | Use Case | +|--------|-------------|----------| +| **pixelate** | Blocky pixelation | News media standard | +| **gaussian** | Smooth blur | Natural appearance | +| **blackout** | Solid color fill | Maximum privacy | +| **elliptical** | Oval-shaped blur | Natural face shape | +| **median** | Edge-preserving blur | Artistic effect | + +--- + +## Quick Start + +### One-Line Anonymization + +```python +from uniface.privacy import anonymize_faces +import cv2 + +image = cv2.imread("group_photo.jpg") +anonymized = anonymize_faces(image, method='pixelate') +cv2.imwrite("anonymized.jpg", anonymized) +``` + +--- + +## BlurFace Class + +For more control, use the `BlurFace` class: + +```python +from uniface import RetinaFace +from uniface.privacy import BlurFace +import cv2 + +detector = RetinaFace() +blurrer = BlurFace(method='gaussian', blur_strength=5.0) + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) +anonymized = blurrer.anonymize(image, faces) + +cv2.imwrite("anonymized.jpg", anonymized) +``` + +--- + +## Blur Methods + +### Pixelate + +Blocky pixelation effect (common in news media): + +```python +blurrer = BlurFace(method='pixelate', pixel_blocks=10) +``` + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `pixel_blocks` | 10 | Number of blocks (lower = more pixelated) | + +### Gaussian + +Smooth, natural-looking blur: + +```python +blurrer = BlurFace(method='gaussian', blur_strength=3.0) +``` + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `blur_strength` | 3.0 | Blur intensity (higher = more blur) | + +### Blackout + +Solid color fill for maximum privacy: + +```python +blurrer = BlurFace(method='blackout', color=(0, 0, 0)) +``` + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `color` | (0, 0, 0) | Fill color (BGR format) | + +### Elliptical + +Oval-shaped blur matching natural face shape: + +```python +blurrer = BlurFace(method='elliptical', blur_strength=3.0, margin=20) +``` + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `blur_strength` | 3.0 | Blur intensity | +| `margin` | 20 | Margin around face | + +### Median + +Edge-preserving blur with artistic effect: + +```python +blurrer = BlurFace(method='median', blur_strength=3.0) +``` + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `blur_strength` | 3.0 | Blur intensity | + +--- + +## In-Place Processing + +Modify image directly (faster, saves memory): + +```python +blurrer = BlurFace(method='pixelate') + +# In-place modification +result = blurrer.anonymize(image, faces, inplace=True) +# 'image' and 'result' point to the same array +``` + +--- + +## Real-Time Anonymization + +### Webcam + +```python +import cv2 +from uniface import RetinaFace +from uniface.privacy import BlurFace + +detector = RetinaFace() +blurrer = BlurFace(method='pixelate') + +cap = cv2.VideoCapture(0) + +while True: + ret, frame = cap.read() + if not ret: + break + + faces = detector.detect(frame) + frame = blurrer.anonymize(frame, faces, inplace=True) + + cv2.imshow('Anonymized', frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +cap.release() +cv2.destroyAllWindows() +``` + +### Video File + +```python +import cv2 +from uniface import RetinaFace +from uniface.privacy import BlurFace + +detector = RetinaFace() +blurrer = BlurFace(method='gaussian') + +cap = cv2.VideoCapture("input_video.mp4") +fps = 
cap.get(cv2.CAP_PROP_FPS)
+width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    faces = detector.detect(frame)
+    frame = blurrer.anonymize(frame, faces, inplace=True)
+    out.write(frame)
+
+cap.release()
+out.release()
+```
+
+---
+
+## Selective Anonymization
+
+### Exclude Specific Faces
+
+```python
+import numpy as np
+
+def anonymize_except(image, all_faces, exclude_embeddings, recognizer, blurrer, threshold=0.6):
+    """Anonymize all faces except those matching exclude_embeddings."""
+    faces_to_blur = []
+
+    for face in all_faces:
+        # Get embedding
+        embedding = recognizer.get_normalized_embedding(image, face.landmarks)
+
+        # Check if this face should be excluded
+        should_exclude = False
+        for ref_emb in exclude_embeddings:
+            similarity = np.dot(embedding, ref_emb.T)[0][0]
+            if similarity > threshold:
+                should_exclude = True
+                break
+
+        if not should_exclude:
+            faces_to_blur.append(face)
+
+    # Blur the remaining faces
+    return blurrer.anonymize(image, faces_to_blur)
+```
+
+### Confidence-Based
+
+```python
+def anonymize_low_confidence(image, faces, blurrer, confidence_threshold=0.8):
+    """Anonymize faces below confidence threshold."""
+    faces_to_blur = [f for f in faces if f.confidence < confidence_threshold]
+    return blurrer.anonymize(image, faces_to_blur)
+```
+
+---
+
+## Comparison
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+methods = ['pixelate', 'gaussian', 'blackout', 'elliptical', 'median']
+
+for method in methods:
+    blurrer = BlurFace(method=method)
+    result = blurrer.anonymize(image.copy(), faces)
+    cv2.imwrite(f"anonymized_{method}.jpg", result)
+```
+
+---
+
+## Command-Line Tool
+
+```bash
+# Anonymize image with pixelation
+python tools/face_anonymize.py --source photo.jpg
+
+# Real-time webcam
+python tools/face_anonymize.py --source 0 --method gaussian
+
+# Custom blur strength
+python tools/face_anonymize.py --source photo.jpg --method gaussian --blur-strength 5.0
+```
+
+---
+
+## Next Steps
+
+- [Anonymize Stream Recipe](../recipes/anonymize-stream.md) - Video pipeline
+- [Detection](detection.md) - Face detection options
+- [Batch Processing Recipe](../recipes/batch-processing.md) - Process multiple files
diff --git a/docs/modules/recognition.md b/docs/modules/recognition.md
new file mode 100644
index 0000000..fc4ef94
--- /dev/null
+++ b/docs/modules/recognition.md
@@ -0,0 +1,240 @@
+# Recognition
+
+Face recognition extracts embeddings for identity verification and face search.
+
+---
+
+## Available Models
+
+| Model | Backbone | Size | Embedding Dim | Best For |
+|-------|----------|------|---------------|----------|
+| **ArcFace** | MobileNet/ResNet | 8-166 MB | 512 | General use (recommended) |
+| **MobileFace** | MobileNet V2/V3 | 1-10 MB | 512 | Mobile/Edge |
+| **SphereFace** | Sphere20/36 | 50-92 MB | 512 | Research |
+
+---
+
+## ArcFace
+
+State-of-the-art recognition using additive angular margin loss. 
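+
+For reference, the loss behind the name (with feature scale \(s\), additive margin \(m\), and \(\theta_{y_i}\) the angle between an embedding and its class center) is, in LaTeX notation:
+
+$$
+\mathcal{L} = -\frac{1}{N}\sum_{i=1}^{N}\log\frac{e^{s\cos(\theta_{y_i}+m)}}{e^{s\cos(\theta_{y_i}+m)}+\sum_{j\neq y_i}e^{s\cos\theta_j}}
+$$
+
+The margin only matters at training time; at inference you simply compare the 512-D embeddings by cosine similarity.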
+ +### Basic Usage + +```python +from uniface import RetinaFace, ArcFace + +detector = RetinaFace() +recognizer = ArcFace() + +# Detect face +faces = detector.detect(image) + +# Extract embedding +if faces: + embedding = recognizer.get_normalized_embedding(image, faces[0].landmarks) + print(f"Embedding shape: {embedding.shape}") # (1, 512) +``` + +### Model Variants + +```python +from uniface import ArcFace +from uniface.constants import ArcFaceWeights + +# Lightweight (default) +recognizer = ArcFace(model_name=ArcFaceWeights.MNET) + +# High accuracy +recognizer = ArcFace(model_name=ArcFaceWeights.RESNET) +``` + +| Variant | Backbone | Size | Use Case | +|---------|----------|------|----------| +| **MNET** ⭐ | MobileNet | 8 MB | Balanced (recommended) | +| RESNET | ResNet50 | 166 MB | Maximum accuracy | + +--- + +## MobileFace + +Lightweight recognition for resource-constrained environments. + +### Basic Usage + +```python +from uniface import MobileFace + +recognizer = MobileFace() +embedding = recognizer.get_normalized_embedding(image, landmarks) +``` + +### Model Variants + +```python +from uniface import MobileFace +from uniface.constants import MobileFaceWeights + +# Ultra-lightweight +recognizer = MobileFace(model_name=MobileFaceWeights.MNET_025) + +# Balanced (default) +recognizer = MobileFace(model_name=MobileFaceWeights.MNET_V2) + +# Higher accuracy +recognizer = MobileFace(model_name=MobileFaceWeights.MNET_V3_LARGE) +``` + +| Variant | Params | Size | LFW | Use Case | +|---------|--------|------|-----|----------| +| MNET_025 | 0.36M | 1 MB | 98.8% | Ultra-lightweight | +| **MNET_V2** ⭐ | 2.29M | 4 MB | 99.6% | Mobile/Edge | +| MNET_V3_SMALL | 1.25M | 3 MB | 99.3% | Mobile optimized | +| MNET_V3_LARGE | 3.52M | 10 MB | 99.5% | Balanced mobile | + +--- + +## SphereFace + +Recognition using angular softmax loss (A-Softmax). + +### Basic Usage + +```python +from uniface import SphereFace +from uniface.constants import SphereFaceWeights + +recognizer = SphereFace(model_name=SphereFaceWeights.SPHERE20) +embedding = recognizer.get_normalized_embedding(image, landmarks) +``` + +| Variant | Params | Size | LFW | Use Case | +|---------|--------|------|-----|----------| +| SPHERE20 | 24.5M | 50 MB | 99.7% | Research | +| SPHERE36 | 34.6M | 92 MB | 99.7% | Research | + +--- + +## Face Comparison + +### Compute Similarity + +```python +from uniface import compute_similarity +import numpy as np + +# Extract embeddings +emb1 = recognizer.get_normalized_embedding(image1, landmarks1) +emb2 = recognizer.get_normalized_embedding(image2, landmarks2) + +# Method 1: Using utility function +similarity = compute_similarity(emb1, emb2) + +# Method 2: Direct computation +similarity = np.dot(emb1, emb2.T)[0][0] + +print(f"Similarity: {similarity:.4f}") +``` + +### Threshold Guidelines + +| Threshold | Decision | Use Case | +|-----------|----------|----------| +| > 0.7 | Very high confidence | Security-critical | +| > 0.6 | Same person | General verification | +| 0.4 - 0.6 | Uncertain | Manual review needed | +| < 0.4 | Different people | Rejection | + +--- + +## Face Alignment + +Recognition models require aligned faces. 
UniFace handles this internally:
+
+```python
+# Alignment is done automatically
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+
+# Or manually align
+from uniface import face_alignment
+
+aligned_face = face_alignment(image, landmarks)
+# Returns: 112x112 aligned face image
+```
+
+---
+
+## Building a Face Database
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, ArcFace
+
+detector = RetinaFace()
+recognizer = ArcFace()
+
+# Build database (person_images: your own mapping of person_id -> image path)
+database = {}
+for person_id, image_path in person_images.items():
+    image = cv2.imread(image_path)
+    faces = detector.detect(image)
+
+    if faces:
+        embedding = recognizer.get_normalized_embedding(image, faces[0].landmarks)
+        database[person_id] = embedding
+
+# Save for later use
+np.savez('face_database.npz', **database)
+
+# Load database
+data = np.load('face_database.npz')
+database = {key: data[key] for key in data.files}
+```
+
+---
+
+## Face Search
+
+Find a person in a database:
+
+```python
+def search_face(query_embedding, database, threshold=0.6):
+    """Find best match in database."""
+    best_match = None
+    best_similarity = -1
+
+    for person_id, db_embedding in database.items():
+        similarity = np.dot(query_embedding, db_embedding.T)[0][0]
+
+        if similarity > best_similarity and similarity > threshold:
+            best_similarity = similarity
+            best_match = person_id
+
+    return best_match, best_similarity
+
+# Usage
+query_embedding = recognizer.get_normalized_embedding(query_image, landmarks)
+match, similarity = search_face(query_embedding, database)
+
+if match:
+    print(f"Found: {match} (similarity: {similarity:.4f})")
+else:
+    print("No match found")
+```
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_recognizer
+
+recognizer = create_recognizer('arcface')
+```
+
+---
+
+## Next Steps
+
+- [Landmarks](landmarks.md) - 106-point landmarks
+- [Face Search Recipe](../recipes/face-search.md) - Complete search system
+- [Thresholds](../concepts/thresholds-calibration.md) - Calibration guide
diff --git a/docs/modules/spoofing.md b/docs/modules/spoofing.md
new file mode 100644
index 0000000..1d2ff34
--- /dev/null
+++ b/docs/modules/spoofing.md
@@ -0,0 +1,266 @@
+# Anti-Spoofing
+
+Face anti-spoofing detects whether a face is real (live) or fake (photo, video replay, mask). 
+ +--- + +## Available Models + +| Model | Size | Notes | +|-------|------|-------| +| MiniFASNet V1SE | 1.2 MB | Squeeze-and-Excitation variant | +| **MiniFASNet V2** ⭐ | 1.2 MB | Improved version (recommended) | + +--- + +## Basic Usage + +```python +import cv2 +from uniface import RetinaFace +from uniface.spoofing import MiniFASNet + +detector = RetinaFace() +spoofer = MiniFASNet() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +for face in faces: + result = spoofer.predict(image, face.bbox) + + label = "Real" if result.is_real else "Fake" + print(f"{label}: {result.confidence:.1%}") +``` + +--- + +## Output Format + +```python +result = spoofer.predict(image, face.bbox) + +# SpoofingResult dataclass +result.is_real # True = real, False = fake +result.confidence # 0.0 to 1.0 +``` + +--- + +## Model Variants + +```python +from uniface.spoofing import MiniFASNet +from uniface.constants import MiniFASNetWeights + +# Default (V2, recommended) +spoofer = MiniFASNet() + +# V1SE variant +spoofer = MiniFASNet(model_name=MiniFASNetWeights.V1SE) +``` + +| Variant | Size | Scale Factor | +|---------|------|--------------| +| V1SE | 1.2 MB | 4.0 | +| **V2** ⭐ | 1.2 MB | 2.7 | + +--- + +## Confidence Thresholds + +The default threshold is 0.5. Adjust for your use case: + +```python +result = spoofer.predict(image, face.bbox) + +# High security (fewer false accepts) +HIGH_THRESHOLD = 0.7 +if result.confidence > HIGH_THRESHOLD: + print("Real (high confidence)") +else: + print("Suspicious") + +# Balanced +if result.is_real: # Uses default 0.5 threshold + print("Real") +else: + print("Fake") +``` + +--- + +## Visualization + +```python +import cv2 + +def draw_spoofing_result(image, face, result): + """Draw spoofing result on image.""" + x1, y1, x2, y2 = map(int, face.bbox) + + # Color based on result + color = (0, 255, 0) if result.is_real else (0, 0, 255) + label = "Real" if result.is_real else "Fake" + + # Draw bounding box + cv2.rectangle(image, (x1, y1), (x2, y2), color, 2) + + # Draw label + text = f"{label}: {result.confidence:.1%}" + cv2.putText(image, text, (x1, y1 - 10), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) + + return image + +# Usage +for face in faces: + result = spoofer.predict(image, face.bbox) + image = draw_spoofing_result(image, face, result) + +cv2.imwrite("spoofing_result.jpg", image) +``` + +--- + +## Real-Time Liveness Detection + +```python +import cv2 +from uniface import RetinaFace +from uniface.spoofing import MiniFASNet + +detector = RetinaFace() +spoofer = MiniFASNet() + +cap = cv2.VideoCapture(0) + +while True: + ret, frame = cap.read() + if not ret: + break + + faces = detector.detect(frame) + + for face in faces: + result = spoofer.predict(frame, face.bbox) + + # Draw result + x1, y1, x2, y2 = map(int, face.bbox) + color = (0, 255, 0) if result.is_real else (0, 0, 255) + label = f"{'Real' if result.is_real else 'Fake'}: {result.confidence:.0%}" + + cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2) + cv2.putText(frame, label, (x1, y1 - 10), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) + + cv2.imshow("Liveness Detection", frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +cap.release() +cv2.destroyAllWindows() +``` + +--- + +## Use Cases + +### Access Control + +```python +def verify_liveness(image, face, spoofer, threshold=0.6): + """Verify face is real for access control.""" + result = spoofer.predict(image, face.bbox) + + if result.is_real and result.confidence > threshold: + return True, result.confidence + return False, 
result.confidence + +# Usage +is_live, confidence = verify_liveness(image, face, spoofer) +if is_live: + print(f"Access granted (confidence: {confidence:.1%})") +else: + print(f"Access denied - possible spoof attempt") +``` + +### Multi-Frame Verification + +For higher security, verify across multiple frames: + +```python +def verify_liveness_multiframe(frames, detector, spoofer, min_real=3): + """Verify liveness across multiple frames.""" + real_count = 0 + + for frame in frames: + faces = detector.detect(frame) + if not faces: + continue + + result = spoofer.predict(frame, faces[0].bbox) + if result.is_real: + real_count += 1 + + return real_count >= min_real + +# Collect frames and verify +frames = [] +for _ in range(5): + ret, frame = cap.read() + if ret: + frames.append(frame) + +is_verified = verify_liveness_multiframe(frames, detector, spoofer) +``` + +--- + +## Attack Types Detected + +MiniFASNet can detect various spoof attacks: + +| Attack Type | Detection | +|-------------|-----------| +| Printed photos | ✅ | +| Screen replay | ✅ | +| Video replay | ✅ | +| Paper masks | ✅ | +| 3D masks | Limited | + +!!! warning "Limitations" + - High-quality 3D masks may not be detected + - Performance varies with lighting and image quality + - Always combine with other verification methods for high-security applications + +--- + +## Command-Line Tool + +```bash +# Image +python tools/spoofing.py --source photo.jpg + +# Webcam +python tools/spoofing.py --source 0 +``` + +--- + +## Factory Function + +```python +from uniface import create_spoofer + +spoofer = create_spoofer() # Returns MiniFASNet +``` + +--- + +## Next Steps + +- [Privacy](privacy.md) - Face anonymization +- [Detection](detection.md) - Face detection +- [Recognition](recognition.md) - Face recognition diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 0000000..2825ed6 --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,362 @@ +# Quickstart + +Get up and running with UniFace in 5 minutes. This guide covers the most common use cases. + +--- + +## 1. Face Detection + +Detect faces in an image: + +```python +import cv2 +from uniface import RetinaFace + +# Load image +image = cv2.imread("photo.jpg") + +# Initialize detector (models auto-download on first use) +detector = RetinaFace() + +# Detect faces +faces = detector.detect(image) + +# Print results +for i, face in enumerate(faces): + print(f"Face {i+1}:") + print(f" Confidence: {face.confidence:.2f}") + print(f" BBox: {face.bbox}") + print(f" Landmarks: {len(face.landmarks)} points") +``` + +**Output:** + +``` +Face 1: + Confidence: 0.99 + BBox: [120.5, 85.3, 245.8, 210.6] + Landmarks: 5 points +``` + +--- + +## 2. Visualize Detections + +Draw bounding boxes and landmarks: + +```python +import cv2 +from uniface import RetinaFace +from uniface.visualization import draw_detections + +# Detect faces +detector = RetinaFace() +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +# Extract visualization data +bboxes = [f.bbox for f in faces] +scores = [f.confidence for f in faces] +landmarks = [f.landmarks for f in faces] + +# Draw on image +draw_detections( + image=image, + bboxes=bboxes, + scores=scores, + landmarks=landmarks, + vis_threshold=0.6, +) + +# Save result +cv2.imwrite("output.jpg", image) +``` + +--- + +## 3. 
Face Recognition + +Compare two faces: + +```python +import cv2 +import numpy as np +from uniface import RetinaFace, ArcFace + +# Initialize models +detector = RetinaFace() +recognizer = ArcFace() + +# Load two images +image1 = cv2.imread("person1.jpg") +image2 = cv2.imread("person2.jpg") + +# Detect faces +faces1 = detector.detect(image1) +faces2 = detector.detect(image2) + +if faces1 and faces2: + # Extract embeddings + emb1 = recognizer.get_normalized_embedding(image1, faces1[0].landmarks) + emb2 = recognizer.get_normalized_embedding(image2, faces2[0].landmarks) + + # Compute similarity (cosine similarity) + similarity = np.dot(emb1, emb2.T)[0][0] + + # Interpret result + if similarity > 0.6: + print(f"Same person (similarity: {similarity:.3f})") + else: + print(f"Different people (similarity: {similarity:.3f})") +``` + +!!! tip "Similarity Thresholds" + - `> 0.6`: Same person (high confidence) + - `0.4 - 0.6`: Uncertain (manual review) + - `< 0.4`: Different people + +--- + +## 4. Age & Gender Detection + +```python +import cv2 +from uniface import RetinaFace, AgeGender + +# Initialize models +detector = RetinaFace() +age_gender = AgeGender() + +# Load image +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +# Predict attributes +for i, face in enumerate(faces): + result = age_gender.predict(image, face.bbox) + print(f"Face {i+1}: {result.sex}, {result.age} years old") +``` + +**Output:** + +``` +Face 1: Male, 32 years old +Face 2: Female, 28 years old +``` + +--- + +## 5. FairFace Attributes + +Detect race, gender, and age group: + +```python +import cv2 +from uniface import RetinaFace, FairFace + +detector = RetinaFace() +fairface = FairFace() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +for i, face in enumerate(faces): + result = fairface.predict(image, face.bbox) + print(f"Face {i+1}: {result.sex}, {result.age_group}, {result.race}") +``` + +**Output:** + +``` +Face 1: Male, 30-39, East Asian +Face 2: Female, 20-29, White +``` + +--- + +## 6. Facial Landmarks (106 Points) + +```python +import cv2 +from uniface import RetinaFace, Landmark106 + +detector = RetinaFace() +landmarker = Landmark106() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +if faces: + landmarks = landmarker.get_landmarks(image, faces[0].bbox) + print(f"Detected {len(landmarks)} landmarks") + + # Draw landmarks + for x, y in landmarks.astype(int): + cv2.circle(image, (x, y), 2, (0, 255, 0), -1) + + cv2.imwrite("landmarks.jpg", image) +``` + +--- + +## 7. Gaze Estimation + +```python +import cv2 +import numpy as np +from uniface import RetinaFace, MobileGaze +from uniface.visualization import draw_gaze + +detector = RetinaFace() +gaze_estimator = MobileGaze() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +for i, face in enumerate(faces): + x1, y1, x2, y2 = map(int, face.bbox[:4]) + face_crop = image[y1:y2, x1:x2] + + if face_crop.size > 0: + result = gaze_estimator.estimate(face_crop) + print(f"Face {i+1}: pitch={np.degrees(result.pitch):.1f}°, yaw={np.degrees(result.yaw):.1f}°") + + # Draw gaze direction + draw_gaze(image, face.bbox, result.pitch, result.yaw) + +cv2.imwrite("gaze_output.jpg", image) +``` + +--- + +## 8. 
Face Parsing + +Segment face into semantic components: + +```python +import cv2 +import numpy as np +from uniface.parsing import BiSeNet +from uniface.visualization import vis_parsing_maps + +parser = BiSeNet() + +# Load face image (already cropped) +face_image = cv2.imread("face.jpg") + +# Parse face into 19 components +mask = parser.parse(face_image) + +# Visualize with overlay +face_rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB) +vis_result = vis_parsing_maps(face_rgb, mask, save_image=False) + +print(f"Detected {len(np.unique(mask))} facial components") +``` + +--- + +## 9. Face Anonymization + +Blur faces for privacy protection: + +```python +from uniface.privacy import anonymize_faces +import cv2 + +# One-liner: automatic detection and blurring +image = cv2.imread("group_photo.jpg") +anonymized = anonymize_faces(image, method='pixelate') +cv2.imwrite("anonymized.jpg", anonymized) +``` + +**Manual control:** + +```python +from uniface import RetinaFace +from uniface.privacy import BlurFace + +detector = RetinaFace() +blurrer = BlurFace(method='gaussian', blur_strength=5.0) + +faces = detector.detect(image) +anonymized = blurrer.anonymize(image, faces) +``` + +**Available methods:** + +| Method | Description | +|--------|-------------| +| `pixelate` | Blocky effect (news media standard) | +| `gaussian` | Smooth, natural blur | +| `blackout` | Solid color boxes (maximum privacy) | +| `elliptical` | Soft oval blur (natural face shape) | +| `median` | Edge-preserving blur | + +--- + +## 10. Face Anti-Spoofing + +Detect real vs. fake faces: + +```python +import cv2 +from uniface import RetinaFace +from uniface.spoofing import MiniFASNet + +detector = RetinaFace() +spoofer = MiniFASNet() + +image = cv2.imread("photo.jpg") +faces = detector.detect(image) + +for i, face in enumerate(faces): + result = spoofer.predict(image, face.bbox) + label = 'Real' if result.is_real else 'Fake' + print(f"Face {i+1}: {label} ({result.confidence:.1%})") +``` + +--- + +## 11. Webcam Demo + +Real-time face detection: + +```python +import cv2 +from uniface import RetinaFace +from uniface.visualization import draw_detections + +detector = RetinaFace() +cap = cv2.VideoCapture(0) + +print("Press 'q' to quit") + +while True: + ret, frame = cap.read() + if not ret: + break + + faces = detector.detect(frame) + + bboxes = [f.bbox for f in faces] + scores = [f.confidence for f in faces] + landmarks = [f.landmarks for f in faces] + draw_detections(image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks) + + cv2.imshow("UniFace - Press 'q' to quit", frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +cap.release() +cv2.destroyAllWindows() +``` + +--- + +## Next Steps + +- [Concepts Overview](concepts/overview.md) - Understand the architecture +- [Detection Module](modules/detection.md) - Deep dive into detection models +- [Recipes](recipes/image-pipeline.md) - Complete workflow examples diff --git a/docs/recipes/anonymize-stream.md b/docs/recipes/anonymize-stream.md new file mode 100644 index 0000000..f361bda --- /dev/null +++ b/docs/recipes/anonymize-stream.md @@ -0,0 +1,88 @@ +# Anonymize Stream + +Blur faces in real-time video streams for privacy protection. 
+
+---
+
+## Webcam
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='pixelate')
+cap = cv2.VideoCapture(0)
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    faces = detector.detect(frame)
+    frame = blurrer.anonymize(frame, faces, inplace=True)
+
+    cv2.imshow('Anonymized', frame)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Video File
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='gaussian')
+
+cap = cv2.VideoCapture("input.mp4")
+fps = cap.get(cv2.CAP_PROP_FPS)
+w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    faces = detector.detect(frame)
+    blurrer.anonymize(frame, faces, inplace=True)
+    out.write(frame)
+
+cap.release()
+out.release()
+```
+
+---
+
+## One-Liner
+
+```python
+from uniface.privacy import anonymize_faces
+import cv2
+
+image = cv2.imread("photo.jpg")
+result = anonymize_faces(image, method='pixelate')
+cv2.imwrite("anonymized.jpg", result)
+```
+
+---
+
+## Blur Methods
+
+| Method | Code |
+|--------|------|
+| Pixelate | `BlurFace(method='pixelate', pixel_blocks=10)` |
+| Gaussian | `BlurFace(method='gaussian', blur_strength=3.0)` |
+| Blackout | `BlurFace(method='blackout', color=(0,0,0))` |
+| Elliptical | `BlurFace(method='elliptical', margin=20)` |
+| Median | `BlurFace(method='median', blur_strength=3.0)` |
diff --git a/docs/recipes/batch-processing.md b/docs/recipes/batch-processing.md
new file mode 100644
index 0000000..7fa8880
--- /dev/null
+++ b/docs/recipes/batch-processing.md
@@ -0,0 +1,353 @@
+# Batch Processing
+
+Process multiple images efficiently. 
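+
+One habit that pays off before diving in: long batch jobs get interrupted, so skipping files whose output already exists makes re-runs cheap. A minimal sketch (directory names are placeholders; plug in the detection code from the sections below):
+
+```python
+from pathlib import Path
+
+for image_path in Path("input_images/").glob("*.jpg"):
+    out_file = Path("output_images/") / image_path.name
+    if out_file.exists():  # already processed in an earlier run
+        continue
+    # ... detect, annotate, and save to out_file as shown below
+```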
+ +--- + +## Basic Batch Processing + +```python +import cv2 +from pathlib import Path +from uniface import RetinaFace +from uniface.visualization import draw_detections + +detector = RetinaFace() + +def process_directory(input_dir, output_dir): + """Process all images in a directory.""" + input_path = Path(input_dir) + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + # Supported image formats + extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp'] + image_files = [] + for ext in extensions: + image_files.extend(input_path.glob(ext)) + image_files.extend(input_path.glob(ext.upper())) + + print(f"Found {len(image_files)} images") + + results = {} + + for image_path in image_files: + print(f"Processing {image_path.name}...") + + image = cv2.imread(str(image_path)) + if image is None: + print(f" Failed to load {image_path.name}") + continue + + faces = detector.detect(image) + print(f" Found {len(faces)} face(s)") + + # Store results + results[image_path.name] = { + 'num_faces': len(faces), + 'faces': [ + { + 'bbox': face.bbox.tolist(), + 'confidence': float(face.confidence) + } + for face in faces + ] + } + + # Visualize and save + if faces: + draw_detections( + image=image, + bboxes=[f.bbox for f in faces], + scores=[f.confidence for f in faces], + landmarks=[f.landmarks for f in faces] + ) + + output_file = output_path / image_path.name + cv2.imwrite(str(output_file), image) + + return results + +# Usage +results = process_directory("input_images/", "output_images/") +print(f"\nProcessed {len(results)} images") +``` + +--- + +## Parallel Processing + +Use multiprocessing for faster batch processing: + +```python +import cv2 +from pathlib import Path +from concurrent.futures import ProcessPoolExecutor, as_completed +from uniface import RetinaFace + +def process_single_image(image_path, output_dir): + """Process a single image (runs in worker process).""" + # Create detector in each process + detector = RetinaFace() + + image = cv2.imread(str(image_path)) + if image is None: + return image_path.name, {'error': 'Failed to load'} + + faces = detector.detect(image) + + result = { + 'num_faces': len(faces), + 'faces': [ + { + 'bbox': face.bbox.tolist(), + 'confidence': float(face.confidence) + } + for face in faces + ] + } + + # Save result + output_path = Path(output_dir) / image_path.name + cv2.imwrite(str(output_path), image) + + return image_path.name, result + +def batch_process_parallel(input_dir, output_dir, max_workers=4): + """Process images in parallel.""" + input_path = Path(input_dir) + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + image_files = list(input_path.glob("*.jpg")) + list(input_path.glob("*.png")) + + results = {} + + with ProcessPoolExecutor(max_workers=max_workers) as executor: + futures = { + executor.submit(process_single_image, img, output_dir): img + for img in image_files + } + + for future in as_completed(futures): + name, result = future.result() + results[name] = result + print(f"Completed: {name} - {result.get('num_faces', 'error')} faces") + + return results + +# Usage +results = batch_process_parallel("input_images/", "output_images/", max_workers=4) +``` + +--- + +## Progress Tracking + +Use tqdm for progress bars: + +```python +from tqdm import tqdm + +def process_with_progress(input_dir, output_dir): + """Process with progress bar.""" + detector = RetinaFace() + + input_path = Path(input_dir) + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + image_files 
= list(input_path.glob("*.jpg")) + list(input_path.glob("*.png"))
+
+    results = {}
+
+    for image_path in tqdm(image_files, desc="Processing images"):
+        image = cv2.imread(str(image_path))
+        if image is None:
+            continue
+
+        faces = detector.detect(image)
+        results[image_path.name] = len(faces)
+
+        cv2.imwrite(str(output_path / image_path.name), image)
+
+    return results
+
+# Usage
+results = process_with_progress("input/", "output/")
+print(f"Total faces found: {sum(results.values())}")
+```
+
+---
+
+## Batch Embedding Extraction
+
+Extract embeddings for a face database:
+
+```python
+import cv2
+import numpy as np
+from pathlib import Path
+from uniface import RetinaFace, ArcFace
+
+def extract_embeddings(image_dir):
+    """Extract embeddings from all faces."""
+    detector = RetinaFace()
+    recognizer = ArcFace()
+
+    embeddings = {}
+
+    for image_path in Path(image_dir).glob("*.jpg"):
+        image = cv2.imread(str(image_path))
+        if image is None:
+            continue
+
+        faces = detector.detect(image)
+
+        if faces:
+            # Use first face
+            embedding = recognizer.get_normalized_embedding(
+                image, faces[0].landmarks
+            )
+            embeddings[image_path.stem] = embedding
+            print(f"Extracted: {image_path.stem}")
+
+    return embeddings
+
+def save_embeddings(embeddings, output_path):
+    """Save embeddings to file."""
+    np.savez(output_path, **embeddings)
+    print(f"Saved {len(embeddings)} embeddings to {output_path}")
+
+def load_embeddings(input_path):
+    """Load embeddings from file."""
+    data = np.load(input_path)
+    return {key: data[key] for key in data.files}
+
+# Usage
+embeddings = extract_embeddings("faces/")
+save_embeddings(embeddings, "embeddings.npz")
+
+# Later...
+loaded = load_embeddings("embeddings.npz")
+```
+
+---
+
+## CSV Output
+
+Export results to CSV:
+
+```python
+import csv
+from pathlib import Path
+
+def export_to_csv(results, output_path):
+    """Export detection results to CSV."""
+    with open(output_path, 'w', newline='') as f:
+        writer = csv.writer(f)
+        writer.writerow(['filename', 'face_id', 'x1', 'y1', 'x2', 'y2', 'confidence'])
+
+        for filename, data in results.items():
+            for i, face in enumerate(data['faces']):
+                bbox = face['bbox']
+                writer.writerow([
+                    filename, i,
+                    bbox[0], bbox[1], bbox[2], bbox[3],
+                    face['confidence']
+                ])
+
+    print(f"Exported to {output_path}")
+
+# Usage
+results = process_directory("input/", "output/")
+export_to_csv(results, "detections.csv")
+```
+
+---
+
+## Memory-Efficient Processing
+
+For large batches, process in chunks:
+
+```python
+import gc
+
+def process_in_chunks(image_files, chunk_size=100):
+    """Process images in memory-efficient chunks."""
+    detector = RetinaFace()
+
+    all_results = {}
+
+    for i in range(0, len(image_files), chunk_size):
+        chunk = image_files[i:i + chunk_size]
+        print(f"Processing chunk {i//chunk_size + 1}/{(len(image_files)-1)//chunk_size + 1}")
+
+        for image_path in chunk:
+            image = cv2.imread(str(image_path))
+            if image is None:
+                continue
+
+            faces = detector.detect(image)
+            all_results[image_path.name] = len(faces)
+
+            # Free memory
+            del image
+
+        # Optional: force garbage collection between chunks
+        gc.collect()
+
+    return all_results
+```
+
+---
+
+## Error Handling
+
+Robust batch processing with error handling:
+
+```python
+import logging
+from pathlib import Path
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def robust_batch_process(input_dir, output_dir):
+    """Batch process with error handling."""
+    detector = RetinaFace()
+
+    input_path = Path(input_dir)
+    output_path = Path(output_dir)
+    
output_path.mkdir(parents=True, exist_ok=True) + + image_files = list(input_path.glob("*.[jJ][pP][gG]")) + + success_count = 0 + error_count = 0 + + for image_path in image_files: + try: + image = cv2.imread(str(image_path)) + if image is None: + raise ValueError("Failed to load image") + + faces = detector.detect(image) + + cv2.imwrite(str(output_path / image_path.name), image) + success_count += 1 + logger.info(f"Processed {image_path.name}: {len(faces)} faces") + + except Exception as e: + error_count += 1 + logger.error(f"Error processing {image_path.name}: {e}") + + logger.info(f"Completed: {success_count} success, {error_count} errors") + return success_count, error_count +``` + +--- + +## Next Steps + +- [Video & Webcam](video-webcam.md) - Real-time processing +- [Face Search](face-search.md) - Search through embeddings +- [Image Pipeline](image-pipeline.md) - Full analysis pipeline diff --git a/docs/recipes/custom-models.md b/docs/recipes/custom-models.md new file mode 100644 index 0000000..fd077c5 --- /dev/null +++ b/docs/recipes/custom-models.md @@ -0,0 +1,96 @@ +# Custom Models + +Add your own ONNX models to UniFace. + +--- + +## Add Detection Model + +```python +from uniface.detection.base import BaseDetector +from uniface.onnx_utils import create_onnx_session +from uniface.types import Face +import numpy as np + +class MyDetector(BaseDetector): + def __init__(self, model_path: str, confidence_threshold: float = 0.5): + self.session = create_onnx_session(model_path) + self.threshold = confidence_threshold + + def detect(self, image: np.ndarray) -> list[Face]: + # Preprocess + input_tensor = self._preprocess(image) + + # Inference + outputs = self.session.run(None, {'input': input_tensor}) + + # Postprocess + faces = self._postprocess(outputs, image.shape) + return faces + + def _preprocess(self, image): + # Your preprocessing logic + pass + + def _postprocess(self, outputs, shape): + # Your postprocessing logic + pass +``` + +--- + +## Add Recognition Model + +```python +from uniface.recognition.base import BaseRecognizer +from uniface.onnx_utils import create_onnx_session +from uniface import face_alignment +import numpy as np + +class MyRecognizer(BaseRecognizer): + def __init__(self, model_path: str): + self.session = create_onnx_session(model_path) + + def get_normalized_embedding(self, image: np.ndarray, landmarks: np.ndarray) -> np.ndarray: + # Align face + aligned = face_alignment(image, landmarks) + + # Preprocess + input_tensor = self._preprocess(aligned) + + # Inference + embedding = self.session.run(None, {'input': input_tensor})[0] + + # Normalize + embedding = embedding / np.linalg.norm(embedding) + return embedding + + def _preprocess(self, image): + # Your preprocessing logic + pass +``` + +--- + +## Register Weights + +Add to `uniface/constants.py`: + +```python +class MyModelWeights(str, Enum): + DEFAULT = "my_model" + +MODEL_URLS[MyModelWeights.DEFAULT] = 'https://...' +MODEL_SHA256[MyModelWeights.DEFAULT] = 'sha256hash...' +``` + +--- + +## Use Custom Model + +```python +from my_module import MyDetector + +detector = MyDetector("path/to/model.onnx") +faces = detector.detect(image) +``` diff --git a/docs/recipes/face-search.md b/docs/recipes/face-search.md new file mode 100644 index 0000000..ffedae2 --- /dev/null +++ b/docs/recipes/face-search.md @@ -0,0 +1,340 @@ +# Face Search + +Build a face search system for finding people in images. 
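+
+The `FaceDatabase` class below keeps the search loop readable by comparing one person at a time. If your database grows to thousands of identities, the same cosine search collapses into a single matrix product; a sketch, assuming the stored embeddings are L2-normalized (which `get_normalized_embedding` produces):
+
+```python
+import numpy as np
+
+def search_vectorized(query, embeddings):
+    """query: (1, 512) array; embeddings: dict of person_id -> (1, 512) arrays."""
+    ids = list(embeddings.keys())
+    matrix = np.vstack([embeddings[p] for p in ids])  # (N, 512)
+    sims = matrix @ query.T                           # (N, 1) cosine similarities
+    best = int(np.argmax(sims))
+    return ids[best], float(sims[best, 0])
+```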
+ +--- + +## Build Face Database + +```python +import numpy as np +import cv2 +from pathlib import Path +from uniface import RetinaFace, ArcFace + +class FaceDatabase: + def __init__(self): + self.detector = RetinaFace() + self.recognizer = ArcFace() + self.embeddings = {} + self.metadata = {} + + def add_face(self, person_id, image, metadata=None): + """Add a face to the database.""" + faces = self.detector.detect(image) + + if not faces: + raise ValueError(f"No face found for {person_id}") + + # Use highest confidence face + face = max(faces, key=lambda f: f.confidence) + embedding = self.recognizer.get_normalized_embedding(image, face.landmarks) + + self.embeddings[person_id] = embedding + self.metadata[person_id] = metadata or {} + + return True + + def add_from_directory(self, directory): + """Add faces from a directory (filename = person_id).""" + dir_path = Path(directory) + + for image_path in dir_path.glob("*.jpg"): + person_id = image_path.stem + image = cv2.imread(str(image_path)) + + try: + self.add_face(person_id, image, {'source': str(image_path)}) + print(f"Added: {person_id}") + except ValueError as e: + print(f"Skipped {person_id}: {e}") + + def search(self, image, threshold=0.6): + """Search for faces in an image.""" + faces = self.detector.detect(image) + results = [] + + for face in faces: + embedding = self.recognizer.get_normalized_embedding(image, face.landmarks) + + best_match = None + best_similarity = -1 + + for person_id, db_embedding in self.embeddings.items(): + similarity = np.dot(embedding, db_embedding.T)[0][0] + + if similarity > best_similarity: + best_similarity = similarity + best_match = person_id + + results.append({ + 'bbox': face.bbox, + 'confidence': face.confidence, + 'match': best_match if best_similarity >= threshold else None, + 'similarity': best_similarity, + 'metadata': self.metadata.get(best_match, {}) + }) + + return results + + def save(self, path): + """Save database to file.""" + np.savez( + path, + embeddings=dict(self.embeddings), + metadata=self.metadata + ) + print(f"Saved database to {path}") + + def load(self, path): + """Load database from file.""" + data = np.load(path, allow_pickle=True) + self.embeddings = data['embeddings'].item() + self.metadata = data['metadata'].item() + print(f"Loaded {len(self.embeddings)} faces from {path}") + +# Usage +db = FaceDatabase() + +# Add faces from directory +db.add_from_directory("known_faces/") + +# Save for later +db.save("face_database.npz") + +# Search for person +query_image = cv2.imread("group_photo.jpg") +results = db.search(query_image) + +for r in results: + if r['match']: + print(f"Found: {r['match']} (similarity: {r['similarity']:.3f})") + else: + print(f"Unknown face (best similarity: {r['similarity']:.3f})") +``` + +--- + +## Visualization + +```python +import cv2 + +def visualize_search_results(image, results): + """Draw search results on image.""" + for r in results: + x1, y1, x2, y2 = map(int, r['bbox']) + + if r['match']: + color = (0, 255, 0) # Green for match + label = f"{r['match']} ({r['similarity']:.2f})" + else: + color = (0, 0, 255) # Red for unknown + label = f"Unknown ({r['similarity']:.2f})" + + cv2.rectangle(image, (x1, y1), (x2, y2), color, 2) + cv2.putText(image, label, (x1, y1 - 10), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) + + return image + +# Usage +results = db.search(image) +annotated = visualize_search_results(image.copy(), results) +cv2.imwrite("search_result.jpg", annotated) +``` + +--- + +## Real-Time Search + +```python +import cv2 + +def 
realtime_search(db):
+    """Real-time face search from webcam."""
+    cap = cv2.VideoCapture(0)
+
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        results = db.search(frame, threshold=0.5)
+
+        for r in results:
+            x1, y1, x2, y2 = map(int, r['bbox'])
+
+            if r['match']:
+                color = (0, 255, 0)
+                label = r['match']
+            else:
+                color = (0, 0, 255)
+                label = "Unknown"
+
+            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+            cv2.putText(frame, label, (x1, y1 - 10),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+        cv2.imshow("Face Search", frame)
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
+
+# Usage
+db = FaceDatabase()
+db.load("face_database.npz")
+realtime_search(db)
+```
+
+---
+
+## Top-K Search
+
+Find the top K matches instead of only the best one. This is written as an extra `FaceDatabase` method (note the `self` parameter):
+
+```python
+def search_top_k(self, embedding, k=5):
+    """Find top K matches for an embedding."""
+    similarities = []
+
+    for person_id, db_embedding in self.embeddings.items():
+        similarity = np.dot(embedding, db_embedding.T)[0][0]
+        similarities.append((person_id, similarity))
+
+    # Sort by similarity (descending)
+    similarities.sort(key=lambda x: x[1], reverse=True)
+
+    return similarities[:k]
+
+# Usage (query_image holds the face to look up)
+faces = db.detector.detect(query_image)
+query_embedding = db.recognizer.get_normalized_embedding(query_image, faces[0].landmarks)
+top_matches = db.search_top_k(query_embedding, k=3)
+
+for person_id, similarity in top_matches:
+    print(f"{person_id}: {similarity:.4f}")
+```
+
+---
+
+## Batch Search
+
+Search through multiple query images:
+
+```python
+from pathlib import Path
+
+def batch_search(db, query_dir, threshold=0.6):
+    """Search for faces in multiple images."""
+    all_results = {}
+
+    for image_path in Path(query_dir).glob("*.jpg"):
+        image = cv2.imread(str(image_path))
+        results = db.search(image, threshold)
+
+        matches = [r['match'] for r in results if r['match']]
+        all_results[image_path.name] = matches
+
+        print(f"{image_path.name}: {matches}")
+
+    return all_results
+
+# Usage
+results = batch_search(db, "query_images/")
+```
+
+---
+
+## Find Person in Group Photo
+
+```python
+def find_person_in_group(db, person_id, group_image, threshold=0.6):
+    """Find a specific person in a group photo."""
+    if person_id not in db.embeddings:
+        raise ValueError(f"Person {person_id} not in database")
+
+    reference_embedding = db.embeddings[person_id]
+    faces = db.detector.detect(group_image)
+
+    best_match = None
+    best_similarity = -1
+
+    for face in faces:
+        embedding = db.recognizer.get_normalized_embedding(
+            group_image, face.landmarks
+        )
+        similarity = np.dot(embedding, reference_embedding.T)[0][0]
+
+        if similarity > best_similarity:
+            best_similarity = similarity
+            best_match = face
+
+    if best_match is not None and best_similarity >= threshold:
+        return {
+            'found': True,
+            'face': best_match,
+            'similarity': best_similarity
+        }
+
+    return {'found': False, 'similarity': best_similarity}
+
+# Usage
+group = cv2.imread("group_photo.jpg")
+result = find_person_in_group(db, "john_doe", group)
+
+if result['found']:
+    print(f"Found with similarity: {result['similarity']:.3f}")
+    # Draw the found face
+    x1, y1, x2, y2 = map(int, result['face'].bbox)
+    cv2.rectangle(group, (x1, y1), (x2, y2), (0, 255, 0), 3)
+    cv2.imwrite("found.jpg", group)
+```
+
+---
+
+## Update Database
+
+Add or update faces:
+
+```python
+def update_face(db, person_id, new_image):
+    """Update a person's face in the database."""
+    faces = db.detector.detect(new_image)
+
+    if not faces:
+        print(f"No face found in new image for {person_id}")
+        return False
+
+    face = max(faces, 
key=lambda f: f.confidence) + new_embedding = db.recognizer.get_normalized_embedding( + new_image, face.landmarks + ) + + if person_id in db.embeddings: + # Average with existing embedding + old_embedding = db.embeddings[person_id] + db.embeddings[person_id] = (old_embedding + new_embedding) / 2 + # Re-normalize + db.embeddings[person_id] /= np.linalg.norm(db.embeddings[person_id]) + print(f"Updated: {person_id}") + else: + db.embeddings[person_id] = new_embedding + print(f"Added: {person_id}") + + return True + +# Usage +update_face(db, "john_doe", cv2.imread("john_new.jpg")) +db.save("face_database.npz") +``` + +--- + +## Next Steps + +- [Anonymize Stream](anonymize-stream.md) - Privacy protection +- [Batch Processing](batch-processing.md) - Process multiple files +- [Recognition Module](../modules/recognition.md) - Model details diff --git a/docs/recipes/image-pipeline.md b/docs/recipes/image-pipeline.md new file mode 100644 index 0000000..3e9745b --- /dev/null +++ b/docs/recipes/image-pipeline.md @@ -0,0 +1,279 @@ +# Image Pipeline + +A complete pipeline for processing images with detection, recognition, and attribute analysis. + +--- + +## Basic Pipeline + +```python +import cv2 +from uniface import RetinaFace, ArcFace, AgeGender +from uniface.visualization import draw_detections + +# Initialize models +detector = RetinaFace() +recognizer = ArcFace() +age_gender = AgeGender() + +def process_image(image_path): + """Process a single image through the full pipeline.""" + # Load image + image = cv2.imread(image_path) + + # Step 1: Detect faces + faces = detector.detect(image) + print(f"Found {len(faces)} face(s)") + + results = [] + + for i, face in enumerate(faces): + # Step 2: Extract embedding + embedding = recognizer.get_normalized_embedding(image, face.landmarks) + + # Step 3: Predict attributes + attrs = age_gender.predict(image, face.bbox) + + results.append({ + 'face_id': i, + 'bbox': face.bbox, + 'confidence': face.confidence, + 'embedding': embedding, + 'gender': attrs.sex, + 'age': attrs.age + }) + + print(f" Face {i+1}: {attrs.sex}, {attrs.age} years old") + + # Visualize + draw_detections( + image=image, + bboxes=[f.bbox for f in faces], + scores=[f.confidence for f in faces], + landmarks=[f.landmarks for f in faces] + ) + + return image, results + +# Usage +result_image, results = process_image("photo.jpg") +cv2.imwrite("result.jpg", result_image) +``` + +--- + +## Using FaceAnalyzer + +For convenience, use the built-in `FaceAnalyzer`: + +```python +from uniface import FaceAnalyzer +import cv2 + +# Initialize with desired modules +analyzer = FaceAnalyzer( + detect=True, + recognize=True, + attributes=True +) + +# Process image +image = cv2.imread("photo.jpg") +faces = analyzer.analyze(image) + +# Access enriched Face objects +for face in faces: + print(f"Confidence: {face.confidence:.2f}") + print(f"Embedding: {face.embedding.shape}") + print(f"Age: {face.age}, Gender: {face.sex}") +``` + +--- + +## Full Analysis Pipeline + +Complete pipeline with all modules: + +```python +import cv2 +import numpy as np +from uniface import ( + RetinaFace, ArcFace, AgeGender, FairFace, + Landmark106, MobileGaze +) +from uniface.parsing import BiSeNet +from uniface.spoofing import MiniFASNet +from uniface.visualization import draw_detections, draw_gaze + +class FaceAnalysisPipeline: + def __init__(self): + # Initialize all models + self.detector = RetinaFace() + self.recognizer = ArcFace() + self.age_gender = AgeGender() + self.fairface = FairFace() + self.landmarker = Landmark106() + 
self.gaze = MobileGaze() + self.parser = BiSeNet() + self.spoofer = MiniFASNet() + + def analyze(self, image): + """Run full analysis pipeline.""" + faces = self.detector.detect(image) + results = [] + + for face in faces: + result = { + 'bbox': face.bbox, + 'confidence': face.confidence, + 'landmarks_5': face.landmarks + } + + # Recognition embedding + result['embedding'] = self.recognizer.get_normalized_embedding( + image, face.landmarks + ) + + # Attributes + ag_result = self.age_gender.predict(image, face.bbox) + result['age'] = ag_result.age + result['gender'] = ag_result.sex + + # FairFace attributes + ff_result = self.fairface.predict(image, face.bbox) + result['age_group'] = ff_result.age_group + result['race'] = ff_result.race + + # 106-point landmarks + result['landmarks_106'] = self.landmarker.get_landmarks( + image, face.bbox + ) + + # Gaze estimation + x1, y1, x2, y2 = map(int, face.bbox) + face_crop = image[y1:y2, x1:x2] + if face_crop.size > 0: + gaze_result = self.gaze.estimate(face_crop) + result['gaze_pitch'] = gaze_result.pitch + result['gaze_yaw'] = gaze_result.yaw + + # Face parsing + if face_crop.size > 0: + result['parsing_mask'] = self.parser.parse(face_crop) + + # Anti-spoofing + spoof_result = self.spoofer.predict(image, face.bbox) + result['is_real'] = spoof_result.is_real + result['spoof_confidence'] = spoof_result.confidence + + results.append(result) + + return results + +# Usage +pipeline = FaceAnalysisPipeline() +results = pipeline.analyze(cv2.imread("photo.jpg")) + +for i, r in enumerate(results): + print(f"\nFace {i+1}:") + print(f" Gender: {r['gender']}, Age: {r['age']}") + print(f" Race: {r['race']}, Age Group: {r['age_group']}") + print(f" Gaze: pitch={np.degrees(r['gaze_pitch']):.1f}°") + print(f" Real: {r['is_real']} ({r['spoof_confidence']:.1%})") +``` + +--- + +## Visualization Pipeline + +```python +import cv2 +import numpy as np +from uniface import RetinaFace, AgeGender, MobileGaze +from uniface.visualization import draw_detections, draw_gaze + +def visualize_analysis(image_path, output_path): + """Create annotated visualization of face analysis.""" + detector = RetinaFace() + age_gender = AgeGender() + gaze = MobileGaze() + + image = cv2.imread(image_path) + faces = detector.detect(image) + + for face in faces: + x1, y1, x2, y2 = map(int, face.bbox) + + # Draw bounding box + cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2) + + # Age and gender + attrs = age_gender.predict(image, face.bbox) + label = f"{attrs.sex}, {attrs.age}y" + cv2.putText(image, label, (x1, y1 - 10), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) + + # Gaze + face_crop = image[y1:y2, x1:x2] + if face_crop.size > 0: + gaze_result = gaze.estimate(face_crop) + draw_gaze(image, face.bbox, gaze_result.pitch, gaze_result.yaw) + + # Confidence + conf_label = f"{face.confidence:.0%}" + cv2.putText(image, conf_label, (x1, y2 + 20), + cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1) + + cv2.imwrite(output_path, image) + print(f"Saved to {output_path}") + +# Usage +visualize_analysis("input.jpg", "output.jpg") +``` + +--- + +## JSON Output + +Export results to JSON: + +```python +import json +import numpy as np + +def results_to_json(results): + """Convert analysis results to JSON-serializable format.""" + output = [] + + for r in results: + item = { + 'bbox': r['bbox'].tolist(), + 'confidence': float(r['confidence']), + 'age': int(r['age']) if r.get('age') else None, + 'gender': r.get('gender'), + 'race': r.get('race'), + 'is_real': r.get('is_real'), + 'gaze': { + 'pitch_deg': 
+                    float(np.degrees(r['gaze_pitch'])) if 'gaze_pitch' in r else None,
+                'yaw_deg': float(np.degrees(r['gaze_yaw'])) if 'gaze_yaw' in r else None
+            }
+        }
+        output.append(item)
+
+    return output
+
+# Usage
+results = pipeline.analyze(image)
+json_data = results_to_json(results)
+
+with open('results.json', 'w') as f:
+    json.dump(json_data, f, indent=2)
+```
+
+---
+
+## Next Steps
+
+- [Batch Processing](batch-processing.md) - Process multiple images
+- [Video & Webcam](video-webcam.md) - Real-time processing
+- [Face Search](face-search.md) - Build a search system
diff --git a/docs/recipes/video-webcam.md b/docs/recipes/video-webcam.md
new file mode 100644
index 0000000..713faf9
--- /dev/null
+++ b/docs/recipes/video-webcam.md
@@ -0,0 +1,392 @@
+# Video & Webcam
+
+Real-time face analysis for video streams.
+
+---
+
+## Webcam Detection
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+if not cap.isOpened():  # fail fast if the camera is unavailable
+    raise RuntimeError("Could not open webcam")
+
+print("Press 'q' to quit")
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    # Detect faces
+    faces = detector.detect(frame)
+
+    # Draw results
+    draw_detections(
+        image=frame,
+        bboxes=[f.bbox for f in faces],
+        scores=[f.confidence for f in faces],
+        landmarks=[f.landmarks for f in faces]
+    )
+
+    cv2.imshow("Face Detection", frame)
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Video File Processing
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+def process_video(input_path, output_path):
+    """Process a video file frame by frame."""
+    detector = RetinaFace()
+
+    cap = cv2.VideoCapture(input_path)
+
+    # Get video properties (some containers report FPS as 0; fall back to 30)
+    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+    # Set up the output video
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+    frame_count = 0
+
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Detect and draw
+        faces = detector.detect(frame)
+        draw_detections(
+            image=frame,
+            bboxes=[f.bbox for f in faces],
+            scores=[f.confidence for f in faces],
+            landmarks=[f.landmarks for f in faces]
+        )
+
+        out.write(frame)
+
+        frame_count += 1
+        if frame_count % 100 == 0:
+            print(f"Processed {frame_count}/{total_frames} frames")
+
+    cap.release()
+    out.release()
+    print(f"Saved to {output_path}")
+
+# Usage
+process_video("input.mp4", "output.mp4")
+```
+
+---
+
+## FPS Counter
+
+Add a frame rate display:
+
+```python
+import cv2
+import time
+from uniface import RetinaFace
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+prev_time = time.time()
+fps = 0.0
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    # Calculate FPS (guard against a zero time delta on very fast loops)
+    curr_time = time.time()
+    dt = curr_time - prev_time
+    if dt > 0:
+        fps = 1 / dt
+    prev_time = curr_time
+
+    # Detect faces
+    faces = detector.detect(frame)
+
+    # Draw FPS
+    cv2.putText(frame, f"FPS: {fps:.1f}", (10, 30),
+                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+    cv2.putText(frame, f"Faces: {len(faces)}", (10, 70),
+                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+    # Draw detections
+    for face in faces:
+        x1, y1, x2, y2 = map(int, face.bbox)
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+    cv2.imshow("Face Detection", frame)
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
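+# release the camera first so other applications can grab it right away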
+cv2.destroyAllWindows()
+```
+
+---
+
+## Skip Frames for Performance
+
+Detection is the expensive step, so running it only on every Nth frame and
+reusing the last results in between is an easy speedup:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+PROCESS_EVERY_N = 3  # Run detection on every 3rd frame
+frame_count = 0
+last_faces = []
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    frame_count += 1
+
+    # Only detect every N frames
+    if frame_count % PROCESS_EVERY_N == 0:
+        last_faces = detector.detect(frame)
+
+    # Draw the most recent detection results
+    for face in last_faces:
+        x1, y1, x2, y2 = map(int, face.bbox)
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+    cv2.imshow("Detection", frame)
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Full Analysis Pipeline
+
+Real-time detection with age/gender:
+
+```python
+import cv2
+from uniface import RetinaFace, AgeGender
+
+detector = RetinaFace()
+age_gender = AgeGender()
+cap = cv2.VideoCapture(0)
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    faces = detector.detect(frame)
+
+    for face in faces:
+        x1, y1, x2, y2 = map(int, face.bbox)
+
+        # Draw box
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+        # Predict age/gender
+        result = age_gender.predict(frame, face.bbox)
+        label = f"{result.sex}, {result.age}y"
+
+        cv2.putText(frame, label, (x1, y1 - 10),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+
+    cv2.imshow("Age/Gender Detection", frame)
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Gaze Tracking
+
+Real-time gaze estimation:
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, MobileGaze
+from uniface.visualization import draw_gaze
+
+detector = RetinaFace()
+gaze = MobileGaze()
+cap = cv2.VideoCapture(0)
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    faces = detector.detect(frame)
+
+    for face in faces:
+        x1, y1, x2, y2 = map(int, face.bbox)
+        face_crop = frame[y1:y2, x1:x2]
+
+        if face_crop.size > 0:
+            result = gaze.estimate(face_crop)
+
+            # Draw box
+            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+            # Draw gaze arrow
+            draw_gaze(frame, face.bbox, result.pitch, result.yaw)
+
+            # Display angles
+            label = f"P:{np.degrees(result.pitch):.0f} Y:{np.degrees(result.yaw):.0f}"
+            cv2.putText(frame, label, (x1, y1 - 10),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+    cv2.imshow("Gaze Estimation", frame)
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Recording Output
+
+Record processed video:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+# Get camera properties (webcams often report CAP_PROP_FPS as 0,
+# so use a fixed rate for the writer)
+fps = 30
+width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+# Set up recording
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+out = cv2.VideoWriter('recording.mp4', fourcc, fps, (width, height))
+
+is_recording = False
+
+print("Press 'r' to start/stop recording, 'q' to quit")
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    faces = detector.detect(frame)
+
+    # Draw detections
+    for face in faces:
+        x1, y1, x2, y2 = map(int, face.bbox)
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+    # Recording indicator
+    if is_recording:
+        cv2.circle(frame, (30, 30), 10, (0, 0, 255), -1)
+        out.write(frame)
+
+    cv2.imshow("Detection", frame)
+
+    key = cv2.waitKey(1) & 0xFF
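+    # 'r' toggles recording on/off; 'q' exits the loop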
+    if key == ord('r'):
+        is_recording = not is_recording
+        print(f"Recording: {is_recording}")
+    elif key == ord('q'):
+        break
+
+cap.release()
+out.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Multi-Camera
+
+Process multiple cameras:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+
+# Open multiple cameras
+caps = [
+    cv2.VideoCapture(0),
+    cv2.VideoCapture(1)  # Second camera
+]
+
+while True:
+    frames = []
+
+    for i, cap in enumerate(caps):
+        ret, frame = cap.read()
+        if ret:
+            faces = detector.detect(frame)
+
+            for face in faces:
+                x1, y1, x2, y2 = map(int, face.bbox)
+                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+            frames.append(frame)
+
+    # Display side by side (requires both cameras to deliver a frame)
+    if len(frames) == 2:
+        combined = cv2.hconcat(frames)
+        cv2.imshow("Multi-Camera", combined)
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+for cap in caps:
+    cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Next Steps
+
+- [Anonymize Stream](anonymize-stream.md) - Privacy in video
+- [Face Search](face-search.md) - Identity search
+- [Image Pipeline](image-pipeline.md) - Full analysis
diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
new file mode 100644
index 0000000..7fad898
--- /dev/null
+++ b/docs/stylesheets/extra.css
@@ -0,0 +1,43 @@
+/* UniFace Documentation - Custom Styles */
+
+/* Hero section */
+.hero {
+    text-align: center;
+    padding: 2rem 0;
+}
+
+.hero-title {
+    font-size: 3rem !important;
+    font-weight: 700 !important;
+    margin-bottom: 0.5rem !important;
+}
+
+/* Feature grid */
+.feature-grid {
+    display: grid;
+    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
+    gap: 1rem;
+    margin: 1.5rem 0;
+}
+
+.feature-card {
+    padding: 1rem;
+    border-radius: 8px;
+    background: var(--md-code-bg-color);
+    border: 1px solid var(--md-default-fg-color--lightest);
+}
+
+.feature-card h3 {
+    margin-top: 0;
+    font-size: 0.9rem;
+}
+
+@media (max-width: 768px) {
+    .hero-title {
+        font-size: 2rem !important;
+    }
+
+    .feature-grid {
+        grid-template-columns: 1fr;
+    }
+}
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
new file mode 100644
index 0000000..6009e59
--- /dev/null
+++ b/docs/troubleshooting.md
@@ -0,0 +1,159 @@
+# Troubleshooting
+
+Common issues and solutions.
+
+---
+
+## Installation Issues
+
+### Import Error
+
+```
+ModuleNotFoundError: No module named 'uniface'
+```
+
+**Solution:** Install the package:
+
+```bash
+pip install uniface
+```
+
+### Python Version
+
+```
+Python 3.10+ required
+```
+
+**Solution:** Check your Python version:
+
+```bash
+python --version  # Should be 3.10 or newer
+```
+
+---
+
+## Model Issues
+
+### Model Download Failed
+
+```
+Failed to download model
+```
+
+**Solution:** Trigger the download manually:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+path = verify_model_weights(RetinaFaceWeights.MNET_V2)
+```
+
+### Model Not Found
+
+**Solution:** Check the cache directory:
+
+```bash
+ls ~/.uniface/models/
+```
+
+---
+
+## Performance Issues
+
+### Slow on Mac
+
+**Check:** Verify you are running an ARM64 build of Python:
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64
+```
+
+### No GPU Acceleration
+
+**Check:** Verify CUDA is visible to ONNX Runtime:
+
+```python
+import onnxruntime as ort
+print(ort.get_available_providers())
+# Should include 'CUDAExecutionProvider'
+```
+
+**Solution:** Install the GPU extra:
+
+```bash
+pip install uniface[gpu]
+```
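+
+As a quick sanity check, you can time a few detection calls before and after
+installing the GPU extra; a large drop in latency confirms CUDA is actually
+being used. This is a minimal sketch using only the public `RetinaFace` API
+("photo.jpg" is a placeholder path):
+
+```python
+import time
+
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+image = cv2.imread("photo.jpg")
+
+detector.detect(image)  # warm-up: the first call includes model/session setup
+
+start = time.perf_counter()
+for _ in range(10):
+    detector.detect(image)
+avg_ms = (time.perf_counter() - start) / 10 * 1000
+print(f"Average detection time: {avg_ms:.1f} ms")
+```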
+
+---
+
+## Detection Issues
+
+### No Faces Detected
+
+**Try:**
+
+1. Lower the confidence threshold:
+
+   ```python
+   detector = RetinaFace(confidence_threshold=0.3)
+   ```
+
+2. Check the image format (UniFace expects BGR, which is what `cv2.imread` returns):
+
+   ```python
+   image = cv2.imread("photo.jpg")  # BGR format
+   ```
+
+### Wrong Bounding Boxes
+
+**Check:** Image orientation. Some cameras return rotated images.
+
+---
+
+## Recognition Issues
+
+### Low Similarity Scores
+
+**Try:**
+
+1. Ensure face alignment is working
+2. Use higher quality images
+3. Check lighting conditions
+
+### Different Results Each Time
+
+**Note:** Inference is deterministic, so repeated runs on the same image should
+produce identical results. If they do not, check that:
+
+- Preprocessing (resize, color conversion) is identical across runs
+- The same model weights are being loaded each time
+
+---
+
+## Memory Issues
+
+### Out of Memory
+
+**Solutions:**
+
+1. Process images in smaller batches
+2. Use a smaller input size:
+
+   ```python
+   detector = RetinaFace(input_size=(320, 320))
+   ```
+
+3. Release resources when a model is no longer needed:
+
+   ```python
+   import gc
+
+   del detector
+   gc.collect()
+   ```
+
+---
+
+## Still Having Issues?
+
+1. Check [GitHub Issues](https://github.com/yakhyo/uniface/issues)
+2. Open a new issue with:
+   - Python version
+   - UniFace version
+   - Error message
+   - Minimal code to reproduce
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..b55d392
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,152 @@
+site_name: UniFace
+site_description: All-in-One Face Analysis Library with ONNX Runtime
+site_author: Yakhyokhuja Valikhujaev
+site_url: https://yakhyo.github.io/uniface
+
+repo_name: yakhyo/uniface
+repo_url: https://github.com/yakhyo/uniface
+edit_uri: edit/main/docs/
+
+copyright: Copyright © 2025 Yakhyokhuja Valikhujaev
+
+theme:
+  name: material
+  palette:
+    - media: "(prefers-color-scheme: light)"
+      scheme: default
+      primary: custom
+      accent: custom
+      toggle:
+        icon: material/brightness-7
+        name: Switch to dark mode
+    - media: "(prefers-color-scheme: dark)"
+      scheme: slate
+      primary: custom
+      accent: custom
+      toggle:
+        icon: material/brightness-4
+        name: Switch to light mode
+  font:
+    text: Roboto
+    code: Roboto Mono
+  features:
+    - navigation.tabs
+    - navigation.sections
+    - navigation.path
+    - navigation.top
+    - navigation.footer
+    - navigation.indexes
+    - navigation.instant
+    - navigation.tracking
+    - search.suggest
+    - search.highlight
+    - content.code.copy
+    - content.code.annotate
+    - content.action.edit
+    - content.tabs.link
+    - toc.follow
+  icon:
+    logo: material/book-open-page-variant
+    repo: fontawesome/brands/github
+    admonition:
+      note: octicons/tag-16
+      abstract: octicons/checklist-16
+      info: octicons/info-16
+      tip: octicons/squirrel-16
+      success: octicons/check-16
+      question: octicons/question-16
+      warning: octicons/alert-16
+      failure: octicons/x-circle-16
+      danger: octicons/zap-16
+      bug: octicons/bug-16
+      example: octicons/beaker-16
+      quote: octicons/quote-16
+
+extra:
+  social:
+    - icon: fontawesome/brands/github
+      link: https://github.com/yakhyo
+    - icon: fontawesome/brands/python
+      link: https://pypi.org/project/uniface/
+  version:
+    provider: mike
+  analytics:
+    provider: google
+    property: G-XXXXXXXXXX
+
+extra_css:
+  - stylesheets/extra.css
+
+markdown_extensions:
+  - admonition
+  - footnotes
+  - attr_list
+  - md_in_html
+  - def_list
+  - tables
+  - toc:
+      permalink: true
+      toc_depth: 3
+  - pymdownx.superfences:
+      custom_fences:
+        - name: mermaid
+          class: mermaid
+          format: !!python/name:pymdownx.superfences.fence_code_format
+  - pymdownx.details
+  - pymdownx.highlight:
+      anchor_linenums: true
+      line_spans: __span
+      pygments_lang_class: true
+  - pymdownx.inlinehilite
+  - pymdownx.snippets
+  - pymdownx.tabbed:
+      alternate_style: true
+  - pymdownx.emoji:
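+      # Material's bundled twemoji index and SVG generator; custom
+      # `!!python/name:` tags like these are why mkdocs.yml is excluded
+      # from the check-yaml pre-commit hook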
+      emoji_index: !!python/name:material.extensions.emoji.twemoji
+      emoji_generator: !!python/name:material.extensions.emoji.to_svg
+  - pymdownx.tasklist:
+      custom_checkbox: true
+  - pymdownx.keys
+  - pymdownx.mark
+  - pymdownx.critic
+  - pymdownx.caret
+  - pymdownx.tilde
+
+plugins:
+  - search
+
+nav:
+  - Home: index.md
+  - Getting started:
+      - Installation: installation.md
+      - Quickstart: quickstart.md
+  - Concepts:
+      - Overview: concepts/overview.md
+      - Inputs & Outputs: concepts/inputs-outputs.md
+      - Coordinate Systems: concepts/coordinate-systems.md
+      - Execution Providers: concepts/execution-providers.md
+      - Model Cache: concepts/model-cache-offline.md
+      - Thresholds: concepts/thresholds-calibration.md
+  - API:
+      - Detection: modules/detection.md
+      - Recognition: modules/recognition.md
+      - Landmarks: modules/landmarks.md
+      - Attributes: modules/attributes.md
+      - Parsing: modules/parsing.md
+      - Gaze: modules/gaze.md
+      - Anti-Spoofing: modules/spoofing.md
+      - Privacy: modules/privacy.md
+  - Examples:
+      - Image Pipeline: recipes/image-pipeline.md
+      - Batch Processing: recipes/batch-processing.md
+      - Video & Webcam: recipes/video-webcam.md
+      - Face Search: recipes/face-search.md
+      - Anonymize Stream: recipes/anonymize-stream.md
+      - Custom Models: recipes/custom-models.md
+  - Reference:
+      - API Reference: api/reference.md
+      - Troubleshooting: troubleshooting.md
+      - FAQ: faq.md
+      - Changelog: changelog.md
+      - Contributing: contributing.md
+      - License: license-attribution.md
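+
+# NOTE: the version selector is configured for mike (extra.version.provider).
+# Publishing versioned docs would additionally require installing and
+# deploying with `mike` in the docs workflow; a plain `mkdocs build` deploy
+# leaves the selector without versions to show.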