From 16dba1d5c6d7fd5d1b4125adcbb287fce0d0c5bf Mon Sep 17 00:00:00 2001
From: Yann
Date: Thu, 7 Aug 2025 20:53:56 +0200
Subject: [PATCH] update readme

---
 Notice d'installation.odt |  Bin 0 -> 29652 bytes
 README.md                 | 1049 +++++++++++++++++++++++++++++++------
 2 files changed, 901 insertions(+), 148 deletions(-)
 create mode 100644 Notice d'installation.odt

diff --git a/Notice d'installation.odt b/Notice d'installation.odt
new file mode 100644
index 0000000000000000000000000000000000000000..3ec232ec5fb8b398bd963e71d4fa158296bda267
GIT binary patch
(29652 bytes of base85-encoded binary ODT data omitted)
z67d&`OZhSFXYv!>?P~*dyJ2ZltfrRv6FLP~xIWVBh~1AD)gLrN*c4o-C_rF6Sw9S= z5!DYpWXRp`EH)0?oB_)^S`yQcPdE!dQLOlB`pPxKN=m%(eGD>dN`;?=eN{wN=M znn0|MRF$Yp{o;1!R}Q_6D!$a(fY0={t&t*%*o*m`3z78dE%SeUjf7fw3TV9<+3%u0 z^!K{%{gNOD>V%aPg_R7ETzbO02#^1k1izIg=Pyl z`muAsp{9?%fz+6gKxtrJ<6_?*C`Kdl`1fUB``v41X>+}J%|;*!r^d^a~T z+z9twjyV(GSzvN?^t2O`{BW}C2hqJI0Ej^^fhj@jT>?(_ZrxLu2U$W8bHDlE0x9U) z`d+ke6o7^|ZwI@I4MzKd7UR%IhCD@281IH0fb!WLe=@k^0@K$C>f`)K&a zaSzuG0fDp=W+FFdT>r(ZkqieynPAAW3PWS>l>@@2;HT9nb$rV&`$N~g{*MDc`4_$N zdb;wn)WCd#JLN1VHIKPEZ1L-xX30t*uLz0c{ihaL6LhMU^qLA>_v!2|Jge|1b92SvcCJJhgc;O6W1K;cKxr@`xUV1m%09BKtRdlQ zIz(L(h+hr0*a9x?=a!>dB-tHHD11AEQE&D9N$LS8HL#0sxS1xjrv`aa3mK^Sl$B#v z-Kw5OoaW{FA%fn1NQ~&(K>O)L3meHKF^l1ae%ppRQC{&+Ok1$4)eEA?b_zpA(v3?u z2Jcf%a-A2D@B6E*;$b0_WH>QcMugn-iP>Y4DsK>PK_?AD5PvcpI#nOvJh2ofA=}#; ziam-0MwEBn1@Nhb(u4h6TK1^}S8QzCWRS&~r|IB}%ep8r{65gNQar}0Q5Ip=zwMT4 z`JH#IzP>BTZJvkm=AxAvE}A=L3>bjX$#0-rnE_lH=)w0^3?&>iW@`$d!(6gXLRPN2 z`54fQ2WuyMwtKG<-Ziqjj$OkuMK_6EHmeu)Nns_VCG|tX%R$Zx-|0>3v+ik2q0q#p z&#ghpg!q<1)go^8^`yrXYElHh;d%hLo>Eul)9QtJ$@inAH zDy$@95+71fYs6x#4Dj)1kYMAj^|g#3>iAgpCOtzaP}#V?Q#_f>QvBFMdy1Dz7)ww* ztjk@t#7XGX*p=e6wwNaYb&ta-c%&aVf&gXkX*Wm3RMiW(1fWVtY7jQ1QBMzkEv>VE zZ7Jj)r}0`n{22TAJY(<7VR2WD66NY^lw>lx_+^=<=9>V%bO!DR)eNEC70}T8=ajdi zoTtJG6A49fYi6NPI;!+*JZ%(aR35|TV5SKbJq7nAW;b%K81}copiq>k%Sd*v4+*dL zU9IIUZCj;UFn$x2$()BLAH4R6{JUw>Wq=f%;c;CSXEomNAV8|Nxc5$lM-Rn8*ys+M z_}B7f+b!o15hX$j4XPGxh>k$y6aB3;nyfoXy}16Q{vBE(xfM*kOp5Ryg<2~x^qz&b zZHs$2M`*mJ9*_@oQrj)8)(s#!(8sLnady%+VMTV-rfh7AUiLz|TdP2pKu&!y#=O!;IS!t313n0 z(y6+WGZ>jCxpHKi&@;pqETbuN2Q0z&v|mItMV)nQ5XcW@!2ifyuhgBQU1DC|g19g5 zXXZD@zW3=~i(Z*^0m$QLd?89ximB3`fqDLd32dGLQh@eT8JzESc)2H9^J1>7plYDu zosbRYh0EypJ5Bqwj^X#EJr3UcC?Snp_=tY=jwnNFqnk0)kCL|^uZlQqj7eV4q0mW?eqKOA(S^=E~VCC8oPs2G+uC$=n9u zV*m7UzlZ@l!gG9a_}Wjt3AnNbg9xvMe{5wtm%J2fMdFdM02(_j(8LTAeTsVBzzHLE zEROwH5=CQFn`TrmFIsb)@@nZxRSYOJvy?1?ByT8!jO}1)I`?xVM4bwzb@3oeXTi)^ zfC;=~oA^F=YBC-SH`F|qatPK8#DF(#a>B1~Nx$#Gm{yH+O0(Pi35k}#;|o91YQb7~ ze0Wd89;^+ayk-Q}@=2dFP&P(pKxugV8vdc0VECh`8(hied08m)1dN&YEmCqPa@MYJ zDyBZrn2;)5inEOCQwR2`mM7PgT&@kA3!ann7Nv16r35re&T7RGMtFWldwlId*vc4P z%d&C4F$T@6Vd;Q1`_j_9Hi@LG19Op+u;F@&=-{d{8#6$4KPh;DBJ)JGI)$iF>YLia zoPe#%x;pS0HcSGQ3{q`r#BrZWM`B>-LmarV(%wsKSg}@W;1J_PLh~SB3i6?&JZhg$K%UVx7g=m z7Jlq0?inwhjx9gEC_pB4Kx@_b-c(cipGG9twSY?Vz6^XAyYVKctBrkoW%cPw|%5zY!QR_5;)IY`vv>E7( z)!T*UK4Nc^&7DD#kPRj3>T`poP6m@K&I?CTfmTqY&4jkr&g9~c_dVP#agM}*T3^t28cRj-G}my{(+W5f$6?ergnedmm{AK)w%~T>Tq2X+8y0Q1C2*V)x3}QV)UJ!N z5H9zbhvfluQ~NqzM9mVUvR>ZKcn^D7X4512Wao_ zT7R1IFTBL>zRX+mpNV>ZJ@;>|e?uyM_XGcnmEVNDf41_ALHAo)-kN{EmOm5s{;zh< z|BIbJ6Zrnw&M!ROZ=rnq4*u8m{DsK(&z62cJbsJOf3ftN(D%<){<=P?|6t`0V&6a8 z`Niz}Ei?bc&i^9#{j;&ZE)oLR|6ZiO68-*d=?};BpK*1+cx1mt{LRw;>>F={zdwG& ze`x-harO&m_gg~W=qGQ4$$!D^{;u_B=E`5s7X|qi_wgqh+8>(##ANw>J|_5oCbaxv z<&UfWvy1*$q{(l|lmBzv$=~( [!TIP] +> **Looking for an [Enterprise Plan](https://docs.openwebui.com/enterprise)?** – **[Speak with Our Sales Team Today!](mailto:sales@openwebui.com)** +> +> Get **enhanced capabilities**, including **custom theming and branding**, **Service Level Agreement (SLA) support**, **Long-Term Support (LTS) versions**, and **more!** + +For more information, be sure to check out our [Open WebUI Documentation](https://docs.openwebui.com/). + +## Key Features of Open WebUI ⭐ + +| Feature | Description | +| - | - | +| πŸš€ **Effortless Setup** | Install seamlessly using Docker or Kubernetes (kubectl, kustomize or helm) for a hassle-free experience with support for both `:ollama` and `:cuda` tagged images. 
| 🀝 **Ollama/OpenAI API Integration** | Effortlessly integrate OpenAI-compatible APIs for versatile conversations alongside Ollama models. Customize the OpenAI API URL to link with **LMStudio, GroqCloud, Mistral, OpenRouter, and more**. |
| πŸ›‘οΈ **Granular Permissions and User Groups** | By allowing administrators to create detailed user roles and permissions, we ensure a secure user environment. This granularity not only enhances security but also allows for customized user experiences, fostering a sense of ownership and responsibility amongst users. |
| πŸ“± **Responsive Design** | Enjoy a seamless experience across Desktop PC, Laptop, and Mobile devices. |
| πŸ“± **Progressive Web App (PWA) for Mobile** | Enjoy a native app-like experience on your mobile device with our PWA, providing offline access on localhost and a seamless user interface. |
| βœ’οΈπŸ”’ **Full Markdown and LaTeX Support** | Elevate your LLM experience with comprehensive Markdown and LaTeX capabilities for enriched interaction. |
| πŸŽ€πŸ“Ή **Hands-Free Voice/Video Call** | Experience seamless communication with integrated hands-free voice and video call features, allowing for a more dynamic and interactive chat environment. |
| πŸ› οΈ **Model Builder** | Easily create Ollama models via the Web UI. Create and add custom characters/agents, customize chat elements, and import models effortlessly through [Open WebUI Community](https://openwebui.com/) integration. |
| 🐍 **Native Python Function Calling Tool** | Enhance your LLMs with built-in code editor support in the tools workspace. Bring Your Own Function (BYOF) by simply adding your pure Python functions, enabling seamless integration with LLMs. |
| πŸ“š **Local RAG Integration** | Dive into the future of chat interactions with groundbreaking Retrieval Augmented Generation (RAG) support. This feature seamlessly integrates document interactions into your chat experience. You can load documents directly into the chat or add files to your document library, effortlessly accessing them using the `#` command before a query. |
| πŸ” **Web Search for RAG** | Perform web searches using providers like `SearXNG`, `Google PSE`, `Brave Search`, `serpstack`, `serper`, `Serply`, `DuckDuckGo`, `TavilySearch`, `SearchApi` and `Bing` and inject the results directly into your chat experience. |
| 🌐 **Web Browsing Capability** | Seamlessly integrate websites into your chat experience using the `#` command followed by a URL. This feature allows you to incorporate web content directly into your conversations, enhancing the richness and depth of your interactions. |
| 🎨 **Image Generation Integration** | Seamlessly incorporate image generation capabilities using options such as AUTOMATIC1111 API or ComfyUI (local), and OpenAI's DALL-E (external), enriching your chat experience with dynamic visual content. |
| βš™οΈ **Many Models Conversations** | Effortlessly engage with various models simultaneously, harnessing their unique strengths for optimal responses. Enhance your experience by leveraging a diverse set of models in parallel. |
| πŸ” **Role-Based Access Control (RBAC)** | Ensure secure access with restricted permissions; only authorized individuals can access your Ollama, and exclusive model creation/pulling rights are reserved for administrators. |
| 🌐🌍 **Multilingual Support** | Experience Open WebUI in your preferred language with our internationalization (i18n) support. Join us in expanding our supported languages! We're actively seeking contributors! |
| +| 🧩 **Pipelines, Open WebUI Plugin Support** | Seamlessly integrate custom logic and Python libraries into Open WebUI using [Pipelines Plugin Framework](https://github.com/open-webui/pipelines). Launch your Pipelines instance, set the OpenAI URL to the Pipelines URL, and explore endless possibilities. [Examples](https://github.com/open-webui/pipelines/tree/main/examples) include **Function Calling**, User **Rate Limiting** to control access, **Usage Monitoring** with tools like Langfuse, **Live Translation with LibreTranslate** for multilingual support, **Toxic Message Filtering** and much more. | +| 🌟 **Continuous Updates** | We are committed to improving Open WebUI with regular updates, fixes, and new features. | + +Want to learn more about Open WebUI's features? Check out our [Open WebUI documentation](https://docs.openwebui.com/features) for a comprehensive overview! + +## Sponsors πŸ™Œ + +#### Emerald + +| Logo | Description | +| - | - | +| ![n8n](https://docs.openwebui.com/sponsors/logos/n8n.png) | [n8n](https://n8n.io/) β€’ Does your interface have a backend yet? Try [n8n](https://n8n.io/) | +| ![Tailscale](https://docs.openwebui.com/sponsors/logos/tailscale.png) | [Tailscale](https://tailscale.com/blog/self-host-a-local-ai-stack/?utm_source=OpenWebUI&utm_medium=paid-ad-placement&utm_campaign=OpenWebUI-Docs) β€’ Connect self-hosted AI to any device with Tailscale | + +--- + +We are incredibly grateful for the generous support of our sponsors. Their contributions help us to maintain and improve our project, ensuring we can continue to deliver quality work to our community. Thank you! + +## How to Install πŸš€ + +### Installation via Python pip 🐍 + +Open WebUI can be installed using pip, the Python package installer. Before proceeding, ensure you're using **Python 3.11** to avoid compatibility issues. + +1. **Install Open WebUI**: +Open your terminal and run the following command to install Open WebUI: + +```bash +pip install open-webui +``` + +2. **Running Open WebUI**: +After installation, you can start Open WebUI by executing: + +```bash +open-webui serve +``` + +This will start the Open WebUI server, which you can access at [http://localhost:8080](http://localhost:8080) + +### Quick Start with Docker 🐳 + +> [!NOTE] +> Please note that for certain Docker environments, additional configurations might be needed. If you encounter any connection issues, our detailed guide on [Open WebUI Documentation](https://docs.openwebui.com/) is ready to assist you. + +> [!WARNING] +> When using Docker to install Open WebUI, make sure to include the `-v open-webui:/app/backend/data` in your Docker command. This step is crucial as it ensures your database is properly mounted and prevents any loss of data. + +> [!TIP] +> If you wish to utilize Open WebUI with Ollama included or CUDA acceleration, we recommend utilizing our official images tagged with either `:cuda` or `:ollama`. To enable CUDA, you must install the [Nvidia CUDA container toolkit](https://docs.nvidia.com/dgx/nvidia-container-runtime-upgrade/) on your Linux/WSL system. 
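
If you plan to use the `:cuda` image, a quick way to confirm that the NVIDIA Container Toolkit is working is to run a throwaway container with GPU access. This is a generic Docker/NVIDIA sanity check rather than an Open WebUI command:

```bash
# If the toolkit is installed correctly, this prints your GPU table via nvidia-smi
docker run --rm --gpus all ubuntu nvidia-smi
```
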
+ +### Installation with Default Configuration + +| Command | Description | +| - | - | +| `docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main` | If Ollama is on your computer | +| `docker run -d -p 3000:8080 -e OLLAMA_BASE_URL=https://example.com -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main` | If Ollama is on a Different Server | +| `docker run -d -p 3000:8080 --gpus all --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:cuda` | To run Open WebUI with Nvidia GPU support | +| `docker run -d -p 3000:8080 -e OPENAI_API_KEY=your_secret_key -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main` | If you're only using OpenAI API | + +### Installation for OpenAI API Usage Only + +| Command | Description | +| - | - | +| `docker run -d -p 3000:8080 -e OPENAI_API_KEY=your_secret_key -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main` | If you're only using OpenAI API | + +### Installing Open WebUI with Bundled Ollama Support + +This installation method uses a single container image that bundles Open WebUI with Ollama, allowing for a streamlined setup via a single command. Choose the appropriate command based on your hardware setup: + +| Command | Description | +| - | - | +| `docker run -d -p 3000:8080 --gpus=all -v ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:ollama` | With GPU Support | +| `docker run -d -p 3000:8080 -v ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:ollama` | For CPU Only | + +Both commands facilitate a built-in, hassle-free installation of both Open WebUI and Ollama, ensuring that you can get everything up and running swiftly. + +After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! πŸ˜„ + +### Other Installation Methods + +We offer various installation alternatives, including non-Docker native installation methods, Docker Compose, Kustomize, and Helm. Visit our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/) or join our [Discord community](https://discord.gg/5rJgQTnV4s) for comprehensive guidance. + +Look at the [Local Development Guide](https://docs.openwebui.com/getting-started/advanced-topics/development) for instructions on setting up a local development environment. + +### Troubleshooting + +Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s). + +#### Open WebUI: Server Connection Error + +If you're experiencing connection issues, it’s often due to the WebUI docker container not being able to reach the Ollama server at 127.0.0.1:11434 (host.docker.internal:11434) inside the container . Use the `--network=host` flag in your docker command to resolve this. Note that the port changes from 3000 to 8080, resulting in the link: `http://localhost:8080`. 
+ +**Example Docker Command**: + +```bash +docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main +``` + +### Keeping Your Docker Installation Up-to-Date + +In case you want to update your local Docker installation to the latest version, you can do it with [Watchtower](https://containrrr.dev/watchtower/): + +```bash +docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once open-webui +``` + +In the last part of the command, replace `open-webui` with your container name if it is different. + +Check our Updating Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/updating). + +### Using the Dev Branch πŸŒ™ + +> [!WARNING] +> The `:dev` branch contains the latest unstable features and changes. Use it at your own risk as it may have bugs or incomplete features. + +If you want to try out the latest bleeding-edge features and are okay with occasional instability, you can use the `:dev` tag like this: + +```bash +docker run -d -p 3000:8080 -v open-webui:/app/backend/data --name open-webui --add-host=host.docker.internal:host-gateway --restart always ghcr.io/open-webui/open-webui:dev +``` + +### Offline Mode + +If you are running Open WebUI in an offline environment, you can set the `HF_HUB_OFFLINE` environment variable to `1` to prevent attempts to download models from the internet. + +```bash +export HF_HUB_OFFLINE=1 +``` + +## What's Next? 🌟 + +Discover upcoming features on our roadmap in the [Open WebUI Documentation](https://docs.openwebui.com/roadmap/). + +## License πŸ“œ + +This project is licensed under the [Open WebUI License](LICENSE), a revised BSD-3-Clause license. You receive all the same rights as the classic BSD-3 license: you can use, modify, and distribute the software, including in proprietary and commercial products, with minimal restrictions. The only additional requirement is to preserve the "Open WebUI" branding, as detailed in the LICENSE file. For full terms, see the [LICENSE](LICENSE) document. πŸ“„ + +## Support πŸ’¬ + +If you have any questions, suggestions, or need assistance, please open an issue or join our +[Open WebUI Discord community](https://discord.gg/5rJgQTnV4s) to connect with us! 🀝 + +## Star History + + + + + + Star History Chart + + + +--- + +Created by [Timothy Jaeryang Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! πŸ’ͺ + + + +# Ollama + +Get up and running with large language models. + +### macOS + +[Download](https://ollama.com/download/Ollama.dmg) + +### Windows + +[Download](https://ollama.com/download/OllamaSetup.exe) + +### Linux + +```shell +curl -fsSL https://ollama.com/install.sh | sh +``` + +[Manual install instructions](https://github.com/ollama/ollama/blob/main/docs/linux.md) + +### Docker + +The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `ollama/ollama` is available on Docker Hub. 
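
As a minimal sketch (the `ollama` volume name, container name, and published port follow the image's documented defaults), you can start the container and then run a model inside it:

```shell
# Start Ollama in the background, persisting downloaded models in a named volume
docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama

# Run a model inside the running container (add --gpus=all to the command above for NVIDIA GPUs)
docker exec -it ollama ollama run gemma3
```
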
+ +### Libraries + +| Library | Description | +| - | - | +| [ollama-python](https://github.com/ollama/ollama-python) | Python library for Ollama | +| [ollama-js](https://github.com/ollama/ollama-js) | JavaScript library for Ollama | + +### Community + +| Platform | Link | +| - | - | +| [Discord](https://discord.gg/ollama) | Discord community | +| [Reddit](https://reddit.com/r/ollama) | Reddit community | + +## Quickstart + +To run and chat with [Gemma 3](https://ollama.com/library/gemma3): + +```shell +ollama run gemma3 +``` + +## Model library + +Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library 'ollama model library') + +Here are some example models that can be downloaded: + +| Model | Parameters | Size | Download | +| - | - | - | - | +| Gemma 3 | 1B | 815MB | `ollama run gemma3:1b` | +| Gemma 3 | 4B | 3.3GB | `ollama run gemma3` | +| Gemma 3 | 12B | 8.1GB | `ollama run gemma3:12b` | +| Gemma 3 | 27B | 17GB | `ollama run gemma3:27b` | +| QwQ | 32B | 20GB | `ollama run qwq` | +| DeepSeek-R1 | 7B | 4.7GB | `ollama run deepseek-r1` | +| DeepSeek-R1 | 671B | 404GB | `ollama run deepseek-r1:671b` | +| Llama 4 | 109B | 67GB | `ollama run llama4:scout` | +| Llama 4 | 400B | 245GB | `ollama run llama4:maverick` | +| Llama 3.3 | 70B | 43GB | `ollama run llama3.3` | +| Llama 3.2 | 3B | 2.0GB | `ollama run llama3.2` | +| Llama 3.2 | 1B | 1.3GB | `ollama run llama3.2:1b` | +| Llama 3.2 Vision | 11B | 7.9GB | `ollama run llama3.2-vision` | +| Llama 3.2 Vision | 90B | 55GB | `ollama run llama3.2-vision:90b` | +| Llama 3.1 | 8B | 4.7GB | `ollama run llama3.1` | +| Llama 3.1 | 405B | 231GB | `ollama run llama3.1:405b` | +| Phi 4 | 14B | 9.1GB | `ollama run phi4` | +| Phi 4 Mini | 3.8B | 2.5GB | `ollama run phi4-mini` | +| Mistral | 7B | 4.1GB | `ollama run mistral` | +| Moondream 2 | 1.4B | 829MB | `ollama run moondream` | +| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` | +| Starling | 7B | 4.1GB | `ollama run starling-lm` | +| Code Llama | 7B | 3.8GB | `ollama run codellama` | +| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` | +| LLaVA | 7B | 4.5GB | `ollama run llava` | +| Granite-3.3 | 8B | 4.9GB | `ollama run granite3.3` | + +> [!NOTE] +> You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models. + +## Customize a model + +### Import from GGUF + +Ollama supports importing GGUF models in the Modelfile: + +1. Create a file named `Modelfile`, with a `FROM` instruction with the local filepath to the model you want to import. + +``` +FROM ./vicuna-33b.Q4_0.gguf +``` + +2. Create the model in Ollama + +```shell +ollama create example -f Modelfile +``` + +3. Run the model + +```shell +ollama run example +``` + +### Import from Safetensors + +See the [guide](docs/import.md) on importing models for more information. + +### Customize a prompt + +Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.2` model: + +```shell +ollama pull llama3.2 +``` + +Create a `Modelfile`: + +``` +FROM llama3.2 + +# set the temperature to 1 [higher is more creative, lower is more coherent] +PARAMETER temperature 1 + +# set the system message +SYSTEM """ +You are Mario from Super Mario Bros. Answer as Mario, the assistant, only. +""" +``` + +Next, create and run the model: + +``` +ollama create mario -f ./Modelfile +ollama run mario +>>> hi +Hello! It's your friend Mario. 
+``` + +For more information on working with a Modelfile, see the [Modelfile](docs/modelfile.md) documentation. + +## CLI Reference + +### Create a model + +`ollama create` is used to create a model from a Modelfile. + +```shell +ollama create mymodel -f ./Modelfile +``` + +### Pull a model + +```shell +ollama pull llama3.2 +``` + +> This command can also be used to update a local model. Only the diff will be pulled. + +### Remove a model + +```shell +ollama rm llama3.2 +``` + +### Copy a model + +```shell +ollama cp llama3.2 my-model +``` + +### Multiline input + +For multiline input, you can wrap text with `"""`: + +``` +>>> """Hello, +... world! +... """ +I'm a basic program that prints the famous "Hello, world!" message to the console. +``` + +### Multimodal models + +``` +ollama run llava "What's in this image? /Users/jmorgan/Desktop/smile.png" +``` + +> **Output**: The image features a yellow smiley face, which is likely the central focus of the picture. + +### Pass the prompt as an argument + +```shell +ollama run llama3.2 "Summarize this file: $(cat README.md)" +``` + +> **Output**: Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications. + +### Show model information + +```shell +ollama show llama3.2 +``` + +### List models on your computer + +```shell +ollama list +``` + +### List which models are currently loaded + +```shell +ollama ps +``` + +### Stop a model which is currently running + +```shell +ollama stop llama3.2 +``` + +### Start Ollama + +`ollama serve` is used when you want to start ollama without running the desktop application. + +## REST API + +Ollama has a REST API for running and managing models. + +### Generate a response + +```shell +curl http://localhost:11434/api/generate -d '{ +"model": "llama3.2", +"prompt":"Why is the sky blue?" +}' +``` + +### Chat with a model + +```shell +curl http://localhost:11434/api/chat -d '{ +"model": "llama3.2", +"messages": [ +{ "role": "user", "content": "why is the sky blue?" } +] +}' +``` + +See the [API documentation](./docs/api.md) for all endpoints. 
+ +## Community Integrations + +### Web & Desktop + +| Integration | Description | +| - | - | +| [Open WebUI](https://github.com/open-webui/open-webui) | Open WebUI integration | +| [SwiftChat (macOS with ReactNative)](https://github.com/aws-samples/swift-chat) | SwiftChat integration | +| [Enchanted (macOS native)](https://github.com/AugustDev/enchanted) | Enchanted integration | +| [Hollama](https://github.com/fmaclen/hollama) | Hollama integration | +| [Lollms-Webui](https://github.com/ParisNeo/lollms-webui) | Lollms-Webui integration | +| [LibreChat](https://github.com/danny-avila/LibreChat) | LibreChat integration | +| [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt) | Bionic GPT integration | +| [HTML UI](https://github.com/rtcfirefly/ollama-ui) | HTML UI integration | +| [Saddle](https://github.com/jikkuatwork/saddle) | Saddle integration | +| [TagSpaces](https://www.tagspaces.org) | TagSpaces integration | +| [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama) | Chatbot UI integration | +| [Chatbot UI v2](https://github.com/mckaywrigley/chatbot-ui) | Chatbot UI v2 integration | +| [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file) | Typescript UI integration | +| [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui) | Minimalistic React UI integration | +| [Ollamac](https://github.com/kevinhermawan/Ollamac) | Ollamac integration | +| [big-AGI](https://github.com/enricoros/big-AGI) | big-AGI integration | +| [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core) | Cheshire Cat integration | +| [Amica](https://github.com/semperai/amica) | Amica integration | +| [chatd](https://github.com/BruceMacD/chatd) | chatd integration | +| [Ollama-SwiftUI](https://github.com/kghandour/Ollama-SwiftUI) | Ollama-SwiftUI integration | +| [Dify.AI](https://github.com/langgenius/dify) | Dify.AI integration | +| [MindMac](https://mindmac.app) | MindMac integration | +| [NextJS Web Interface for Ollama](https://github.com/jakobhoeg/nextjs-ollama-llm-ui) | NextJS Web Interface integration | +| [Msty](https://msty.app) | Msty integration | +| [Chatbox](https://github.com/Bin-Huang/Chatbox) | Chatbox integration | +| [WinForm Ollama Copilot](https://github.com/tgraupmann/WinForm_Ollama_Copilot) | WinForm Ollama Copilot integration | +| [NextChat](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web) | NextChat integration | +| [Alpaca WebUI](https://github.com/mmo80/alpaca-webui) | Alpaca WebUI integration | +| [OllamaGUI](https://github.com/enoch1118/ollamaGUI) | OllamaGUI integration | +| [OpenAOE](https://github.com/InternLM/OpenAOE) | OpenAOE integration | +| [Odin Runes](https://github.com/leonid20000/OdinRunes) | Odin Runes integration | +| [LLM-X](https://github.com/mrdjohnson/llm-x) | LLM-X integration | +| [AnythingLLM (Docker + MacOs/Windows/Linux native app)](https://github.com/Mintplex-Labs/anything-llm) | AnythingLLM integration | +| [Ollama Basic Chat: Uses HyperDiv Reactive UI](https://github.com/rapidarchitect/ollama_basic_chat) | Ollama Basic Chat integration | +| [Ollama-chats RPG](https://github.com/drazdra/ollama-chats) | Ollama-chats RPG integration | +| [IntelliBar](https://intellibar.app/) | IntelliBar integration | +| [Jirapt](https://github.com/AliAhmedNada/jirapt) | Jirapt integration | +| [ojira](https://github.com/AliAhmedNada/ojira) | ojira integration | +| [QA-Pilot](https://github.com/reid41/QA-Pilot) | QA-Pilot integration | +| 
[ChatOllama](https://github.com/sugarforever/chat-ollama) | ChatOllama integration | +| [CRAG Ollama Chat](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) | CRAG Ollama Chat integration | +| [RAGFlow](https://github.com/infiniflow/ragflow) | RAGFlow integration | +| [StreamDeploy](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) | StreamDeploy integration | +| [chat](https://github.com/swuecho/chat) | chat integration | +| [Lobe Chat](https://github.com/lobehub/lobe-chat) | Lobe Chat integration | +| [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) | Ollama RAG Chatbot integration | +| [BrainSoup](https://www.nurgo-software.com/products/brainsoup) | BrainSoup integration | +| [macai](https://github.com/Renset/macai) | macai integration | +| [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) | RWKV-Runner integration | +| [Ollama Grid Search](https://github.com/dezoito/ollama-grid-search) | Ollama Grid Search integration | +| [Olpaka](https://github.com/Otacon/olpaka) | Olpaka integration | +| [Casibase](https://casibase.org) | Casibase integration | +| [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) | OllamaSpring integration | +| [LLocal.in](https://github.com/kartikm7/llocal) | LLocal.in integration | +| [Shinkai Desktop](https://github.com/dcSpark/shinkai-apps) | Shinkai Desktop integration | +| [AiLama](https://github.com/zeyoyt/ailama) | AiLama integration | +| [Ollama with Google Mesop](https://github.com/rapidarchitect/ollama_mesop/) | Ollama with Google Mesop integration | +| [R2R](https://github.com/SciPhi-AI/R2R) | R2R integration | +| [Ollama-Kis](https://github.com/elearningshow/ollama-kis) | Ollama-Kis integration | +| [OpenGPA](https://opengpa.org) | OpenGPA integration | +| [Painting Droid](https://github.com/mateuszmigas/painting-droid) | Painting Droid integration | +| [Kerlig AI](https://www.kerlig.com/) | Kerlig AI integration | +| [AI Studio](https://github.com/MindWorkAI/AI-Studio) | AI Studio integration | +| [Sidellama](https://github.com/gyopak/sidellama) | Sidellama integration | +| [LLMStack](https://github.com/trypromptly/LLMStack) | LLMStack integration | +| [BoltAI for Mac](https://boltai.com) | BoltAI for Mac integration | +| [Harbor](https://github.com/av/harbor) | Harbor integration | +| [PyGPT](https://github.com/szczyglis-dev/py-gpt) | PyGPT integration | +| [Alpaca](https://github.com/Jeffser/Alpaca) | Alpaca integration | +| [AutoGPT](https://github.com/Significant-Gravitas/AutoGPT/blob/master/docs/content/platform/ollama.md) | AutoGPT integration | +| [Go-CREW](https://www.jonathanhecl.com/go-crew/) | Go-CREW integration | +| [PartCAD](https://github.com/openvmp/partcad/) | PartCAD integration | +| [Ollama4j Web UI](https://github.com/ollama4j/ollama4j-web-ui) | Ollama4j Web UI integration | +| [PyOllaMx](https://github.com/kspviswa/pyOllaMx) | PyOllaMx integration | +| [Cline](https://github.com/cline/cline) | Cline integration | +| [Cherry Studio](https://github.com/kangfenmao/cherry-studio) | Cherry Studio integration | +| [ConfiChat](https://github.com/1runeberg/confichat) | ConfiChat integration | +| [Archyve](https://github.com/nickthecook/archyve) | Archyve integration | +| [crewAI with Mesop](https://github.com/rapidarchitect/ollama-crew-mesop) | crewAI with Mesop integration | +| [Tkinter-based client](https://github.com/chyok/ollama-gui) | Tkinter-based client integration | +| [LLMChat](https://github.com/trendy-design/llmchat) | LLMChat integration | +| [Local Multimodal AI 
Chat](https://github.com/Leon-Sander/Local-Multimodal-AI-Chat) | Local Multimodal AI Chat integration | +| [ARGO](https://github.com/xark-argo/argo) | ARGO integration | +| [OrionChat](https://github.com/EliasPereirah/OrionChat) | OrionChat integration | +| [G1](https://github.com/bklieger-groq/g1) | G1 integration | +| [Web management](https://github.com/lemonit-eric-mao/ollama-web-management) | Web management integration | +| [Promptery](https://github.com/promptery/promptery) | Promptery integration | +| [Ollama App](https://github.com/JHubi1/ollama-app) | Ollama App integration | +| [chat-ollama](https://github.com/anan1213095357/ollama-chat-app) | chat-ollama integration | +| [SpaceLlama](https://github.com/tcsenpai/spacellama) | SpaceLlama integration | +| [YouLama](https://github.com/tcsenpai/youlama) | YouLama integration | +| [DualMind](https://github.com/tcsenpai/dualmind) | DualMind integration | +| [ollamarama-matrix](https://github.com/h1ddenpr0cess20/ollamarama-matrix) | ollamarama-matrix integration | +| [ollama-chat-app](https://github.com/anan1213095357/ollama-chat-app) | ollama-chat-app integration | +| [Perfect Memory AI](https://www.perfectmemory.ai/) | Perfect Memory AI integration | +| [Hexabot](https://github.com/hexastack/hexabot) | Hexabot integration | +| [Reddit Rate](https://github.com/rapidarchitect/reddit_analyzer) | Reddit Rate integration | +| [OpenTalkGpt](https://github.com/adarshM84/OpenTalkGpt) | OpenTalkGpt integration | +| [VT](https://github.com/vinhnx/vt.ai) | VT integration | +| [Nosia](https://github.com/nosia-ai/nosia) | Nosia integration | +| [Witsy](https://github.com/nbonamy/witsy) | Witsy integration | +| [Abbey](https://github.com/US-Artificial-Intelligence/abbey) | Abbey integration | +| [Minima](https://github.com/dmayboroda/minima) | Minima integration | +| [aidful-ollama-model-delete](https://github.com/AidfulAI/aidful-ollama-model-delete) | aidful-ollama-model-delete integration | +| [Perplexica](https://github.com/ItzCrazyKns/Perplexica) | Perplexica integration | +| [Ollama Chat WebUI for Docker](https://github.com/oslook/ollama-webui) | Ollama Chat WebUI for Docker integration | +| [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) | AI Toolkit for Visual Studio Code integration | +| [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) | MinimalNextOllamaChat integration | +| [Chipper](https://github.com/TilmanGriesel/chipper) | Chipper integration | +| [ChibiChat](https://github.com/CosmicEventHorizon/ChibiChat) | ChibiChat integration | +| [LocalLLM](https://github.com/qusaismael/localllm) | LocalLLM integration | +| [Ollamazing](https://github.com/buiducnhat/ollamazing) | Ollamazing integration | +| [OpenDeepResearcher-via-searxng](https://github.com/benhaotang/OpenDeepResearcher-via-searxng) | OpenDeepResearcher-via-searxng integration | +| [AntSK](https://github.com/AIDotNet/AntSK) | AntSK integration | +| [MaxKB](https://github.com/1Panel-dev/MaxKB/) | MaxKB integration | +| [yla](https://github.com/danielekp/yla) | yla integration | +| [LangBot](https://github.com/RockChinQ/LangBot) | LangBot integration | +| [1Panel](https://github.com/1Panel-dev/1Panel/) | 1Panel integration | +| [AstrBot](https://github.com/Soulter/AstrBot/) | AstrBot integration | +| [Reins](https://github.com/ibrahimcetin/reins) | Reins integration | +| [Flufy](https://github.com/Aharon-Bensadoun/Flufy) | Flufy integration | +| [Ellama](https://github.com/zeozeozeo/ellama) | Ellama integration | +| 
[screenpipe](https://github.com/mediar-ai/screenpipe) | screenpipe integration | +| [Ollamb](https://github.com/hengkysteen/ollamb) | Ollamb integration | +| [Writeopia](https://github.com/Writeopia/Writeopia) | Writeopia integration | +| [AppFlowy](https://github.com/AppFlowy-IO/AppFlowy) | AppFlowy integration | +| [Lumina](https://github.com/cushydigit/lumina.git) | Lumina integration | +| [Tiny Notepad](https://pypi.org/project/tiny-notepad) | Tiny Notepad integration | +| [macLlama (macOS native)](https://github.com/hellotunamayo/macLlama) | macLlama integration | +| [GPTranslate](https://github.com/philberndt/GPTranslate) | GPTranslate integration | +| [ollama launcher](https://github.com/NGC13009/ollama-launcher) | ollama launcher integration | +| [ai-hub](https://github.com/Aj-Seven/ai-hub) | ai-hub integration | +| [Mayan EDMS](https://gitlab.com/mayan-edms/mayan-edms) | Mayan EDMS integration | + +### Cloud + +| Cloud | Link | +| - | - | +| [Google Cloud](https://cloud.google.com/run/docs/tutorials/gpu-gemma2-with-ollama) | Google Cloud integration | +| [Fly.io](https://fly.io/docs/python/do-more/add-ollama/) | Fly.io integration | +| [Koyeb](https://www.koyeb.com/deploy/ollama) | Koyeb integration | + +### Terminal + +| Terminal | Link | +| - | - | +| [oterm](https://github.com/ggozad/oterm) | oterm integration | +| [Ellama Emacs client](https://github.com/s-kostyaev/ellama) | Ellama Emacs client integration | +| [Emacs client](https://github.com/zweifisch/ollama) | Emacs client integration | +| [neollama](https://github.com/paradoxical-dev/neollama) | neollama integration | +| [gen.nvim](https://github.com/David-Kunz/gen.nvim) | gen.nvim integration | +| [ollama.nvim](https://github.com/nomnivore/ollama.nvim) | ollama.nvim integration | +| [ollero.nvim](https://github.com/marco-souza/ollero.nvim) | ollero.nvim integration | +| [ollama-chat.nvim](https://github.com/gerazov/ollama-chat.nvim) | ollama-chat.nvim integration | +| [ogpt.nvim](https://github.com/huynle/ogpt.nvim) | ogpt.nvim integration | +| [gptel Emacs client](https://github.com/karthink/gptel) | gptel Emacs client integration | +| [Oatmeal](https://github.com/dustinblackman/oatmeal) | Oatmeal integration | +| [cmdh](https://github.com/pgibler/cmdh) | cmdh integration | +| [ooo](https://github.com/npahlfer/ooo) | ooo integration | +| [shell-pilot](https://github.com/reid41/shell-pilot) | shell-pilot integration | +| [tenere](https://github.com/pythops/tenere) | tenere integration | +| [llm-ollama](https://github.com/taketwo/llm-ollama) | llm-ollama integration | +| [typechat-cli](https://github.com/anaisbetts/typechat-cli) | typechat-cli integration | +| [ShellOracle](https://github.com/djcopley/ShellOracle) | ShellOracle integration | +| [tlm](https://github.com/yusufcanb/tlm) | tlm integration | +| [podman-ollama](https://github.com/ericcurtin/podman-ollama) | podman-ollama integration | +| [gollama](https://github.com/sammcj/gollama) | gollama integration | +| [ParLlama](https://github.com/paulrobello/parllama) | ParLlama integration | +| [Ollama eBook Summary](https://github.com/cognitivetech/ollama-ebook-summary/) | Ollama eBook Summary integration | +| [Ollama Mixture of Experts (MOE) in 50 lines of code](https://github.com/rapidarchitect/ollama_moe) | Ollama Mixture of Experts integration | +| [vim-intelligence-bridge](https://github.com/pepo-ec/vim-intelligence-bridge) | vim-intelligence-bridge integration | +| [x-cmd ollama](https://x-cmd.com/mod/ollama) | x-cmd ollama integration | +| 
[bb7](https://github.com/drunkwcodes/bb7) | bb7 integration | +| [SwollamaCLI](https://github.com/marcusziade/Swollama) | SwollamaCLI integration | +| [aichat](https://github.com/sigoden/aichat) | aichat integration | +| [PowershAI](https://github.com/rrg92/powershai) | PowershAI integration | +| [DeepShell](https://github.com/Abyss-c0re/deepshell) | DeepShell integration | +| [orbiton](https://github.com/xyproto/orbiton) | orbiton integration | +| [orca-cli](https://github.com/molbal/orca-cli) | orca-cli integration | +| [GGUF-to-Ollama](https://github.com/jonathanhecl/gguf-to-ollama) | GGUF-to-Ollama integration | +| [AWS-Strands-With-Ollama](https://github.com/rapidarchitect/ollama_strands) | AWS-Strands-With-Ollama integration | +| [ollama-multirun](https://github.com/attogram/ollama-multirun) | ollama-multirun integration | +| [ollama-bash-toolshed](https://github.com/attogram/ollama-bash-toolshed) | ollama-bash-toolshed integration | + +### Apple Vision Pro + +| Integration | Link | +| - | - | +| [SwiftChat](https://github.com/aws-samples/swift-chat) | SwiftChat integration | +| [Enchanted](https://github.com/AugustDev/enchanted) | Enchanted integration | + +### Database + +| Integration | Link | +| - | - | +| [pgai](https://github.com/timescale/pgai) | pgai integration | +| [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md) | MindsDB integration | +| [chromem-go](https://github.com/philippgille/chromem-go/blob/v0.5.0/embed_ollama.go) | chromem-go integration | +| [Kangaroo](https://github.com/dbkangaroo/kangaroo) | Kangaroo integration | + +### Package managers + +| Package Manager | Link | +| - | - | +| [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/) | Pacman integration | +| [Gentoo](https://github.com/gentoo/guru/tree/master/app-misc/ollama) | Gentoo integration | +| [Homebrew](https://formulae.brew.sh/formula/ollama) | Homebrew integration | +| [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama) | Helm Chart integration | +| [Guix channel](https://codeberg.org/tusharhero/ollama-guix) | Guix channel integration | +| [Nix package](https://search.nixos.org/packages?show=ollama&from=0&size=50&sort=relevance&type=packages&query=ollama) | Nix package integration | +| [Flox](https://flox.dev/blog/ollama-part-one) | Flox integration | + +### Libraries + +| Library | Link | +| - | - | +| [LangChain](https://python.langchain.com/docs/integrations/chat/ollama/) | LangChain integration | +| [LangChain.js](https://js.langchain.com/docs/integrations/chat/ollama/) | LangChain.js integration | +| [Firebase Genkit](https://firebase.google.com/docs/genkit/plugins/ollama) | Firebase Genkit integration | +| [crewAI](https://github.com/crewAIInc/crewAI) | crewAI integration | +| [Yacana](https://remembersoftwares.github.io/yacana/) | Yacana integration | +| [Spring AI](https://github.com/spring-projects/spring-ai) | Spring AI integration | +| [LangChainGo](https://github.com/tmc/langchaingo/) | LangChainGo integration | +| [LangChain4j](https://github.com/langchain4j/langchain4j) | LangChain4j integration | +| [LangChainRust](https://github.com/Abraxas-365/langchain-rust) | LangChainRust integration | +| [LangChain for .NET](https://github.com/tryAGI/LangChain) | LangChain for .NET integration | +| [LLPhant](https://github.com/theodo-group/LLPhant?tab=readme-ov-file#ollama) | LLPhant integration | +| [LlamaIndex](https://docs.llamaindex.ai/en/stable/examples/llm/ollama/) | LlamaIndex integration | +| 
[LlamaIndexTS](https://ts.llamaindex.ai/modules/llms/available_llms/ollama) | LlamaIndexTS integration | +| [LiteLLM](https://github.com/BerriAI/litellm) | LiteLLM integration | +| [OllamaFarm for Go](https://github.com/presbrey/ollamafarm) | OllamaFarm for Go integration | +| [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp) | OllamaSharp for .NET integration | +| [Ollama for Ruby](https://github.com/gbaptista/ollama-ai) | Ollama for Ruby integration | +| [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs) | Ollama-rs for Rust integration | +| [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp) | Ollama-hpp for C++ integration | +| [Ollama4j for Java](https://github.com/ollama4j/ollama4j) | Ollama4j for Java integration | +| [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama) | ModelFusion Typescript Library integration | +| [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit) | OllamaKit for Swift integration | +| [Ollama for Dart](https://github.com/breitburg/dart-ollama) | Ollama for Dart integration | +| [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel) | Ollama for Laravel integration | +| [LangChainDart](https://github.com/davidmigloz/langchain_dart) | LangChainDart integration | +| [Semantic Kernel - Python](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai/ollama) | Semantic Kernel - Python integration | +| [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md) | Haystack integration | +| [Elixir LangChain](https://github.com/brainlid/langchain) | Elixir LangChain integration | +| [Ollama for R - rollama](https://github.com/JBGruber/rollama) | Ollama for R - rollama integration | +| [Ollama for R - ollama-r](https://github.com/hauselin/ollama-r) | Ollama for R - ollama-r integration | +| [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex) | Ollama-ex for Elixir integration | +| [Ollama Connector for SAP ABAP](https://github.com/b-tocs/abap_btocs_ollama) | Ollama Connector for SAP ABAP integration | +| [Testcontainers](https://testcontainers.com/modules/ollama/) | Testcontainers integration | +| [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama) | Portkey integration | +| [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) | PromptingTools.jl integration | +| [LlamaScript](https://github.com/Project-Llama/llamascript) | LlamaScript integration | +| [llm-axe](https://github.com/emirsahin1/llm-axe) | llm-axe integration | +| [Gollm](https://docs.gollm.co/examples/ollama-example) | Gollm integration | +| [Gollama for Golang](https://github.com/jonathanhecl/gollama) | Gollama for Golang integration | +| [Ollamaclient for Golang](https://github.com/xyproto/ollamaclient) | Ollamaclient for Golang integration | +| [High-level function abstraction in Go](https://gitlab.com/tozd/go/fun) | High-level function abstraction in Go integration | +| [Ollama PHP](https://github.com/ArdaGnsrn/ollama-php) | Ollama PHP integration | +| [Agents-Flex for Java](https://github.com/agents-flex/agents-flex) | Agents-Flex for Java integration | +| [Parakeet](https://github.com/parakeet-nest/parakeet) | Parakeet integration | +| [Haverscript](https://github.com/andygill/haverscript) | Haverscript integration | +| [Ollama for Swift](https://github.com/mattt/ollama-swift) | Ollama for Swift integration | +| [Swollama for Swift](https://github.com/marcusziade/Swollama) | Swollama for 
Swift integration | +| [GoLamify](https://github.com/prasad89/golamify) | GoLamify integration | +| [Ollama for Haskell](https://github.com/tusharad/ollama-haskell) | Ollama for Haskell integration | +| [multi-llm-ts](https://github.com/nbonamy/multi-llm-ts) | multi-llm-ts integration | +| [LlmTornado](https://github.com/lofcz/llmtornado) | LlmTornado integration | +| [Ollama for Zig](https://github.com/dravenk/ollama-zig) | Ollama for Zig integration | +| [Abso](https://github.com/lunary-ai/abso) | Abso integration | +| [Nichey](https://github.com/goodreasonai/nichey) | Nichey integration | +| [Ollama for D](https://github.com/kassane/ollama-d) | Ollama for D integration | +| [OllamaPlusPlus](https://github.com/HardCodeDev777/OllamaPlusPlus) | OllamaPlusPlus integration | + +### Mobile + +| Integration | Link | +| - | - | +| [SwiftChat](https://github.com/aws-samples/swift-chat) | SwiftChat integration | +| [Enchanted](https://github.com/AugustDev/enchanted) | Enchanted integration | +| [Maid](https://github.com/Mobile-Artificial-Intelligence/maid) | Maid integration | +| [Ollama App](https://github.com/JHubi1/ollama-app) | Ollama App integration | +| [ConfiChat](https://github.com/1runeberg/confichat) | ConfiChat integration | +| [Ollama Android Chat](https://github.com/sunshine0523/OllamaServer) | Ollama Android Chat integration | +| [Reins](https://github.com/ibrahimcetin/reins) | Reins integration | + +### Extensions & Plugins + +| Integration | Link | +| - | - | +| [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama) | Raycast extension integration | +| [Discollama](https://github.com/mxyng/discollama) | Discollama integration | +| [Continue](https://github.com/continuedev/continue) | Continue integration | +| [Vibe](https://github.com/thewh1teagle/vibe) | Vibe integration | +| [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama) | Obsidian Ollama plugin integration | +| [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq) | Logseq Ollama plugin integration | +| [NotesOllama](https://github.com/andersrex/notesollama) | NotesOllama integration | +| [Dagger Chatbot](https://github.com/samalba/dagger-chatbot) | Dagger Chatbot integration | +| [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) | Discord AI Bot integration | +| [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram) | Ollama Telegram Bot integration | +| [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation) | Hass Ollama Conversation integration | +| [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama) | Rivet plugin integration | +| [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot) | Obsidian BMO Chatbot plugin integration | +| [Cliobot](https://github.com/herval/cliobot) | Cliobot integration | +| [Copilot for Obsidian plugin](https://github.com/logancyang/obsidian-copilot) | Copilot for Obsidian plugin integration | +| [Obsidian Local GPT plugin](https://github.com/pfrankov/obsidian-local-gpt) | Obsidian Local GPT plugin integration | +| [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama) | Open Interpreter integration | +| [Llama Coder](https://github.com/ex3ndr/llama-coder) | Llama Coder integration | +| [Ollama Copilot](https://github.com/bernardo-bruning/ollama-copilot) | Ollama Copilot integration | +| [twinny](https://github.com/rjmacarthy/twinny) | twinny integration | +| 
[Wingman-AI](https://github.com/RussellCanfield/wingman-ai) | Wingman-AI integration | +| [Page Assist](https://github.com/n4ze3m/page-assist) | Page Assist integration | +| [Plasmoid Ollama Control](https://github.com/imoize/plasmoid-ollamacontrol) | Plasmoid Ollama Control integration | +| [AI Telegram Bot](https://github.com/tusharhero/aitelegrambot) | AI Telegram Bot integration | +| [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) | AI ST Completion integration | +| [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) | Discord-Ollama Chat Bot integration | +| [ChatGPTBox: All in one browser extension](https://github.com/josStorer/chatGPTBox) | ChatGPTBox integration | +| [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) | Discord AI chat/moderation bot integration | +| [Headless Ollama](https://github.com/nischalj10/headless-ollama) | Headless Ollama integration | +| [Terraform AWS Ollama & Open WebUI](https://github.com/xuyangbocn/terraform-aws-self-host-llm) | Terraform AWS Ollama & Open WebUI integration | +| [node-red-contrib-ollama](https://github.com/jakubburkiewicz/node-red-contrib-ollama) | node-red-contrib-ollama integration | +| [Local AI Helper](https://github.com/ivostoykov/localAI) | Local AI Helper integration | +| [vnc-lm](https://github.com/jake83741/vnc-lm) | vnc-lm integration | +| [LSP-AI](https://github.com/SilasMarvin/lsp-ai) | LSP-AI integration | +| [QodeAssist](https://github.com/Palm1r/QodeAssist) | QodeAssist integration | +| [Obsidian Quiz Generator plugin](https://github.com/ECuiDev/obsidian-quiz-generator) | Obsidian Quiz Generator plugin integration | +| [AI Summmary Helper plugin](https://github.com/philffm/ai-summary-helper) | AI Summmary Helper plugin integration | +| [TextCraft](https://github.com/suncloudsmoon/TextCraft) | TextCraft integration | +| [Alfred Ollama](https://github.com/zeitlings/alfred-ollama) | Alfred Ollama integration | +| [TextLLaMA](https://github.com/adarshM84/TextLLaMA) | TextLLaMA integration | +| [Simple-Discord-AI](https://github.com/zyphixor/simple-discord-ai) | Simple-Discord-AI integration | +| [LLM Telegram Bot](https://github.com/innightwolfsleep/llm_telegram_bot) | LLM Telegram Bot integration | +| [mcp-llm](https://github.com/sammcj/mcp-llm) | mcp-llm integration | +| [SimpleOllamaUnity](https://github.com/HardCodeDev777/SimpleOllamaUnity) | SimpleOllamaUnity integration | +| [UnityCodeLama](https://github.com/HardCodeDev777/UnityCodeLama) | UnityCodeLama integration | +| [NativeMind](https://github.com/NativeMindBrowser/NativeMindExtension) | NativeMind integration | +| [GMAI - Gradle Managed AI](https://gmai.premex.se/) | GMAI integration | + +### Supported backends + +| Backend | Link | +| - | - | +| [llama.cpp](https://github.com/ggml-org/llama.cpp) | llama.cpp integration | + +### Observability + +| Tool | Link | +| - | - | +| [Opik](https://www.comet.com/docs/opik/cookbook/ollama) | Opik integration | +| [Lunary](https://lunary.ai/docs/integrations/ollama) | Lunary integration | +| [OpenLIT](https://github.com/openlit/openlit) | OpenLIT integration | +| [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) | HoneyHive integration | +| [Langfuse](https://langfuse.com/docs/integrations/ollama) | Langfuse integration | +| [MLflow Tracing](https://mlflow.org/docs/latest/llms/tracing/index.html#automatic-tracing) | MLflow Tracing integration | + # OpenedAI Speech Notice: This software is mostly obsolete and will no longer be updated. 
@@ -9,8 +833,6 @@ Some Alternatives: * https://github.com/astramind-ai/Auralis * https://lightning.ai/docs/litserve/home?code_sample=speech ----- - An OpenAI API compatible text to speech server. * Compatible with the OpenAI audio/speech API @@ -19,148 +841,67 @@ An OpenAI API compatible text to speech server. * A free, private, text-to-speech server with custom voice cloning Full Compatibility: -* `tts-1`: `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer` (configurable) -* `tts-1-hd`: `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer` (configurable, uses OpenAI samples by default) -* response_format: `mp3`, `opus`, `aac`, `flac`, `wav` and `pcm` -* speed 0.25-4.0 (and more) + +| Feature | Description | +| - | - | +| `tts-1` | `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer` (configurable) | +| `tts-1-hd` | `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer` (configurable, uses OpenAI samples by default) | +| `response_format` | `mp3`, `opus`, `aac`, `flac`, `wav` and `pcm` | +| `speed` | 0.25-4.0 (and more) | Details: -* Model `tts-1` via [piper tts](https://github.com/rhasspy/piper) (very fast, runs on cpu) - * You can map your own [piper voices](https://rhasspy.github.io/piper-samples/) via the `voice_to_speaker.yaml` configuration file -* Model `tts-1-hd` via [coqui-ai/TTS](https://github.com/coqui-ai/TTS) xtts_v2 voice cloning (fast, but requires around 4GB GPU VRAM) - * Custom cloned voices can be used for tts-1-hd, See: [Custom Voices Howto](#custom-voices-howto) - * 🌐 [Multilingual](#multilingual) support with XTTS voices, the language is automatically detected if not set - * [Custom fine-tuned XTTS model support](#custom-fine-tuned-model-support) - * Configurable [generation parameters](#generation-parameters) - * Streamed output while generating -* Occasionally, certain words or symbols may sound incorrect, you can fix them with regex via `pre_process_map.yaml` -* Tested with python 3.9-3.11, piper does not install on python 3.12 yet +| Detail | Description | +| - | - | +| Model `tts-1` via [piper tts](https://github.com/rhasspy/piper) | Very fast, runs on CPU | +| Model `tts-1-hd` via [coqui-ai/TTS](https://github.com/coqui-ai/TTS) xtts_v2 voice cloning | Fast, but requires around 4GB GPU VRAM | +| Custom cloned voices | Can be used for tts-1-hd | +| 🌐 Multilingual support | With XTTS voices, the language is automatically detected if not set | +| Custom fine-tuned XTTS model support | See: [Custom fine-tuned XTTS model support](#custom-fine-tuned-model-support) | +| Configurable generation parameters | See: [Generation parameters](#generation-parameters) | +| Streamed output | While generating | +| Occasionally, certain words or symbols may sound incorrect | Can be fixed with regex via `pre_process_map.yaml` | +| Tested with python | 3.9-3.11, piper does not install on python 3.12 yet | If you find a better voice match for `tts-1` or `tts-1-hd`, please let me know so I can update the defaults. ## Recent Changes -Version 0.18.2, 2024-08-16 - -* Fix docker building for amd64, refactor github actions again, free up more disk space - -Version 0.18.1, 2024-08-15 - -* refactor github actions - -Version 0.18.0, 2024-08-15 - -* Allow folders of wav samples in xtts. Samples will be combined, allowing for mixed voices and collections of small samples. Still limited to 30 seconds total. Thanks @nathanhere. 
-* Fix missing yaml requirement in -min image -* fix fr_FR-tom-medium and other 44khz piper voices (detect non-default sample rates) -* minor updates - -Version 0.17.2, 2024-07-01 - -* fix -min image (re: langdetect) - -Version 0.17.1, 2024-07-01 - -* fix ROCm (add langdetect to requirements-rocm.txt) -* Fix zh-cn for xtts - -Version 0.17.0, 2024-07-01 - -* Automatic language detection, thanks [@RodolfoCastanheira](https://github.com/RodolfoCastanheira) - -Version 0.16.0, 2024-06-29 - -* Multi-client safe version. Audio generation is synchronized in a single process. The estimated 'realtime' factor of XTTS on a GPU is roughly 1/3, this means that multiple streams simultaneously, or `speed` over 2, may experience audio underrun (delays or pauses in playback). This makes multiple clients possible and safe, but in practice 2 or 3 simultaneous streams is the maximum without audio underrun. - -Version 0.15.1, 2024-06-27 - -* Remove deepspeed from requirements.txt, it's too complex for typical users. A more detailed deepspeed install document will be required. - -Version 0.15.0, 2024-06-26 - -* Switch to [coqui-tts](https://github.com/idiap/coqui-ai-TTS) (updated fork), updated simpler dependencies, torch 2.3, etc. -* Resolve cuda threading issues - -Version 0.14.1, 2024-06-26 - -* Make deepspeed possible (`--use-deepspeed`), but not enabled in pre-built docker images (too large). Requires the cuda-toolkit installed, see the Dockerfile comment for details - -Version 0.14.0, 2024-06-26 - -* Added `response_format`: `wav` and `pcm` support -* Output streaming (while generating) for `tts-1` and `tts-1-hd` -* Enhanced [generation parameters](#generation-parameters) for xtts models (temperature, top_p, etc.) -* Idle unload timer (optional) - doesn't work perfectly yet -* Improved error handling - -Version 0.13.0, 2024-06-25 - -* Added [Custom fine-tuned XTTS model support](#custom-fine-tuned-model-support) -* Initial prebuilt arm64 image support (Apple M-series, Raspberry Pi - MPS is not supported in XTTS/torch), thanks [@JakeStevenson](https://github.com/JakeStevenson), [@hchasens](https://github.com/hchasens) -* Initial attempt at AMD GPU (ROCm 5.7) support -* Parler-tts support removed -* Move the *.default.yaml to the root folder -* Run the docker as a service by default (`restart: unless-stopped`) -* Added `audio_reader.py` for streaming text input and reading long texts - -Version 0.12.3, 2024-06-17 - -* Additional logging details for BadRequests (400) - -Version 0.12.2, 2024-06-16 - -* Fix :min image requirements (numpy<2?) - -Version 0.12.0, 2024-06-16 - -* Improved error handling and logging -* Restore the original alloy tts-1-hd voice by default, use alloy-alt for the old voice. - -Version 0.11.0, 2024-05-29 - -* 🌐 [Multilingual](#multilingual) support (16 languages) with XTTS -* Remove high Unicode filtering from the default `config/pre_process_map.yaml` -* Update Docker build & app startup. thanks @justinh-rahb -* Fix: "Plan failed with a cudnnException" -* Remove piper cuda support - -Version: 0.10.1, 2024-05-05 - -* Remove `runtime: nvidia` from docker-compose.yml, this assumes nvidia/cuda compatible runtime is available by default. 
thanks [@jmtatsch](https://github.com/jmtatsch) - -Version: 0.10.0, 2024-04-27 - -* Pre-built & tested docker images, smaller docker images (8GB or 860MB) -* Better upgrades: reorganize config files under `config/`, voice models under `voices/` -* **Compatibility!** If you customized your `voice_to_speaker.yaml` or `pre_process_map.yaml` you need to move them to the `config/` folder. -* default listen host to 0.0.0.0 - -Version: 0.9.0, 2024-04-23 - -* Fix bug with yaml and loading UTF-8 -* New sample text-to-speech application `say.py` -* Smaller docker base image -* Add beta [parler-tts](https://huggingface.co/parler-tts/parler_tts_mini_v0.1) support (you can describe very basic features of the speaker voice), See: (https://www.text-description-to-speech.com/) for some examples of how to describe voices. Voices can be defined in the `voice_to_speaker.default.yaml`. Two example [parler-tts](https://huggingface.co/parler-tts/parler_tts_mini_v0.1) voices are included in the `voice_to_speaker.default.yaml` file. `parler-tts` is experimental software and is kind of slow. The exact voice will be slightly different each generation but should be similar to the basic description. - -... - -Version: 0.7.3, 2024-03-20 - -* Allow different xtts versions per voice in `voice_to_speaker.yaml`, ex. xtts_v2.0.2 -* Quality: Fix xtts sample rate (24000 vs. 22050 for piper) and pops - +| Version | Date | Changes | +| - | - | - | +| 0.18.2 | 2024-08-16 | Fix docker building for amd64, refactor github actions again, free up more disk space | +| 0.18.1 | 2024-08-15 | Refactor github actions | +| 0.18.0 | 2024-08-15 | Allow folders of wav samples in xtts. Samples will be combined, allowing for mixed voices and collections of small samples. Still limited to 30 seconds total. Fix missing yaml requirement in -min image. Fix fr_FR-tom-medium and other 44khz piper voices (detect non-default sample rates). Minor updates | +| 0.17.2 | 2024-07-01 | Fix -min image (re: langdetect) | +| 0.17.1 | 2024-07-01 | Fix ROCm (add langdetect to requirements-rocm.txt). Fix zh-cn for xtts | +| 0.17.0 | 2024-07-01 | Automatic language detection | +| 0.16.0 | 2024-06-29 | Multi-client safe version. Audio generation is synchronized in a single process. The estimated 'realtime' factor of XTTS on a GPU is roughly 1/3, this means that multiple streams simultaneously, or `speed` over 2, may experience audio underrun (delays or pauses in playback). This makes multiple clients possible and safe, but in practice 2 or 3 simultaneous streams is the maximum without audio underrun | +| 0.15.1 | 2024-06-27 | Remove deepspeed from requirements.txt, it's too complex for typical users. A more detailed deepspeed install document will be required | +| 0.15.0 | 2024-06-26 | Switch to [coqui-tts](https://github.com/idiap/coqui-ai-TTS) (updated fork), updated simpler dependencies, torch 2.3, etc. Resolve cuda threading issues | +| 0.14.1 | 2024-06-26 | Make deepspeed possible (`--use-deepspeed`), but not enabled in pre-built docker images (too large). Requires the cuda-toolkit installed, see the Dockerfile comment for details | +| 0.14.0 | 2024-06-26 | Added `response_format`: `wav` and `pcm` support. Output streaming (while generating) for `tts-1` and `tts-1-hd`. Enhanced [generation parameters](#generation-parameters) for xtts models (temperature, top_p, etc.). Idle unload timer (optional) - doesn't work perfectly yet. Improved error handling | +| 0.13.0 | 2024-06-25 | Added [Custom fine-tuned XTTS model support](#custom-fine-tuned-model-support). 
Initial prebuilt arm64 image support (Apple M-series, Raspberry Pi - MPS is not supported in XTTS/torch). Initial attempt at AMD GPU (ROCm 5.7) support. Parler-tts support removed. Move the *.default.yaml to the root folder. Run the docker as a service by default (`restart: unless-stopped`). Added `audio_reader.py` for streaming text input and reading long texts | +| 0.12.3 | 2024-06-17 | Additional logging details for BadRequests (400) | +| 0.12.2 | 2024-06-16 | Fix :min image requirements (numpy<2?) | +| 0.12.0 | 2024-06-16 | Improved error handling and logging. Restore the original alloy tts-1-hd voice by default, use alloy-alt for the old voice | +| 0.11.0 | 2024-05-29 | 🌐 [Multilingual](#multilingual) support (16 languages) with XTTS. Remove high Unicode filtering from the default `config/pre_process_map.yaml`. Update Docker build & app startup. Fix: "Plan failed with a cudnnException". Remove piper cuda support | +| 0.10.1 | 2024-05-05 | Remove `runtime: nvidia` from docker-compose.yml, this assumes nvidia/cuda compatible runtime is available by default | +| 0.10.0 | 2024-04-27 | Pre-built & tested docker images, smaller docker images (8GB or 860MB). Better upgrades: reorganize config files under `config/`, voice models under `voices/`. Default listen host to 0.0.0.0 | +| 0.9.0 | 2024-04-23 | Fix bug with yaml and loading UTF-8. New sample text-to-speech application `say.py`. Smaller docker base image. Add beta [parler-tts](https://huggingface.co/parler-tts/parler_tts_mini_v0.1) support (you can describe very basic features of the speaker voice) | +| 0.7.3 | 2024-03-20 | Allow different xtts versions per voice in `voice_to_speaker.yaml`, ex. xtts_v2.0.2. Quality: Fix xtts sample rate (24000 vs. 22050 for piper) and pops | ## Installation instructions ### Create a `speech.env` environment file Copy the `sample.env` to `speech.env` (customize if needed) + ```bash cp sample.env speech.env ``` #### Defaults + ```bash TTS_HOME=voices HF_HOME=voices @@ -171,6 +912,7 @@ HF_HOME=voices ``` ### Option A: Manual installation + ```shell # install curl and ffmpeg sudo apt install curl ffmpeg @@ -236,7 +978,6 @@ options: Set the log level (default: INFO) ``` - ## Sample Usage You can use it like this: @@ -291,22 +1032,25 @@ python say.py -t "The quick brown fox jumped over the lazy dog." -m tts-1-hd -v You can also try the included `audio_reader.py` for listening to longer text and streamed input. Example usage: + ```bash python audio_reader.py -s 2 < LICENSE # read the software license - fast ``` ## OpenAI API Documentation and Guide -* [OpenAI Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech) -* [OpenAI API Reference](https://platform.openai.com/docs/api-reference/audio/createSpeech) - +| Documentation | Link | +| - | - | +| [OpenAI Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech) | OpenAI Text to speech guide | +| [OpenAI API Reference](https://platform.openai.com/docs/api-reference/audio/createSpeech) | OpenAI API Reference | ## Custom Voices Howto ### Piper - 1. Select the piper voice and model from the [piper samples](https://rhasspy.github.io/piper-samples/) - 2. Update the `config/voice_to_speaker.yaml` with a new section for the voice, for example: +1. Select the piper voice and model from the [piper samples](https://rhasspy.github.io/piper-samples/) +2. Update the `config/voice_to_speaker.yaml` with a new section for the voice, for example: + ```yaml ... 
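# each entry under tts-1 maps a voice name to a piper model file and speaker id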
tts-1: @@ -314,7 +1058,9 @@ tts-1: model: voices/en_US-ryan-high.onnx speaker: # default speaker ``` - 3. New models will be downloaded as needed, of you can download them in advance with `download_voices_tts-1.sh`. For example: + +3. New models will be downloaded as needed, of you can download them in advance with `download_voices_tts-1.sh`. For example: + ```shell bash download_voices_tts-1.sh en_US-ryan-high ``` @@ -324,12 +1070,15 @@ bash download_voices_tts-1.sh en_US-ryan-high Coqui XTTS v2 voice cloning can work with as little as 6 seconds of clear audio. To create a custom voice clone, you must prepare a WAV file sample of the voice. #### Guidelines for preparing good sample files for Coqui XTTS v2 -* Mono (single channel) 22050 Hz WAV file -* 6-30 seconds long - longer isn't always better (I've had some good results with as little as 4 seconds) -* low noise (no hiss or hum) -* No partial words, breathing, laughing, music or backgrounds sounds -* An even speaking pace with a variety of words is best, like in interviews or audiobooks. -* Audio longer than 30 seconds will be silently truncated. + +| Guideline | Description | +| - | - | +| Mono (single channel) 22050 Hz WAV file | | +| 6-30 seconds long | Longer isn't always better (I've had some good results with as little as 4 seconds) | +| Low noise | No hiss or hum | +| No partial words, breathing, laughing, music or backgrounds sounds | | +| An even speaking pace with a variety of words is best | Like in interviews or audiobooks | +| Audio longer than 30 seconds will be silently truncated | | You can use FFmpeg to prepare your audio files, here are some examples: @@ -376,8 +1125,8 @@ Coqui XTTSv2 has support for multiple languages: English (`en`), Spanish (`es`), Unfortunately the OpenAI API does not support language, but you can create your own custom speaker voice and set the language for that. -1) Create the WAV file for your speaker, as in [Custom Voices Howto](#custom-voices-howto) -2) Add the voice to `config/voice_to_speaker.yaml` and include the correct Coqui `language` code for the speaker. For example: +1. Create the WAV file for your speaker, as in [Custom Voices Howto](#custom-voices-howto) +2. Add the voice to `config/voice_to_speaker.yaml` and include the correct Coqui `language` code for the speaker. For example: ```yaml xunjiang: @@ -386,9 +1135,10 @@ Unfortunately the OpenAI API does not support language, but you can create your language: zh-cn ``` -3) Don't remove high unicode characters in your `config/pre_process_map.yaml`! If you have these lines, you will need to remove them. For example: +3. Don't remove high unicode characters in your `config/pre_process_map.yaml`! If you have these lines, you will need to remove them. For example: Remove: + ```yaml - - '[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF\U00002702-\U000027B0\U000024C2-\U0001F251]+' - '' @@ -396,19 +1146,21 @@ Remove: These lines were added to the `config/pre_process_map.yaml` config file by default before version 0.11.0: -4) Your new multi-lingual speaker voice is ready to use! - +4. Your new multi-lingual speaker voice is ready to use! ## Custom Fine-Tuned Model Support Adding a custom xtts model is simple. Here is an example of how to add a custom fine-tuned 'halo' XTTS model. -1) Save the model folder under `voices/` (all 4 files are required, including the vocab.json from the model) -``` +1. 
## Custom Fine-Tuned Model Support Adding a custom xtts model is simple. Here is an example of how to add a custom fine-tuned 'halo' XTTS model. -1) Save the model folder under `voices/` (all 4 files are required, including the vocab.json from the model) -``` +1. Save the model folder under `voices/` (all 4 files are required, including the vocab.json from the model) + +```bash openedai-speech$ ls voices/halo/ -config.json vocab.json model.pth sample.wav +config.json vocab.json model.pth sample.wav ``` + +2. Add the custom voice entry under the `tts-1-hd` section of `config/voice_to_speaker.yaml`: + ```yaml tts-1-hd: ... @@ -417,7 +1169,8 @@ tts-1-hd: speaker: voices/halo/sample.wav # voice sample is required model_path: voices/halo ``` + +3. The model will be loaded when you access the voice for the first time (`--preload` doesn't work with custom models yet) ## Generation Parameters
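As a rough orientation before the detailed list, XTTS generation settings are configured per voice in `config/voice_to_speaker.yaml`. The entry below is only a hypothetical sketch: the voice name and values are made up, `temperature` and `top_p` are the parameters mentioned earlier in this README, and the full set of supported keys is documented in this section.

```yaml
# Hypothetical sketch: the voice name and values are illustrative, not project defaults.
tts-1-hd:
  my_cloned_voice:
    speaker: voices/my_cloned_voice.wav   # voice sample, as in the custom voice examples
    temperature: 0.75                     # higher values give more variation between runs
    top_p: 0.85                           # nucleus sampling cutoff
```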