.l4Q1`C*s"tGM@cWLO]d"gXLtOtXFEF'C-FT&cb?Rtcpr!P@17f;7+egiiN5_lg%pQ$ki+5=e.cK'FATW!IFLM!RK^9@YkB>7c'TBcK:mT.RP(Mf-@5$SZFeH/.qOP8[CoK)io(%@c:lSb3O2BbV*kQ.5V;hA/k^3n[[Y*ugbok\A!W9pbiUa<#SI*r%'kEe"A5b)h$J1fY:qTN/A8ZWP@;.2sQH*BO9?RQ4o?dl(_&eE7@qRJ<6Ya+eY(TVVaQ%FO.`'.!Ri+K^`:31ld!Nd5iq]W5QOq=f.eq(V)Uk6r8-.gY$-^8Z9Zfcn;as,G^"!SdqT[dZ"JY_lae)gr$22\D8&P@BB*(3ER:t[/cOcf5l\f^s%$d+EPY7T<=jb_'Z6V5)GJPlp_Emr+-nTf!Xh[,W5l/:+XCh`)m@aY`(`<>GLXN!KXd:IoJG\W:Fj^_I^.*u#>,p49j0FBeWqI.E08o4>Ou1#E4n=k=2V3329+Ai#pNL/dG5gide_XZ@>arOj@;ST5dpT^:1Si;i^7m^g`-bDF-JS5A=4?.KcVHT`92&AFq0_Kf*G7Fp7IQp<6X5X$i$qeXD6W,s2p/`jD#C5UEN#p\%gK7=E`E4,.#hI+Sk$W`Ve`%r;HM%re66(8W4ReZ@>'cl1CQJi_:)8)A1hHi/(JhV4r6M5f:g-&S2@TKmD0eRj[I3&-*UB@uEgomJ+:HTM3tBADhg7"n'RlouaO+=ilTr`.t>d*Lq]"f.@)DdYqtqYfj4>"tm40(rX6nXbOt'ZkSV9AZpT$CK5.h#AA%O=LZEQEAkCYF$Z1"H$l!'i/&a0Mj4$Pn9[bE`9$,S%g*$5\Aa0q>PC>GT6/-("@qMqK86T8&T4"FY!#s#E%h6Ak2[#=)7!j$*%`B?p\6h;CY5CBF~>
+Gatm<>Ar7S'Roe[d+a`"AjfeOV&3Aec!7%mL28U<;YPa..%>`<3g:=+q=UL.b@V"_?mhjt'#PNNV.an@])ri:3icENl?9k1RgmWEf+OSW8kX-uaTI^2^kobh)40R<\*V(MG=kL/l67E0crDcC!C;Mism%J.g;tGdZ\QYYIVbDcfl%kgK:cbeG3u4iPM2e\77,Br:.(BXM2P%Qflra'nMm>`!UTCB>!7rQh2j.O.aU,4AoSG_;!8M8bHFAs,Cq@7E9WE3>XON"J2j'#so=HfQ;49-Je2Nf.,bN-QdVGi?.IfoV]"TIC7YBHSF))gS&?09M>R?/28)pJ5SH(3p[f^a"(ZV7p28AO)FO;$P\*!jL(&oY1$Bn2];0\Nj>5V`QB:.k0,[,3[*[(*"IFF\j772RG-*LNUPK91+!%?8o9-;7"AIt#kPu-]]U:3[=67XYNlE/pjD8HSQB`Qj=h6=ru"758YpQf.?J3OJ?IF*;b+EH-K->/H?T[jF2KJrC8\Ei7D`aB;b$@&oUeLlPg[k&+%Asr1dp9t1#@h!9kpm=gF"L`"10LYRB,FUeXncg+cL=QSkk(h4@uumAk%W3NO:W]9=fC;Be.jC#HO1t/V^CUm=`>F;&m,FVtp$@DSCrH7uefqNrAJ902=ITnC/tYf?Z9QVs%kOOK2OJW$lP=)S1b[5o=s>M*j[]&-!>a6Z`5nofdFd'BP&`MQRCi4?#[\%sq.TqfG7?"NOFT'&8NbfYg/P"/mbdeZ?PEM*gjMYgc/r>3XFi,ms\=8aFcmcHDLk!5IZuodgT$\ji,0-D]\C7ASNVA`)<8r8UkJ:Am.dr%9/4BBX@m>:L8J1:,4IBjP!M[4GSG'j'jRd*Sd2$DS>f&5@L%kWtXCBs[W(CX]W,-<9A<@n"7`J8iiDJ\>+BH?\$)>M%#R@"$@%p\^l^o@;'+,32,,qlY^eLcD(88aOD:hqtocD68h,LuL,XS;V<._?t^,U9/;P#\uO#?uFV1U:AXg[@(cQHE0jOaZ`U&b=A!dc2']Ea'o]@6:tc9p/)ODI2-)Qr,;Z.`blUW.U@=n:V\e%'8#4o30CBqYdaOCZ$hpc[;LC.7]l>HJqoWS]"tW)O'fL&Y[4rU(Kn0#^FXIg1Jnr;4V[6G=e6.^Q?NYdXCUmaV'&M4$T`BKh>~>
endstream
endobj
96 0 obj
@@ -574,10 +574,10 @@ endobj
>>
endobj
97 0 obj
-<< /Length 1845 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1852 /Filter [ /ASCII85Decode /FlateDecode ]
>>
stream
-Gb!#\gMZ%0&:O:SkcGdHAjhNfm1oj`?$kO!S"tn1(9!-X[O>@1/:'b<8c&+VAB>h6U?r"fTElO,'JF+EbT%+83us9WKTl[?%%(n]&>Zm9@UR)%M3?@SZ\=[lU1p*/DdY+A,phqTg@uDXQ,$Fl!POI7)H9h^PMUnm[Gpp7#5`5D'LuQCr#6ggcZR9m@YM]Okn_R8IR08JX$:AJ$X_/?Z%aHoO/cHd5b.\^d1_2$9<#s2*P\J\X3)LQW,X=:9`/ST1GaihhhgAVcJ)(r'XkI!$Z)^`iRcJs!2pX!$g:]hbLM[j0e7e.Qt5E&$pY##"1D'EYB0"e:a_1Q1@j"s>1ge.VVIAK`SBi*o]9LXe^lujk"h^/+3GGjH=p#Q0'H4.@I7$ErJdj>k/;\+0-6J'b!H=b=3:6Ws(VT5DW7'F@"gu'f=20eZ7g[^5_1i*OpM/^:"hY10C`Pr+ca1*@'rjN%Dtk+5HPLhP(Xls!UKlXXoJZ`dRnA9MOgER)bX.>p(I3R<$]-_\tao;'oJ]T%LsYT9e@VANClXrR_4<@f`n_oL*S^@8%DLe(bF@u>*iU2Vp^/aW*L&Pa/eN6*-?b\^1F:DkGVR4V$;#`)TpGW6j9R/2T=LM+>IkQ#uPq(?QBj.7h'D;3bkAe?]Q]>g(p&s[p@1^U)+H)G8c`hLW'_7:E[2(=O5:XP\medO\'OEd.mV1kE"*,Sk"i;PP[FN'\]1f$^LlZ&k!0s'9B'tn(M]qr2Ttf;2Fj(U^R9(Ko;N/-T8:V!cnl;B89mKY0l)9VdelS0IdTh;n%kAX)c7!mIZ&Lu*^=Hd$D\:3tf$7N,*"mnig"f5-\H@b[Jh23PLg!:&kC'2Qc;k,u#Y)-=;!3mA5'Q=['nmk$eo$!-EBHN;1C5:N&$!<~>
+Gatm<>Ar7S'Roe[cs'ZrAjfPHAISj>>MQ&WP:Z7P!R!1<2J!8X79>^pl)u0T9B)Whg)a')WHBKWn2n+?L\s+P>u!oI7bCkD^JCWG7BY\J^A?_seao0U&:'3q"a=RR5=Q_*:7qVb5C3+XKK6Pg2,-PX4j-k*Yd9h@#kD9IRBE*qh[tP!4(D0hGo,WO+O%b;s%1D_'ef?="9\:k9_2i8[>aZcXed:shrZ+9j"sgnJ>t0L68"/'<"&`d9L&95;iJ@)q*gTT=!RCVa*QR^p\N-Zr3I1(dD,3r_2He6>(@^(lUf7*L&@C*X%)OX!:1oX!pHb(XEFO4[H'TekKEbp4!2nm_?d],c5fB4k*,+pduuldpZ:K.JJU'Vq4LbQ5[g_$)`O_#Ji]jIo6$W5!aEXZISW2RPX]qujXc^oQ:.I>T3(9)V8X/4.p!j-'"*)qF?K4J;?PiVW2NY)Tf]*Bb!joZRnZi*,,^9\RY3-F=]VMj&l`]Y2d/gV2Tui7X`UAFm(e)FFdn`0&cOXcckMHb=:R8c\%h^,-5il&W6d(poYgE&rRRN`.D_$SJM@GEbkHo*7cnb*AiKG#`%5rE7bj/R,LNn0:l)KCJ=\\WMN^]O2Mi)*=2cQ*h\DR]!UCW&9VW@31DMh8]nbHX8Z?_gpKR^8:7V^mRk2>Nr?^!8MqU2%JA90FD"<]d%>@T]"Gl`WE(bQQt:j8scT`.#a.<9iA\BG!Ou#IemeE9r]PD$bQEslrDNPLoKB[?_4Je:%6C6d!%'>\.AVsQrrH7s9:D9rspiVcY&2*(]Nc3)UjSCoB__J%&-E>gPO4?f+S6#'=n>iqWesgIl3U@N@5SWHE6:j57G*cd5Rp":U00HJdi8:XQjaUY;MD:"_I3HY*"T9l:6&@$4A-U't](N]p^_KqM(\d:W]B+-`Dn\C='D;3<)'VJckD?@aYCeAhTMDLXKK7)2`1OA0&dKHq6;\l!UJPiiEi/^jsJX6!DhWEFkWrFN7djD"T\taEG\BqOf0(BZ/4V:+9$#X29oD0[mHrlR\/.i3NZgunu/hlS"0bH3n*;ml]bUS^OjaZK`17I?*Xnugsm2fBQA1gj^S)T_r]_Qe]ftV'(,/B3R9u2$1_nmVG(WIhgPsq58@khK1OmsPU3$'F>5;>_f[j\:8iuN?aG;-=%o?c$/MOHW:d.*7tFI/Xkg%_+a*%r]pU_UWI@_%$5j7Wh0.B8A%,(8L4!8p=@Knp9>,Q]F6'#KBHZ3P962g-!/n+oiuPPnOMGAaMA&q1N0.&_H]`e1fHK!3WBaH]aQ=bm(q01T@h2\-b>"drm[M4/#tb!gu\f4R)!2H1!mBSj`h~>
endstream
endobj
98 0 obj
@@ -589,10 +589,10 @@ endobj
>>
endobj
99 0 obj
-<< /Length 1472 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1313 /Filter [ /ASCII85Decode /FlateDecode ]
>>
stream
-Gat%$=`<%S&:XAW&G"6RL`'&&FY&Ca9t+H)8<[j6pgpc[A=Kk-aJ,`QNrOO,_racBh&&G0N&7(#jrX%N*.82b^IHjXo&"pdJVd(!u+jL$s@f+5*$>RdSs,M!CB"soK"'b[t(luo7Pf\EZJPEm>8hB%lNY/FQs#rWrX%>pi7Qbe%<9(2#]GNmmAN^7a>=4.tW]-8bo`+pmYkq"C;*$.NR/0ekejei`rk/&!/53H=CGhV(=h?H-`>+4iV5bY(qD$^6A'T=h[kYF'K<`0VRJ7W\t5N`G#P5V80UPUk#XXir<#c('LZg^]bIlK/BQj#d>Z'#4P[#=:&_0k7oM_d?9\)9p>`hS'Z`s0W7*CgEQZ$Cr#I$B#Q67goY`hRVRlChMc#aU0d`i"dA;r+K(jVSg+=&&:38kUT:JL%[AE[T#8*I,*^.p/"E/<204%?aO$/(YIm4c'&P)tDNQ9qqulP:8(U0)?7M7,bolqc7e:(rmr)^mg("_Taakh1?Y/&7m3ja6+OO_Oo:qcS'L'%i2NQb/O7(:3-tj$TS["$8(s?D='K-m]Zn!$S+,!jinG`TkB_e!`&$B$_W%#kYn[TnL.CMIt\A\MR=E-;u6gVt*[=E1!V?6uj8Xcg0,?9$VsIi*M\_Er.aDX0o4f-7N1<<](kP1Ki(L:%O'V,98g\%%IS2i*s]HHD+J$NmOfTLRc^m4Q/8o'>,P/\*Q8fNoC5)m4L4!BS1K1BhI)/)h(uk`eF?X`HOQoj^?P<9.@jlmYXJ1lP:m[H+"YWd.h+5nhm]1Rd&cu_(pE!_!_EHj="j9!me:4??CEKQDclO@W0\Y8cKAF*jdM?h\Y!nL??LKrNa8:LHO!l1@R"aROa,;J&QqnYR-@2LMR)'o"?+;;m_e^@3=-])o9c>"nF7k+n2@\&/;Xmn/^otgr]E8f"3,3->g#4I_/T`":N@-9ik/>GO5[o]p=+UEOG3*VL((,FhZLHl\AtQ-AY]H$#!Mj@O$U5GM[8Y9LF=~>
+Gb!#[gMYb*&:O:SkV3*QQD?o?#)X,>VI!eqD6f#L0?mt"g6=2)Q<]QpjmSPI:`RW@';I4=^k%7^cC>ctS09P\ak(4sF$cD)qYWOF@>#ZYpXObSf>C7--lKgdLNQ5VOe+#iQqc1?]G<>smo>8AQZE\+N4oYiF@JD%f_A=,n)KPOrRlPXFt3UG5HS0idID1$4;;4o4.=C:l8OpfC:p1oC4B3_q4IR%?gFcWInqX$%IWaiZL406ZH\fROoWs>E=eO$*$"o%7c\XW>oTjCP[*3ADU(;Q6a;uX`i8e3gYkB$jW[2jhQcY/M4ER3Y[0@9Xjrm*7F^Pf;i&(e;JW0K8C<&`p%BAfj*\TP"$6VB^\G'Or+];X*e=i%8?n1KnL<^(5O/&%:RSQ5'+^kW_h?J0!*VkWj34@7rG+`"2Wgf#upc^gJ]34(LHs!H(C_tHV,l.#.C*[-/*#/QRV"$O^9]6Fj"V$+b_PR6Fu#-+0f$0Y!>(`NAU>D3\5)N+Zh2n\'3$ptoKbgbO^=A;&7GE6)&f&Urg\%-hU\o1AN(NQ3lP]JpDhUijbeM@^2dN6#DBq.)7Hhm%N4n'(+07nW^r6fDa*!lFJO:KSYQart6PCifcH+$DWYY#I/b4ep'<0J.MmLLpt`b##Y304>sPC.&\Z#H*#WR60\E++*]2Dp]WH+Bi0R&WY/2"DVI5a@Mi;ZQ594StS=/6^R5oCj:d6d"eB?s0Xijd+bYlpu^s,,WIr+1F$5%b*3jF7r'q`DZ5QDTsiSZV[F[5)Y,h^m9)V!F_'*`im.HAW:STRaFg@uMh:?H.hN)3[7#VD=kg4#7D$6&YRl?JGHM#i[2@)28u^2"2?0?o*OL5''%dAUlVGI`qJh7Cq@*,oM@nK2>$r6o"Cpe;hpG!84F7LHL^FE<0,&H'tck1O%T^>9hY3"d$:Q`=lT%PJ$nqSmh0s~>
endstream
endobj
100 0 obj
@@ -604,10 +604,10 @@ endobj
>>
endobj
101 0 obj
-<< /Length 1726 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1805 /Filter [ /ASCII85Decode /FlateDecode ]
>>
stream
-Gatmdu-q)f;o[4UZ+up11'7:[-#?Gm6#*a=$JdIT/@T..6*oW]G$emV7sT48:U.sRPmhr>#J6#-&ph?gO]sP_(Ei!U6ht:n1?d"a*fBTUt7,Q50[gJ?_,`JXI6<'&P4R#`:KrQCr=H`fF<7.>`@mg%?ihT\a;8I5tFKSN1r;KY+pGmKW*clN[)TDGL#i9^YLmPQ"1lI':"B/YX,slA`s]7-drBBGKl0nj:^/k%T*MEq2XQ15^1!'S$nE#C*7>FcbB>AHJo7,>r"uc'1Eb2fI&fh,Do$4!6-j.p)rouEAP^&d#o#Lk@OW$,&kaEaN18B""BAJ=RQSAJRmt5P*I@cRF9oa&LDbHHln%ibC7[ST!ur:L.Ths_&.7Lo,qk4!ZLeA`YRfD'p4@G(]&+fB>\]PhLRP0=H%q"@Pq#uHX[:i/1M5\d;l]\5rX1BoYaUV0JDFg@'CdqEPctH_I2b/\n&CrZ"OMCE\Q1F.*QQ0k\fZZQ"mYIVO?/Sifd,#(&[RSXQ:8o=M9$/,jWV`MG#=9Wm8T>bH\g:.hdWl5CZ;SY-?!IV[aku@NAMRSD_?PtqI\CrNsl^\3;,Hu)u$Yd1`6#TIVK:9Y,(b2[Ya\-"5Bf^\r#"ih\!It,h0aocb=>'aiHe)2T?._N-PuGS1ZF?j4k1a#mpKSD=$9)^/_nY&5RKkp3NG'E7(of^a:.QQceTOFGnrlN)j;j"La/n&I"2]0J4XV!UHAl;.hW@6dfYi&QZB%ZU_('Ba7'!m1K;F=1CF6N(YbP:U^en`*ohkg,jF7$aVsEdQt;+;`mI74+2ViR?bu=].b<5qj&db'+SonaJ`EAA@Q,&r%eE""GV))ciD3$Y+=_We7R_*8[`BURNKfD[k24>.k5E&Uc>R,O-eke!&V+MHPu);)gi?R-[LX,R,"tgj;clS;*UG^I=@Tni6-08<3XjT\!CS4kJA)hkb!'Fb-GSd4:)oTN=Lsknh-Xh>2]#ib:rCIV5Y:h-E\c\.0[lTIY]fgqT5NfN\~>
+Gau0D>Ar7S'Roe[&G!+0M]%?aFX`%H903q-MDe\Om@ics,#bp(=`ee+ls`2.Ua2GmaHW0f(kUBe7@s+A.mNPg%M/6X*^@06@O/_#8k'A:`Y#TN.C%C$>p7!"kcmBf9IPYK"6fjMW3jIFF[et1t`t$X#7Yu&1epM=_])7qm'sFYgCJ%f6Qdb274o"9f];!$j@l8WskN."jnh4(qB5@M4:q5g&mi_O\W-S'Y][$$Q4fj"+PY!0]dqQ?/4o'NWi$:+Al^)fleIe$*n`Q=iD9_YGjcUB'Fecg/'JB+tS8&-W0@@i0[t.U'=+3__Up%f7Roe:O&T$">@j9BCQU>-h2f0p(nqWkV?GC,>DmZb1a]:&&H*dcYJVH]LW1J6.K!d'OqDmc"!^F)(64g^)oTWn])hH;dN)@,=.u_DEVG[VimpA,%0RoBp<8Q30,TM<`:.%V`tfMP8!83$j.'2AgN,6f"Re_#5?,@T8+V%$C]_RlV]S>j'/!=Kre#@r'&]sP[,KpN*0O3n&dN"4_UVpS9LB9o,a;f^bc+,ij@\[s"c9]fRYJGq5G?IpU-3?d`L9Y'f;$"ecf5XjL,irm8)7HKh12<$:@+ikO]b8R&1s-7\VnES)DU>b1UXp-863DiccNmih@cVc,`-NXHka3"kPrc6'DA85k@#Jk!;]2rDK7(KEpT]_8j-?3%D"UiW_`&3'I=7U8`ud\:dHM--_@F/RhN9M4:L_M^$e)"Vb7-[C)o)L8),-ZPJAQO#"t:-hcj11cWYL0'&ZI0@)l>4AB$`bY[uD4EQ_X;hu0YqB1_+C5_1K?:F@*tsFuP.XqfhNtg=[]?_8jNSRH0>>T-Rq;gm@=s-gm`&qUINu8`n($E6UM1qD=r^9sET]%Eh3@(-A5Fr1^V+@Aa,_dP5.S^Z]XB/m&'=Th/!3*KTfObnl4+S)+F7+e1detOo=mt0Q$Z=T/p10oeShI"$_-0XXG<[\Mimqd?H%EV*SZgZnDX.u#.^%K1cN*;o#VF)+;8;?KXW97n7RCd>0"TaG]T3@TMRtMT&C5PK).\4u2XQOJIU@"q96)tfN>qg(d^faI)D*jYbcKTm@4%@GV8&CpSHlk#H1+*,=4)2#62Oo3,<#ES>HV-g#@Z?r.6)=0phls\[RGsn,$TJ3rjgcG>\goJs'ed&m@$.&X(SEf<_>~>
endstream
endobj
102 0 obj
@@ -619,10 +619,10 @@ endobj
>>
endobj
103 0 obj
-<< /Length 697 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1120 /Filter [ /ASCII85Decode /FlateDecode ]
>>
stream
-GarnU995Pr&AI`dI*>3>f/T/gIp:R#praU4%i8Qq=ca"GoS,fM#7_+j81qp07!%5&)MpM*UNATa9X+@Naqr.Z/N?P#u4>F:_EU6?Lm$"a=oMnBDP)Qnf:ipSBLBg"/+*F^XMi)/A&J'#jRC-L@;9c`Y`]G`\nehWcAcY:nXLV5q1ie`eeqHE(8>kP=27aV/VqSMq1Sa@r7_\VD2+g>I&Zjg.hjPTke$c%FAot1hg;Mb1tOU`V0`d8GjMg!_2W-2c5aB)+e(r9Kq0Nn-)_-AQ%ic4P$d\/CJu".B\.GSTJ<=%"@[5/#H8d"7&/;KQ[8$Hirp]PZ&AZIiDK3Xj&G?Y.D;gRYY?=7]/t4GCq^OZ1:9J9%KRPuHW;DQm]$h90j.I8o;3>%#FAR!@pmN]J\9k!"Ra?B=.k%=^5p1>`B>k[cLU(g,8/2%DU68;1P8h>g\AeU"s8%tV`/4jfH`"]>fpNrT]e&FbYWY?PHAN,[N1Fs2$(,PBP@ASo,L*XSAS!O]&PRT^Q>'<_e>;d]T[O%\"_J(c,rIZ3iI$=Wf+dnZ6:2(=qYf];PE#(6o7TP(ZMY1*oYFRf*itWj(T]BJ&6AHs4Ys\>ged-eP,Q=MiF.(Y=AD-3RQ/!pS3kJ+G5(PH_$"2NQh-#upM>&*u#@5C~>
+Gat%b99Z,/&:j6K'fsW>figri*kB7ZG\U-U<$"deR2gkl2H>90&GA)u52tO$Ma2Ga;]`"$^t?rj_d2U$-;V0:S>:Jq>+!s*`b-GEc"L:a8iCq.6o4hs)9n*)Uc#51_BpiiBNR*82,VYcBOkSjff^`E[,1fd#+]9'*[$Qd&O1qHGibUj:Qg2kS('[A'TtR&0_,UIMe1AT=!>'M$21=!k6K6HBO['9:0Zmk83\V73@p7[*`K`(/?XqX&S(?7H+2X>R\qdVM&:E:aFsTKjjp?'KIT5ooR@W=3Sk!JQ=@ij<4-EYka-rT>Y.ZDDC'Yn\@_e0)&"YME/Z!&k3DfuPQk)@l@r:L5QnDZXLd:[]D9Kh:,=,d`X5%!E/7f@qd,@#s[Q`Y(b/$e\W^T?ukC<"3lVQ^dPRs9\1XM#34?u@oe&0uqPGM!l%`ML[A^d^3I'e`SDuTe..U(9k\0:WWp`Z2^'n'S7K`/uVcI8["BLuJ1e%LFDifHIRD)BH*-VnTKchCTN5;d(cUVF5h#c8OH?6Kt#,ZYF!6;")$i~>
endstream
endobj
104 0 obj
@@ -1038,73 +1038,73 @@ endobj
43 0 obj
<<
/S /GoTo
-/D [82 0 R /XYZ 85.0 230.6 null]
+/D [84 0 R /XYZ 85.0 659.0 null]
>>
endobj
45 0 obj
<<
/S /GoTo
-/D [84 0 R /XYZ 85.0 616.6 null]
+/D [84 0 R /XYZ 85.0 520.547 null]
>>
endobj
47 0 obj
<<
/S /GoTo
-/D [84 0 R /XYZ 85.0 538.947 null]
+/D [84 0 R /XYZ 85.0 442.894 null]
>>
endobj
49 0 obj
<<
/S /GoTo
-/D [84 0 R /XYZ 85.0 286.494 null]
+/D [84 0 R /XYZ 85.0 190.441 null]
>>
endobj
51 0 obj
<<
/S /GoTo
-/D [84 0 R /XYZ 85.0 234.16 null]
+/D [86 0 R /XYZ 85.0 659.0 null]
>>
endobj
56 0 obj
<<
/S /GoTo
-/D [88 0 R /XYZ 85.0 528.6 null]
+/D [88 0 R /XYZ 85.0 409.4 null]
>>
endobj
58 0 obj
<<
/S /GoTo
-/D [92 0 R /XYZ 85.0 516.2 null]
+/D [92 0 R /XYZ 85.0 383.8 null]
>>
endobj
60 0 obj
<<
/S /GoTo
-/D [94 0 R /XYZ 85.0 291.8 null]
+/D [94 0 R /XYZ 85.0 154.2 null]
>>
endobj
62 0 obj
<<
/S /GoTo
-/D [96 0 R /XYZ 85.0 357.4 null]
+/D [96 0 R /XYZ 85.0 230.2 null]
>>
endobj
64 0 obj
<<
/S /GoTo
-/D [98 0 R /XYZ 85.0 414.2 null]
+/D [98 0 R /XYZ 85.0 287.0 null]
>>
endobj
66 0 obj
<<
/S /GoTo
-/D [102 0 R /XYZ 85.0 613.4 null]
+/D [102 0 R /XYZ 85.0 478.6 null]
>>
endobj
68 0 obj
<<
/S /GoTo
-/D [102 0 R /XYZ 85.0 151.747 null]
+/D [104 0 R /XYZ 85.0 547.8 null]
>>
endobj
105 0 obj
@@ -1115,74 +1115,74 @@ endobj
xref
0 142
0000000000 65535 f
-0000048388 00000 n
-0000048583 00000 n
-0000048676 00000 n
+0000048685 00000 n
+0000048880 00000 n
+0000048973 00000 n
0000000015 00000 n
0000000071 00000 n
-0000001278 00000 n
-0000001398 00000 n
-0000001570 00000 n
-0000048828 00000 n
-0000001705 00000 n
-0000048891 00000 n
-0000001840 00000 n
-0000048957 00000 n
-0000001977 00000 n
-0000049021 00000 n
-0000002114 00000 n
-0000049087 00000 n
-0000002251 00000 n
-0000049153 00000 n
-0000002388 00000 n
-0000049219 00000 n
-0000002525 00000 n
-0000049283 00000 n
-0000002662 00000 n
-0000049349 00000 n
-0000002799 00000 n
-0000049413 00000 n
-0000002936 00000 n
-0000049479 00000 n
-0000003073 00000 n
-0000049545 00000 n
-0000003210 00000 n
-0000049610 00000 n
-0000003347 00000 n
-0000049676 00000 n
-0000003484 00000 n
-0000049740 00000 n
-0000003620 00000 n
-0000049806 00000 n
-0000003757 00000 n
-0000049870 00000 n
-0000003893 00000 n
-0000049936 00000 n
-0000004030 00000 n
-0000050000 00000 n
-0000004167 00000 n
-0000050064 00000 n
-0000004303 00000 n
-0000050130 00000 n
-0000004440 00000 n
-0000050196 00000 n
-0000004576 00000 n
+0000001276 00000 n
+0000001396 00000 n
+0000001568 00000 n
+0000049125 00000 n
+0000001703 00000 n
+0000049188 00000 n
+0000001838 00000 n
+0000049254 00000 n
+0000001975 00000 n
+0000049318 00000 n
+0000002112 00000 n
+0000049384 00000 n
+0000002249 00000 n
+0000049450 00000 n
+0000002386 00000 n
+0000049516 00000 n
+0000002523 00000 n
+0000049580 00000 n
+0000002660 00000 n
+0000049646 00000 n
+0000002797 00000 n
+0000049710 00000 n
+0000002934 00000 n
+0000049776 00000 n
+0000003071 00000 n
+0000049842 00000 n
+0000003208 00000 n
+0000049907 00000 n
+0000003345 00000 n
+0000049973 00000 n
+0000003482 00000 n
+0000050037 00000 n
+0000003618 00000 n
+0000050103 00000 n
+0000003755 00000 n
+0000050167 00000 n
+0000003891 00000 n
+0000050233 00000 n
+0000004028 00000 n
+0000050297 00000 n
+0000004165 00000 n
+0000050363 00000 n
+0000004301 00000 n
+0000050429 00000 n
+0000004438 00000 n
+0000050495 00000 n
+0000004574 00000 n
0000005293 00000 n
0000005416 00000 n
0000005485 00000 n
-0000050261 00000 n
+0000050559 00000 n
0000005618 00000 n
-0000050325 00000 n
+0000050623 00000 n
0000005751 00000 n
-0000050389 00000 n
+0000050687 00000 n
0000005884 00000 n
-0000050453 00000 n
+0000050751 00000 n
0000006017 00000 n
-0000050517 00000 n
+0000050815 00000 n
0000006150 00000 n
-0000050581 00000 n
+0000050879 00000 n
0000006282 00000 n
-0000050646 00000 n
+0000050944 00000 n
0000006415 00000 n
0000008563 00000 n
0000008671 00000 n
@@ -1194,68 +1194,68 @@ xref
0000015503 00000 n
0000017991 00000 n
0000018099 00000 n
+0000019975 00000 n
0000020083 00000 n
-0000020191 00000 n
-0000022539 00000 n
-0000022647 00000 n
-0000024428 00000 n
-0000024536 00000 n
-0000025961 00000 n
-0000026069 00000 n
-0000027602 00000 n
-0000027710 00000 n
-0000029426 00000 n
-0000029534 00000 n
-0000031444 00000 n
-0000031552 00000 n
-0000033595 00000 n
-0000033703 00000 n
-0000035553 00000 n
-0000035661 00000 n
-0000037599 00000 n
-0000037707 00000 n
-0000039272 00000 n
-0000039381 00000 n
-0000041201 00000 n
-0000041311 00000 n
-0000042101 00000 n
-0000050713 00000 n
-0000042211 00000 n
-0000042411 00000 n
-0000042629 00000 n
-0000042835 00000 n
-0000043043 00000 n
-0000043211 00000 n
-0000043411 00000 n
-0000043569 00000 n
-0000043744 00000 n
-0000043985 00000 n
-0000044114 00000 n
-0000044268 00000 n
-0000044422 00000 n
-0000044566 00000 n
-0000044716 00000 n
-0000044857 00000 n
-0000045097 00000 n
-0000045279 00000 n
-0000045452 00000 n
-0000045655 00000 n
-0000045843 00000 n
-0000046095 00000 n
-0000046236 00000 n
-0000046445 00000 n
-0000046631 00000 n
-0000046805 00000 n
-0000047050 00000 n
-0000047241 00000 n
-0000047447 00000 n
-0000047608 00000 n
-0000047722 00000 n
-0000047833 00000 n
-0000047945 00000 n
-0000048054 00000 n
-0000048161 00000 n
-0000048278 00000 n
+0000022353 00000 n
+0000022461 00000 n
+0000024441 00000 n
+0000024549 00000 n
+0000026017 00000 n
+0000026125 00000 n
+0000027509 00000 n
+0000027617 00000 n
+0000029478 00000 n
+0000029586 00000 n
+0000031316 00000 n
+0000031424 00000 n
+0000033608 00000 n
+0000033716 00000 n
+0000035499 00000 n
+0000035607 00000 n
+0000037552 00000 n
+0000037660 00000 n
+0000039066 00000 n
+0000039175 00000 n
+0000041074 00000 n
+0000041184 00000 n
+0000042398 00000 n
+0000051009 00000 n
+0000042508 00000 n
+0000042708 00000 n
+0000042926 00000 n
+0000043132 00000 n
+0000043340 00000 n
+0000043508 00000 n
+0000043708 00000 n
+0000043866 00000 n
+0000044041 00000 n
+0000044282 00000 n
+0000044411 00000 n
+0000044565 00000 n
+0000044719 00000 n
+0000044863 00000 n
+0000045013 00000 n
+0000045154 00000 n
+0000045394 00000 n
+0000045576 00000 n
+0000045749 00000 n
+0000045952 00000 n
+0000046140 00000 n
+0000046392 00000 n
+0000046533 00000 n
+0000046742 00000 n
+0000046928 00000 n
+0000047102 00000 n
+0000047347 00000 n
+0000047538 00000 n
+0000047744 00000 n
+0000047905 00000 n
+0000048019 00000 n
+0000048130 00000 n
+0000048242 00000 n
+0000048351 00000 n
+0000048458 00000 n
+0000048575 00000 n
trailer
<<
/Size 142
@@ -1263,5 +1263,5 @@ trailer
/Info 4 0 R
>>
startxref
-50767
+51063
%%EOF
diff --git a/src/java/org/apache/lucene/index/CheckIndex.java b/src/java/org/apache/lucene/index/CheckIndex.java
index 795042514fb..b11f799accb 100644
--- a/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/src/java/org/apache/lucene/index/CheckIndex.java
@@ -113,7 +113,10 @@ public class CheckIndex {
sFormat = "FORMAT_SINGLE_NORM_FILE [Lucene 2.2]";
else if (format == SegmentInfos.FORMAT_SHARED_DOC_STORE)
sFormat = "FORMAT_SHARED_DOC_STORE [Lucene 2.3]";
- else if (format < SegmentInfos.FORMAT_SHARED_DOC_STORE) {
+ else if (format == SegmentInfos.FORMAT_CHECKSUM) {
+ // Current format introduced by this change; fully readable, so do not skip
+ sFormat = "FORMAT_CHECKSUM [Lucene 2.4]";
+ } else if (format < SegmentInfos.FORMAT_CHECKSUM) {
sFormat = "int=" + format + " [newer version of Lucene than this tool]";
skip = true;
} else {
@@ -320,7 +323,7 @@ public class CheckIndex {
}
out.print("Writing...");
try {
- newSIS.write(dir);
+ newSIS.commit(dir);
} catch (Throwable t) {
out.println("FAILED; exiting");
t.printStackTrace(out);
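
For reference, the version check fixed above relies on the SegmentInfos format constants being negative and decreasing as the file format evolves: the current format is matched with an equality test, while anything more negative must come from a newer Lucene than this tool. A minimal standalone sketch of that logic (the class name, constants copy, and main() are illustrative, not part of the patch):

    // Standalone illustration of the CheckIndex format test (hypothetical demo class).
    public class FormatCheckDemo {
      // Values mirror SegmentInfos: each newer format is more negative.
      static final int FORMAT_SHARED_DOC_STORE = -4;  // Lucene 2.3
      static final int FORMAT_CHECKSUM = -5;          // Lucene 2.4 (this patch)

      static String describe(int format) {
        if (format == FORMAT_SHARED_DOC_STORE)
          return "FORMAT_SHARED_DOC_STORE [Lucene 2.3]";
        else if (format == FORMAT_CHECKSUM)
          return "FORMAT_CHECKSUM [Lucene 2.4]";
        else if (format < FORMAT_CHECKSUM)
          return "int=" + format + " [newer version of Lucene than this tool]";
        else
          return "older format";
      }

      public static void main(String[] args) {
        System.out.println(describe(-5));  // FORMAT_CHECKSUM [Lucene 2.4]
        System.out.println(describe(-6));  // newer version of Lucene than this tool
      }
    }
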
diff --git a/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
index b954f965713..9ea7903b120 100644
--- a/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
+++ b/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
@@ -46,6 +46,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
private boolean closed;
protected IndexWriter writer;
+ protected int mergeThreadCount;
public ConcurrentMergeScheduler() {
if (allInstances != null) {
@@ -211,10 +212,11 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
}
/** Create and return a new MergeThread */
- protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
+ protected synchronized MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
final MergeThread thread = new MergeThread(writer, merge);
thread.setThreadPriority(mergeThreadPriority);
thread.setDaemon(true);
+ thread.setName("Lucene Merge Thread #" + mergeThreadCount++);
return thread;
}
@@ -297,9 +299,9 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
}
} finally {
synchronized(ConcurrentMergeScheduler.this) {
+ ConcurrentMergeScheduler.this.notifyAll();
boolean removed = mergeThreads.remove(this);
assert removed;
- ConcurrentMergeScheduler.this.notifyAll();
}
}
}
@@ -334,6 +336,12 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
}
}
+ public static void clearUnhandledExceptions() {
+ synchronized(allInstances) {
+ anyExceptions = false;
+ }
+ }
+
/** Used for testing */
private void addMyself() {
synchronized(allInstances) {
diff --git a/src/java/org/apache/lucene/index/DirectoryIndexReader.java b/src/java/org/apache/lucene/index/DirectoryIndexReader.java
index 658656e3e91..a1726e8a9d2 100644
--- a/src/java/org/apache/lucene/index/DirectoryIndexReader.java
+++ b/src/java/org/apache/lucene/index/DirectoryIndexReader.java
@@ -19,6 +19,9 @@ package org.apache.lucene.index;
import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
@@ -37,6 +40,7 @@ abstract class DirectoryIndexReader extends IndexReader {
private SegmentInfos segmentInfos;
private Lock writeLock;
private boolean stale;
+ private HashSet synced = new HashSet();
/** Used by commit() to record pre-commit state in case
* rollback is necessary */
@@ -44,16 +48,28 @@ abstract class DirectoryIndexReader extends IndexReader {
private SegmentInfos rollbackSegmentInfos;
- void init(Directory directory, SegmentInfos segmentInfos, boolean closeDirectory) {
+ void init(Directory directory, SegmentInfos segmentInfos, boolean closeDirectory)
+ throws IOException {
this.directory = directory;
this.segmentInfos = segmentInfos;
this.closeDirectory = closeDirectory;
+
+ if (segmentInfos != null) {
+ // We assume that this segments_N was previously
+ // properly sync'd:
+ for(int i=0;i<segmentInfos.size();i++)
+ synced.addAll(segmentInfos.info(i).files());
+ }
- <p>The optional <code>autoCommit</code> argument to the
- constructors
- controls visibility of the changes to {@link IndexReader} instances reading the same index.
- When this is <code>false</code>, changes are not
- visible until {@link #close()} is called.
- Note that changes will still be flushed to the
- {@link org.apache.lucene.store.Directory} as new files,
- but are not committed (no new <code>segments_N</code> file
- is written referencing the new files) until {@link #close} is
- called. If something goes terribly wrong (for example the
- JVM crashes) before {@link #close()}, then
- the index will reflect none of the changes made (it will
- remain in its starting state).
- You can also call {@link #abort()}, which closes the writer without committing any
- changes, and removes any index
+ <p>[Deprecated: Note that in 3.0, IndexWriter will
+ no longer accept autoCommit=true (it will be hardwired to
+ false). You can always call {@link IndexWriter#commit()} yourself
+ when needed]. The optional <code>autoCommit</code> argument to the constructors
+ controls visibility of the changes to {@link IndexReader}
+ instances reading the same index. When this is
+ <code>false</code>, changes are not visible until {@link
+ #close()} is called. Note that changes will still be
+ flushed to the {@link org.apache.lucene.store.Directory}
+ as new files, but are not committed (no new
+ <code>segments_N</code> file is written referencing the
+ new files, nor are the files sync'd to stable storage)
+ until {@link #commit} or {@link #close} is called. If something
+ goes terribly wrong (for example the JVM crashes), then
+ the index will reflect none of the changes made since the
+ last commit, or the starting state if commit was not called.
+ You can also call {@link #abort}, which closes the writer
+ without committing any changes, and removes any index
files that had been flushed but are now unreferenced.
This mode is useful for preventing readers from refreshing
at a bad time (for example after you've done all your
- deletes but before you've done your adds).
- It can also be used to implement simple single-writer
- transactional semantics ("all or none").
+ deletes but before you've done your adds). It can also be
+ used to implement simple single-writer transactional
+ semantics ("all or none").
<p>When <code>autoCommit</code> is <code>true</code> then
- every flush is also a commit ({@link IndexReader}
- instances will see each flush as changes to the index).
- This is the default, to match the behavior before 2.2.
- When running in this mode, be careful not to refresh your
+ the writer will periodically commit on its own. This is
+ the default, to match the behavior before 2.2. However,
+ in 3.0, autoCommit will be hardwired to false. There is
+ no guarantee when exactly an auto commit will occur (it
+ used to be after every flush, but it is now after every
+ completed merge, as of 2.4). If you want to force a
+ commit, call {@link #commit}, or, close the writer. Once
+ a commit has finished, {@link IndexReader} instances will
+ see the changes to the index as of that commit. When
+ running in this mode, be careful not to refresh your
readers while optimize or segment merges are taking place
as this can tie up substantial disk space.
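
To make the commit semantics described above concrete, here is a minimal usage sketch against the 2.4-era API this patch documents. It assumes the IndexWriter(Directory, Analyzer, boolean, MaxFieldLength) constructor and the MaxFieldLength.LIMITED constant that the deprecation notes below reference; the index path is illustrative only.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class CommitDemo {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.getDirectory("/tmp/demo-index"); // illustrative path
        // This constructor form has no autoCommit argument; per the javadoc above,
        // nothing becomes visible to readers until commit() or close() is called.
        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true,
                                             IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("id", "1", Field.Store.YES, Field.Index.UN_TOKENIZED));
        writer.addDocument(doc);

        // Flushes pending changes, writes a new segments_N and syncs the referenced
        // files, so a newly opened IndexReader will see the document.
        writer.commit();

        writer.close();
        dir.close();
      }
    }
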
@@ -250,7 +264,20 @@ public class IndexWriter {
* set (see {@link #setInfoStream}).
*/
public final static int MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;
-
+
+ /**
+ * Default for {@link #getMaxSyncPauseSeconds}. On
+ * Windows this defaults to 10.0 seconds; elsewhere it's
+ * 0.
+ */
+ public final static double DEFAULT_MAX_SYNC_PAUSE_SECONDS;
+ static {
+ if (Constants.WINDOWS)
+ DEFAULT_MAX_SYNC_PAUSE_SECONDS = 10.0;
+ else
+ DEFAULT_MAX_SYNC_PAUSE_SECONDS = 0.0;
+ }
+
// The normal read buffer size defaults to 1024, but
// increasing this during merging seems to yield
// performance gains. However we don't want to increase
@@ -269,14 +296,18 @@ public class IndexWriter {
private Similarity similarity = Similarity.getDefault(); // how to normalize
- private boolean commitPending; // true if segmentInfos has changes not yet committed
+ private volatile boolean commitPending; // true if segmentInfos has changes not yet committed
private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
+ private HashMap rollbackSegments;
private SegmentInfos localRollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
private boolean localAutoCommit; // saved autoCommit during local transaction
private boolean autoCommit = true; // false if we should commit only on close
private SegmentInfos segmentInfos = new SegmentInfos(); // the segments
+ private int syncCount;
+ private int syncCountSaved = -1;
+
private DocumentsWriter docWriter;
private IndexFileDeleter deleter;
@@ -302,6 +333,12 @@ public class IndexWriter {
private long mergeGen;
private boolean stopMerges;
+ private int flushCount;
+ private double maxSyncPauseSeconds = DEFAULT_MAX_SYNC_PAUSE_SECONDS;
+
+ // Last (right most) SegmentInfo created by a merge
+ private SegmentInfo lastMergeInfo;
+
/**
* Used internally to throw an {@link
* AlreadyClosedException} if this IndexWriter has been
@@ -432,7 +469,9 @@ public class IndexWriter {
* Constructs an IndexWriter for the index in <code>path</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
- * <code>path</code>, replacing the index already there, if any.
+ * <code>path</code>, replacing the index already there,
+ * if any. Note that autoCommit defaults to true, but
+ * starting in 3.0 it will be hardwired to false.
*
* @param path the path to the index directory
* @param a the analyzer to use
@@ -487,6 +526,8 @@ public class IndexWriter {
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>path</code>, replacing the index already there, if any.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
*
* @param path the path to the index directory
* @param a the analyzer to use
@@ -541,6 +582,8 @@ public class IndexWriter {
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>d</code>, replacing the index already there, if any.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
*
* @param d the index directory
* @param a the analyzer to use
@@ -595,6 +638,8 @@ public class IndexWriter {
* <code>path</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
*
* @param path the path to the index directory
* @param a the analyzer to use
@@ -641,6 +686,8 @@ public class IndexWriter {
* <code>path</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
*
* @param path the path to the index directory
* @param a the analyzer to use
@@ -687,6 +734,8 @@ public class IndexWriter {
* <code>d</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
*
* @param d the index directory
* @param a the analyzer to use
@@ -746,6 +795,10 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
+ * @deprecated This will be removed in 3.0, when
+ * autoCommit will be hardwired to false. Use {@link
+ * #IndexWriter(Directory,Analyzer,MaxFieldLength)}
+ * instead, and call {@link #commit} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
@@ -798,6 +851,10 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
+ * @deprecated This will be removed in 3.0, when
+ * autoCommit will be hardwired to false. Use {@link
+ * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
+ * instead, and call {@link #commit} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
@@ -832,6 +889,31 @@ public class IndexWriter {
init(d, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}
+ /**
+ * Expert: constructs an IndexWriter with a custom {@link
+ * IndexDeletionPolicy}, for the index in <code>d</code>,
+ * first creating it if it does not already exist. Text
+ * will be analyzed with <code>a</code>.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
+ *
+ * @param d the index directory
+ * @param a the analyzer to use
+ * @param deletionPolicy see above
+ * @param mfl whether or not to limit field lengths
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws LockObtainFailedException if another writer
+ * has this index open (<code>write.lock</code> could not
+ * be obtained)
+ * @throws IOException if the directory cannot be
+ * read/written to or if there is any other low-level
+ * IO error
+ */
+ public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
+ throws CorruptIndexException, LockObtainFailedException, IOException {
+ init(d, a, false, deletionPolicy, true, mfl.getLimit());
+ }
+
/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>,
@@ -851,6 +933,10 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
+ * @deprecated This will be removed in 3.0, when
+ * autoCommit will be hardwired to false. Use {@link
+ * #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)}
+ * instead, and call {@link #commit} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
@@ -882,6 +968,37 @@ public class IndexWriter {
init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}
+ /**
+ * Expert: constructs an IndexWriter with a custom {@link
+ * IndexDeletionPolicy}, for the index in <code>d</code>.
+ * Text will be analyzed with <code>a</code>. If
+ * <code>create</code> is true, then a new, empty index
+ * will be created in <code>d</code>, replacing the index
+ * already there, if any.
+ * Note that autoCommit defaults to true, but starting in 3.0
+ * it will be hardwired to false.
+ *
+ * @param d the index directory
+ * @param a the analyzer to use
+ * @param create <code>true</code> to create the index or overwrite
+ * the existing one; <code>false</code> to append to the existing
+ * index
+ * @param deletionPolicy see above
+ * @param mfl whether or not to limit field lengths
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws LockObtainFailedException if another writer
+ * has this index open (<code>write.lock</code> could not
+ * be obtained)
+ * @throws IOException if the directory cannot be read/written to, or
+ * if it does not exist and <code>create</code> is
+ * <code>false</code> or if there is any other low-level
+ * IO error
+ */
+ public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
+ throws CorruptIndexException, LockObtainFailedException, IOException {
+ init(d, a, create, false, deletionPolicy, true, mfl.getLimit());
+ }
+
/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>.
@@ -907,6 +1024,10 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
+ * @deprecated This will be removed in 3.0, when
+ * autoCommit will be hardwired to false. Use {@link
+ * #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)}
+ * instead, and call {@link #commit} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
@@ -984,15 +1105,22 @@ public class IndexWriter {
} catch (IOException e) {
// Likely this means it's a fresh directory
}
- segmentInfos.write(directory);
+ segmentInfos.commit(directory);
} else {
segmentInfos.read(directory);
+
+ // We assume that this segments_N was previously
+ // properly sync'd:
+ for(int i=0;i<segmentInfos.size();i++)
+ synced.addAll(segmentInfos.info(i).files());

* <p> If an Exception is hit during close, eg due to disk
* full or some other reason, then both the on-disk index
@@ -1490,33 +1654,16 @@ public class IndexWriter {
mergeScheduler.close();
+ if (infoStream != null)
+ message("now call final sync()");
+
+ sync(true, 0);
+
+ if (infoStream != null)
+ message("at close: " + segString());
+
synchronized(this) {
- if (commitPending) {
- boolean success = false;
- try {
- segmentInfos.write(directory); // now commit changes
- success = true;
- } finally {
- if (!success) {
- if (infoStream != null)
- message("hit exception committing segments file during close");
- deletePartialSegmentsFile();
- }
- }
- if (infoStream != null)
- message("close: wrote segments file \"" + segmentInfos.getCurrentSegmentFileName() + "\"");
-
- deleter.checkpoint(segmentInfos, true);
-
- commitPending = false;
- rollbackSegmentInfos = null;
- }
-
- if (infoStream != null)
- message("at close: " + segString());
-
docWriter = null;
-
deleter.close();
}
@@ -1527,7 +1674,9 @@ public class IndexWriter {
writeLock.release(); // release write lock
writeLock = null;
}
- closed = true;
+ synchronized(this) {
+ closed = true;
+ }
} finally {
synchronized(this) {
@@ -1581,34 +1730,24 @@ public class IndexWriter {
// Perform the merge
cfsWriter.close();
-
- for(int i=0;i
- * <p>Note: if <code>autoCommit=false</code>, flushed data would still
, flushed data would still
- * not be visible to readers, until {@link #close} is called.
+ * Note: while this will force buffered docs to be
+ * pushed into the index, it will not make these docs
+ * visible to a reader. Use {@link #commit} instead
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
+ * @deprecated please call {@link #commit} instead
*/
public final void flush() throws CorruptIndexException, IOException {
flush(true, false);
}
+ /**
+ *
Commits all pending updates (added & deleted documents)
+ * to the index, and syncs all referenced index files,
+ * such that a reader will see the changes. Note that
+ * this does not wait for any running background merges to
+ * finish. This may be a costly operation, so you should
+ * test the cost in your application and do it only when
+ * really necessary.
+ *
+ * Note that this operation calls Directory.sync on
+ * the index files. That call should not return until the
+ * file contents & metadata are on stable storage. For
+ * FSDirectory, this calls the OS's fsync. But, beware:
+ * some hardware devices may in fact cache writes even
+ * during fsync, and return before the bits are actually
+ * on stable storage, to give the appearance of faster
+ * performance. If you have such a device, and it does
+ * not have a battery backup (for example) then on power
+ * loss it may still lose data. Lucene cannot guarantee
+ * consistency on such devices.
+ */
+ public final void commit() throws CorruptIndexException, IOException {
+ commit(true);
+ }
+
+ private final void commit(boolean triggerMerges) throws CorruptIndexException, IOException {
+ flush(triggerMerges, true);
+ sync(true, 0);
+ }
+
/**
* Flush all in-memory buffered updates (adds and deletes)
* to the Directory.
@@ -2681,10 +2852,15 @@ public class IndexWriter {
maybeMerge();
}
+ // TODO: this method should not have to be entirely
+ // synchronized, ie, merges should be allowed to commit
+ // even while a flush is happening
private synchronized final boolean doFlush(boolean flushDocStores) throws CorruptIndexException, IOException {
// Make sure no threads are actively adding a document
+ flushCount++;
+
// Returns true if docWriter is currently aborting, in
// which case we skip flushing this segment
if (docWriter.pauseAllThreads()) {
@@ -2717,18 +2893,6 @@ public class IndexWriter {
// apply to more than just the last flushed segment
boolean flushDeletes = docWriter.hasDeletes();
- if (infoStream != null) {
- message(" flush: segment=" + docWriter.getSegment() +
- " docStoreSegment=" + docWriter.getDocStoreSegment() +
- " docStoreOffset=" + docWriter.getDocStoreOffset() +
- " flushDocs=" + flushDocs +
- " flushDeletes=" + flushDeletes +
- " flushDocStores=" + flushDocStores +
- " numDocs=" + numDocs +
- " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
- message(" index before flush " + segString());
- }
-
int docStoreOffset = docWriter.getDocStoreOffset();
// docStoreOffset should only be non-zero when
@@ -2737,6 +2901,18 @@ public class IndexWriter {
boolean docStoreIsCompoundFile = false;
+ if (infoStream != null) {
+ message(" flush: segment=" + docWriter.getSegment() +
+ " docStoreSegment=" + docWriter.getDocStoreSegment() +
+ " docStoreOffset=" + docStoreOffset +
+ " flushDocs=" + flushDocs +
+ " flushDeletes=" + flushDeletes +
+ " flushDocStores=" + flushDocStores +
+ " numDocs=" + numDocs +
+ " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
+ message(" index before flush " + segString());
+ }
+
// Check if the doc stores must be separately flushed
// because other segments, besides the one we are about
// to flush, reference it
@@ -2754,60 +2930,63 @@ public class IndexWriter {
// If we are flushing docs, segment must not be null:
assert segment != null || !flushDocs;
- if (flushDocs || flushDeletes) {
-
- SegmentInfos rollback = null;
-
- if (flushDeletes)
- rollback = (SegmentInfos) segmentInfos.clone();
+ if (flushDocs) {
boolean success = false;
+ final int flushedDocCount;
try {
- if (flushDocs) {
-
- if (0 == docStoreOffset && flushDocStores) {
- // This means we are flushing private doc stores
- // with this segment, so it will not be shared
- // with other segments
- assert docStoreSegment != null;
- assert docStoreSegment.equals(segment);
- docStoreOffset = -1;
- docStoreIsCompoundFile = false;
- docStoreSegment = null;
- }
-
- int flushedDocCount = docWriter.flush(flushDocStores);
-
- newSegment = new SegmentInfo(segment,
- flushedDocCount,
- directory, false, true,
- docStoreOffset, docStoreSegment,
- docStoreIsCompoundFile);
- segmentInfos.addElement(newSegment);
- }
-
- if (flushDeletes) {
- // we should be able to change this so we can
- // buffer deletes longer and then flush them to
- // multiple flushed segments, when
- // autoCommit=false
- applyDeletes(flushDocs);
- doAfterFlush();
- }
-
- checkpoint();
+ flushedDocCount = docWriter.flush(flushDocStores);
success = true;
} finally {
if (!success) {
-
if (infoStream != null)
message("hit exception flushing segment " + segment);
-
- if (flushDeletes) {
+ docWriter.abort(null);
+ deleter.refresh(segment);
+ }
+ }
+
+ if (0 == docStoreOffset && flushDocStores) {
+ // This means we are flushing private doc stores
+ // with this segment, so it will not be shared
+ // with other segments
+ assert docStoreSegment != null;
+ assert docStoreSegment.equals(segment);
+ docStoreOffset = -1;
+ docStoreIsCompoundFile = false;
+ docStoreSegment = null;
+ }
- // Carefully check if any partial .del files
- // should be removed:
+ // Create new SegmentInfo, but do not add to our
+ // segmentInfos until deletes are flushed
+ // successfully.
+ newSegment = new SegmentInfo(segment,
+ flushedDocCount,
+ directory, false, true,
+ docStoreOffset, docStoreSegment,
+ docStoreIsCompoundFile);
+ }
+
+ if (flushDeletes) {
+ try {
+ SegmentInfos rollback = (SegmentInfos) segmentInfos.clone();
+
+ boolean success = false;
+ try {
+ // we should be able to change this so we can
+ // buffer deletes longer and then flush them to
+ // multiple flushed segments only when a commit()
+ // finally happens
+ applyDeletes(newSegment);
+ success = true;
+ } finally {
+ if (!success) {
+ if (infoStream != null)
+ message("hit exception flushing deletes");
+
+ // Carefully remove any partially written .del
+ // files
final int size = rollback.size();
for(int i=0;i<size;i++) {

- if (segmentInfos.size() > 0 &&
- segmentInfos.info(segmentInfos.size()-1) == newSegment)
- segmentInfos.remove(segmentInfos.size()-1);
- }
- if (flushDocs)
- docWriter.abort(null);
- deletePartialSegmentsFile();
- deleter.checkpoint(segmentInfos, false);
-
- if (segment != null)
- deleter.refresh(segment);
+ }
}
+ } finally {
+ // Regardless of success or failure in flushing
+ // deletes, we must clear them from our buffer:
+ docWriter.clearBufferedDeletes();
}
-
- deleter.checkpoint(segmentInfos, autoCommit);
-
- if (flushDocs && mergePolicy.useCompoundFile(segmentInfos,
- newSegment)) {
- success = false;
- try {
- docWriter.createCompoundFile(segment);
- newSegment.setUseCompoundFile(true);
- checkpoint();
- success = true;
- } finally {
- if (!success) {
- if (infoStream != null)
- message("hit exception creating compound file for newly flushed segment " + segment);
- newSegment.setUseCompoundFile(false);
- deleter.deleteFile(segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
- deletePartialSegmentsFile();
- }
- }
-
- deleter.checkpoint(segmentInfos, autoCommit);
- }
-
- return true;
- } else {
- return false;
}
+ if (flushDocs)
+ segmentInfos.addElement(newSegment);
+
+ if (flushDocs || flushDeletes)
+ checkpoint();
+
+ doAfterFlush();
+
+ if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
+ // Now build compound file
+ boolean success = false;
+ try {
+ docWriter.createCompoundFile(segment);
+ success = true;
+ } finally {
+ if (!success) {
+ if (infoStream != null)
+ message("hit exception creating compound file for newly flushed segment " + segment);
+ deleter.deleteFile(segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+ }
+ }
+
+ newSegment.setUseCompoundFile(true);
+ checkpoint();
+ }
+
+ return flushDocs || flushDeletes;
+
} finally {
docWriter.clearFlushPending();
docWriter.resumeAllThreads();
@@ -2913,9 +3086,101 @@ public class IndexWriter {
return first;
}
+ /** Carefully merges deletes for the segments we just
+ * merged. This is tricky because, although merging will
+ * clear all deletes (compacts the documents), new
+ * deletes may have been flushed to the segments since
+ * the merge was started. This method "carries over"
+ * such new deletes onto the newly merged segment, and
+ * saves the resulting deletes file (incrementing the
+ * delete generation for merge.info). If no deletes were
+ * flushed, no new deletes file is saved. */
+ synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge) throws IOException {
+ final SegmentInfos sourceSegmentsClone = merge.segmentsClone;
+ final SegmentInfos sourceSegments = merge.segments;
+
+ if (infoStream != null)
+ message("commitMerge " + merge.segString(directory));
+
+ // Carefully merge deletes that occurred after we
+ // started merging:
+
+ BitVector deletes = null;
+ int docUpto = 0;
+
+ final int numSegmentsToMerge = sourceSegments.size();
+ for(int i=0;i<numSegmentsToMerge;i++) {

- assert !merge.optimize || merge.maxNumSegmentsOptimize > 0;
-
boolean success = false;
try {
try {
- if (merge.info == null)
- mergeInit(merge);
+ mergeInit(merge);
if (infoStream != null)
message("now merge\n merge=" + merge.segString(directory) + "\n index=" + segString());
@@ -3131,11 +3286,17 @@ public class IndexWriter {
} finally {
synchronized(this) {
try {
- if (!success && infoStream != null)
- message("hit exception during merge");
mergeFinish(merge);
+ if (!success) {
+ if (infoStream != null)
+ message("hit exception during merge");
+ addMergeException(merge);
+ if (merge.info != null && !segmentInfos.contains(merge.info))
+ deleter.refresh(merge.info.name);
+ }
+
// This merge (and, generally, any change to the
// segments) may now enable new merges, so we call
// merge policy & update pending merges.
@@ -3200,6 +3361,11 @@ public class IndexWriter {
final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
assert merge.registerDone;
+ assert !merge.optimize || merge.maxNumSegmentsOptimize > 0;
+
+ if (merge.info != null)
+ // mergeInit already done
+ return;
if (merge.isAborted())
return;
@@ -3323,6 +3489,50 @@ public class IndexWriter {
docStoreOffset,
docStoreSegment,
docStoreIsCompoundFile);
+
+ // Also enroll the merged segment into mergingSegments;
+ // this prevents it from getting selected for a merge
+ // after our merge is done but while we are building the
+ // CFS:
+ mergingSegments.add(merge.info);
+ }
+
+ /** This is called after merging a segment and before
+ * building its CFS. Return true if the files should be
+ * sync'd. If you return false, then the source segment
+ * files that were merged cannot be deleted until the CFS
+ * file is built & sync'd. So, returning false consumes
+ * more transient disk space, but avoids the cost of
+ * syncing files which will shortly be deleted
+ * anyway.
+ * @deprecated -- this will be removed in 3.0 when
+ * autoCommit is hardwired to false */
+ private synchronized boolean doCommitBeforeMergeCFS(MergePolicy.OneMerge merge) throws IOException {
+ long freeableBytes = 0;
+ final int size = merge.segments.size();
+ for(int i=0;i totalBytes)
+ return true;
+ else
+ return false;
}
/** Does the finishing for a merge, which is fast but holds
@@ -3338,6 +3548,7 @@ public class IndexWriter {
final int end = sourceSegments.size();
for(int i=0;i X minutes or
+ // more than Y bytes have been written, etc.
+ if (autoCommit)
+ sync(false, merge.info.sizeInBytes());
+
return mergedDocCount;
}
@@ -3495,23 +3676,11 @@ public class IndexWriter {
mergeExceptions.add(merge);
}
- private void deletePartialSegmentsFile() throws IOException {
- if (segmentInfos.getLastGeneration() != segmentInfos.getGeneration()) {
- String segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
- "",
- segmentInfos.getGeneration());
- if (infoStream != null)
- message("now delete partial segments file \"" + segmentFileName + "\"");
-
- deleter.deleteFile(segmentFileName);
- }
- }
-
// Called during flush to apply any buffered deletes. If
// flushedNewSegment is true then a new segment was just
// created and flushed from the ram segments, so we will
// selectively apply the deletes to that new segment.
- private final void applyDeletes(boolean flushedNewSegment) throws CorruptIndexException, IOException {
+ private final void applyDeletes(SegmentInfo newSegment) throws CorruptIndexException, IOException {
final HashMap bufferedDeleteTerms = docWriter.getBufferedDeleteTerms();
final List bufferedDeleteDocIDs = docWriter.getBufferedDeleteDocIDs();
@@ -3521,13 +3690,13 @@ public class IndexWriter {
bufferedDeleteDocIDs.size() + " deleted docIDs on "
+ segmentInfos.size() + " segments.");
- if (flushedNewSegment) {
+ if (newSegment != null) {
IndexReader reader = null;
try {
// Open readers w/o opening the stored fields /
// vectors because these files may still be held
// open for writing by docWriter
- reader = SegmentReader.get(segmentInfos.info(segmentInfos.size() - 1), false);
+ reader = SegmentReader.get(newSegment, false);
// Apply delete terms to the segment just flushed from ram
// apply appropriately so that a delete term is only applied to
@@ -3544,10 +3713,7 @@ public class IndexWriter {
}
}
- int infosEnd = segmentInfos.size();
- if (flushedNewSegment) {
- infosEnd--;
- }
+ final int infosEnd = segmentInfos.size();
for (int i = 0; i < infosEnd; i++) {
IndexReader reader = null;
@@ -3567,9 +3733,6 @@ public class IndexWriter {
}
}
}
-
- // Clean up bufferedDeleteTerms.
- docWriter.clearBufferedDeletes();
}
// For test purposes.
@@ -3644,6 +3807,236 @@ public class IndexWriter {
return buffer.toString();
}
+ // Files that have been sync'd already
+ private HashSet synced = new HashSet();
+
+ // Files that are now being sync'd
+ private HashSet syncing = new HashSet();
+
+ private boolean startSync(String fileName, Collection pending) {
+ synchronized(synced) {
+ if (!synced.contains(fileName)) {
+ if (!syncing.contains(fileName)) {
+ syncing.add(fileName);
+ return true;
+ } else {
+ pending.add(fileName);
+ return false;
+ }
+ } else
+ return false;
+ }
+ }
+
+ private void finishSync(String fileName, boolean success) {
+ synchronized(synced) {
+ assert syncing.contains(fileName);
+ syncing.remove(fileName);
+ if (success)
+ synced.add(fileName);
+ synced.notifyAll();
+ }
+ }
+
+ /** Blocks until all files in syncing are sync'd */
+ private boolean waitForAllSynced(Collection syncing) throws IOException {
+ synchronized(synced) {
+ Iterator it = syncing.iterator();
+ while(it.hasNext()) {
+ final String fileName = (String) it.next();
+ while(!synced.contains(fileName)) {
+ if (!syncing.contains(fileName))
+ // There was an error because a file that was
+ // previously syncing failed to appear in synced
+ return false;
+ else
+ try {
+ synced.wait();
+ } catch (InterruptedException ie) {
+ continue;
+ }
+ }
+ }
+ return true;
+ }
+ }
+
+ /** Pauses before syncing. On Windows, at least, it's
+ * best (performance-wise) to pause in order to let the OS
+ * flush writes to disk on its own, before forcing a
+ * sync.
+ * @deprecated -- this will be removed in 3.0 when
+ * autoCommit is hardwired to false */
+ private void syncPause(long sizeInBytes) {
+ if (mergeScheduler instanceof ConcurrentMergeScheduler && maxSyncPauseSeconds > 0) {
+ // Rough heuristic: for every 10 MB, we pause for 1
+ // second, up until the max
+ long pauseTime = (long) (1000*sizeInBytes/10/1024/1024);
+ final long maxPauseTime = (long) (maxSyncPauseSeconds*1000);
+ if (pauseTime > maxPauseTime)
+ pauseTime = maxPauseTime;
+ final int sleepCount = (int) (pauseTime / 100);
+ for(int i=0;i<sleepCount;i++) {

+ if (numSegmentsToSync > 0)
+ // Force all subsequent syncs to include up through
+ // the final info in the current segments. This
+ // ensures that a call to commit() will force another
+ // sync (due to merge finishing) to sync all flushed
+ // segments as well:
+ lastMergeInfo = toSync.info(numSegmentsToSync-1);
+
+ mySyncCount = syncCount++;
+ deleter.incRef(toSync, false);
+
+ commitPending = newCommitPending;
+ }
+
+ boolean success0 = false;
+
+ try {
+
+ // Loop until all files toSync references are sync'd:
+ while(true) {
+
+ final Collection pending = new ArrayList();
+
+ for(int i=0;i<toSync.size();i++) {

+ if (mySyncCount > syncCountSaved) {
+
+ if (segmentInfos.getGeneration() > toSync.getGeneration())
+ toSync.updateGeneration(segmentInfos);
+
+ boolean success = false;
+ try {
+ toSync.commit(directory);
+ success = true;
+ } finally {
+ // Have our master segmentInfos record the
+ // generations we just sync'd
+ segmentInfos.updateGeneration(toSync);
+ if (!success) {
+ commitPending = true;
+ message("hit exception committing segments file");
+ }
+ }
+ message("commit complete");
+
+ syncCountSaved = mySyncCount;
+
+ deleter.checkpoint(toSync, true);
+ setRollbackSegmentInfos();
+ } else
+ message("sync superseded by newer infos");
+ }
+
+ message("done all syncs");
+
+ success0 = true;
+
+ } finally {
+ synchronized(this) {
+ deleter.decRef(toSync);
+ if (!success0)
+ commitPending = true;
+ }
+ }
+ }
+
/**
* Specifies maximum field length in {@link IndexWriter} constructors.
* {@link IndexWriter#setMaxFieldLength(int)} overrides the value set by
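
For reference, the syncPause heuristic added in this file works out to roughly one second of pause per 10 MB of newly merged bytes, capped at maxSyncPauseSeconds (10 seconds by default on Windows, 0 elsewhere). A small self-contained check of that arithmetic (names are local to this sketch, not Lucene API):

    public class SyncPauseMath {
      // Mirrors the heuristic in IndexWriter.syncPause.
      static long pauseMillis(long sizeInBytes, double maxSyncPauseSeconds) {
        long pauseTime = (long) (1000 * sizeInBytes / 10 / 1024 / 1024); // 1 sec per 10 MB
        final long maxPauseTime = (long) (maxSyncPauseSeconds * 1000);
        if (pauseTime > maxPauseTime)
          pauseTime = maxPauseTime;
        return pauseTime;
      }

      public static void main(String[] args) {
        System.out.println(pauseMillis(50L * 1024 * 1024, 10.0));  // 50 MB  -> 5000 ms
        System.out.println(pauseMillis(500L * 1024 * 1024, 10.0)); // 500 MB -> capped at 10000 ms
        System.out.println(pauseMillis(500L * 1024 * 1024, 0.0));  // non-Windows default -> 0 ms
      }
    }
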
diff --git a/src/java/org/apache/lucene/index/SegmentInfos.java b/src/java/org/apache/lucene/index/SegmentInfos.java
index c5fade9dc29..21f12326c03 100644
--- a/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -20,6 +20,8 @@ package org.apache.lucene.index;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.ChecksumIndexOutput;
+import org.apache.lucene.store.ChecksumIndexInput;
import java.io.File;
import java.io.FileNotFoundException;
@@ -55,8 +57,12 @@ final class SegmentInfos extends Vector {
* vectors and stored fields file. */
public static final int FORMAT_SHARED_DOC_STORE = -4;
+ /** This format adds a checksum at the end of the file to
+ * ensure all bytes were successfully written. */
+ public static final int FORMAT_CHECKSUM = -5;
+
/* This must always point to the most recent file format. */
- private static final int CURRENT_FORMAT = FORMAT_SHARED_DOC_STORE;
+ private static final int CURRENT_FORMAT = FORMAT_CHECKSUM;
public int counter = 0; // used to name new segments
/**
@@ -197,7 +203,7 @@ final class SegmentInfos extends Vector {
// Clear any previous segments:
clear();
- IndexInput input = directory.openInput(segmentFileName);
+ ChecksumIndexInput input = new ChecksumIndexInput(directory.openInput(segmentFileName));
generation = generationFromSegmentsFileName(segmentFileName);
@@ -226,6 +232,13 @@ final class SegmentInfos extends Vector {
else
version = input.readLong(); // read version
}
+
+ if (format <= FORMAT_CHECKSUM) {
+ final long checksumNow = input.getChecksum();
+ final long checksumThen = input.readLong();
+ if (checksumNow != checksumThen)
+ throw new CorruptIndexException("checksum mismatch in segments file");
+ }
success = true;
}
finally {
@@ -257,7 +270,7 @@ final class SegmentInfos extends Vector {
}.run();
}
- public final void write(Directory directory) throws IOException {
+ private final void write(Directory directory) throws IOException {
String segmentFileName = getNextSegmentFileName();
@@ -268,7 +281,7 @@ final class SegmentInfos extends Vector {
generation++;
}
- IndexOutput output = directory.createOutput(segmentFileName);
+ ChecksumIndexOutput output = new ChecksumIndexOutput(directory.createOutput(segmentFileName));
boolean success = false;
@@ -280,29 +293,31 @@ final class SegmentInfos extends Vector {
output.writeInt(size()); // write infos
for (int i = 0; i < size(); i++) {
info(i).write(output);
- }
- }
- finally {
+ }
+ final long checksum = output.getChecksum();
+ output.writeLong(checksum);
+ success = true;
+ } finally {
+ boolean success2 = false;
try {
output.close();
- success = true;
+ success2 = true;
} finally {
- if (!success) {
+ if (!success || !success2)
// Try not to leave a truncated segments_N file in
// the index:
directory.deleteFile(segmentFileName);
- }
}
}
try {
- output = directory.createOutput(IndexFileNames.SEGMENTS_GEN);
+ IndexOutput genOutput = directory.createOutput(IndexFileNames.SEGMENTS_GEN);
try {
- output.writeInt(FORMAT_LOCKLESS);
- output.writeLong(generation);
- output.writeLong(generation);
+ genOutput.writeInt(FORMAT_LOCKLESS);
+ genOutput.writeLong(generation);
+ genOutput.writeLong(generation);
} finally {
- output.close();
+ genOutput.close();
}
} catch (IOException e) {
// It's OK if we fail to write this file since it's
@@ -620,7 +635,7 @@ final class SegmentInfos extends Vector {
retry = true;
}
- } else {
+ } else if (0 == method) {
// Segment file has advanced since our last loop, so
// reset retry:
retry = false;
@@ -701,4 +716,50 @@ final class SegmentInfos extends Vector {
infos.addAll(super.subList(first, last));
return infos;
}
+
+ // Carry over generation numbers from another SegmentInfos
+ void updateGeneration(SegmentInfos other) {
+ assert other.generation > generation;
+ lastGeneration = other.lastGeneration;
+ generation = other.generation;
+ }
+
+ /** Writes & syncs to the Directory dir, taking care to
+ * remove the segments file on exception */
+ public final void commit(Directory dir) throws IOException {
+ boolean success = false;
+ try {
+ write(dir);
+ success = true;
+ } finally {
+ if (!success) {
+ // Must carefully compute fileName from "generation"
+ // since lastGeneration isn't incremented:
+ final String segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+ "",
+ generation);
+ dir.deleteFile(segmentFileName);
+ }
+ }
+
+ // NOTE: if we crash here, we have left a segments_N
+ // file in the directory in a possibly corrupt state (if
+ // some bytes made it to stable storage and others
+ // didn't). But, the segments_N file now includes
+ // checksum at the end, which should catch this case.
+ // So when a reader tries to read it, it will throw a
+ // CorruptIndexException, which should cause the retry
+ // logic in SegmentInfos to kick in and load the last
+ // good (previous) segments_N-1 file.
+
+ final String fileName = getCurrentSegmentFileName();
+ success = false;
+ try {
+ dir.sync(fileName);
+ success = true;
+ } finally {
+ if (!success)
+ dir.deleteFile(fileName);
+ }
+ }
}
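The comment inside commit() above is the crux of this change: after a crash, a partially written segments_N can survive on disk, and the trailing checksum is what lets a reader detect that and fall back to the previous generation. Below is a minimal stand-alone sketch of the idea (not part of the patch); it uses java.util.zip.CRC32 directly on a byte array, and the class and method names are invented for illustration.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.zip.CRC32;

    public class TrailingChecksumSketch {

      // Append the CRC32 of the payload as a trailing 8-byte big-endian value,
      // the layout segments_N now uses.
      static byte[] writeWithChecksum(byte[] payload) throws IOException {
        CRC32 crc = new CRC32();
        crc.update(payload, 0, payload.length);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.write(payload);
        out.writeLong(crc.getValue());   // last 8 bytes = checksum of everything before them
        out.close();
        return bytes.toByteArray();
      }

      // Returns false for a truncated or partially overwritten file, letting the
      // caller fall back to the previous good generation (segments_N-1).
      static boolean verify(byte[] file) {
        if (file.length < 8)
          return false;                  // too short to even hold the trailing checksum
        CRC32 crc = new CRC32();
        crc.update(file, 0, file.length - 8);
        long stored = 0;
        for (int i = file.length - 8; i < file.length; i++)
          stored = (stored << 8) | (file[i] & 0xff);   // decode the big-endian trailing long
        return stored == crc.getValue();
      }
    }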
diff --git a/src/java/org/apache/lucene/store/ChecksumIndexInput.java b/src/java/org/apache/lucene/store/ChecksumIndexInput.java
new file mode 100644
index 00000000000..e90f6a6d50a
--- /dev/null
+++ b/src/java/org/apache/lucene/store/ChecksumIndexInput.java
@@ -0,0 +1,67 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.zip.CRC32;
+import java.util.zip.Checksum;
+
+/** Reads bytes through from a primary IndexInput, computing a
+ * checksum as it goes. Note that you cannot use seek(). */
+public class ChecksumIndexInput extends IndexInput {
+ IndexInput main;
+ Checksum digest;
+
+ public ChecksumIndexInput(IndexInput main) {
+ this.main = main;
+ digest = new CRC32();
+ }
+
+ public byte readByte() throws IOException {
+ final byte b = main.readByte();
+ digest.update(b);
+ return b;
+ }
+
+ public void readBytes(byte[] b, int offset, int len)
+ throws IOException {
+ main.readBytes(b, offset, len);
+ digest.update(b, offset, len);
+ }
+
+
+ public long getChecksum() {
+ return digest.getValue();
+ }
+
+ public void close() throws IOException {
+ main.close();
+ }
+
+ public long getFilePointer() {
+ return main.getFilePointer();
+ }
+
+ public void seek(long pos) {
+ throw new RuntimeException("not allowed");
+ }
+
+ public long length() {
+ return main.length();
+ }
+}
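To make the intended use of the new class concrete, here is a small illustrative sketch (not part of the patch) that mirrors the SegmentInfos.read() change above: wrap the raw input, read the payload, capture getChecksum(), then compare it against the stored trailing long. The dir and fileName values are assumed placeholders, and the helper class name is invented.

    import java.io.IOException;

    import org.apache.lucene.index.CorruptIndexException;
    import org.apache.lucene.store.ChecksumIndexInput;
    import org.apache.lucene.store.Directory;

    public class VerifyTrailingChecksum {

      // Reads fileName from dir and checks that its last 8 bytes are the CRC32
      // of everything before them, the layout segments_N now uses.
      public static void verify(Directory dir, String fileName) throws IOException {
        ChecksumIndexInput input = new ChecksumIndexInput(dir.openInput(fileName));
        try {
          long payloadLength = input.length() - 8;       // everything except the trailing checksum
          byte[] buffer = new byte[(int) payloadLength];  // segments files are small, int is fine here
          input.readBytes(buffer, 0, buffer.length);      // digest is updated as the bytes go by
          final long checksumNow = input.getChecksum();   // checksum of the bytes read so far
          final long checksumThen = input.readLong();     // value written by ChecksumIndexOutput
          if (checksumNow != checksumThen)
            throw new CorruptIndexException("checksum mismatch in " + fileName);
        } finally {
          input.close();
        }
      }
    }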
diff --git a/src/java/org/apache/lucene/store/ChecksumIndexOutput.java b/src/java/org/apache/lucene/store/ChecksumIndexOutput.java
new file mode 100644
index 00000000000..9b2562b8699
--- /dev/null
+++ b/src/java/org/apache/lucene/store/ChecksumIndexOutput.java
@@ -0,0 +1,68 @@
+package org.apache.lucene.store;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.zip.CRC32;
+import java.util.zip.Checksum;
+
+/** Writes bytes through to a primary IndexOutput, computing
+ * checksum. Note that you cannot use seek().*/
+public class ChecksumIndexOutput extends IndexOutput {
+ IndexOutput main;
+ Checksum digest;
+
+ public ChecksumIndexOutput(IndexOutput main) {
+ this.main = main;
+ digest = new CRC32();
+ }
+
+ public void writeByte(byte b) throws IOException {
+ digest.update(b);
+ main.writeByte(b);
+ }
+
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ digest.update(b, offset, length);
+ main.writeBytes(b, offset, length);
+ }
+
+ public long getChecksum() {
+ return digest.getValue();
+ }
+
+ public void flush() throws IOException {
+ main.flush();
+ }
+
+ public void close() throws IOException {
+ main.close();
+ }
+
+ public long getFilePointer() {
+ return main.getFilePointer();
+ }
+
+ public void seek(long pos) {
+ throw new RuntimeException("not allowed");
+ }
+
+ public long length() throws IOException {
+ return main.length();
+ }
+}
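The write side is the mirror image, and is exactly the shape SegmentInfos.write() now has: write the payload through the wrapper, then append getChecksum() as the final eight bytes. The sketch below is illustrative only; dir, fileName and payload are assumed placeholders.

    import java.io.IOException;

    import org.apache.lucene.store.ChecksumIndexOutput;
    import org.apache.lucene.store.Directory;

    public class WriteTrailingChecksum {

      // Writes payload to fileName in dir, followed by the CRC32 of those bytes.
      public static void write(Directory dir, String fileName, byte[] payload) throws IOException {
        ChecksumIndexOutput output = new ChecksumIndexOutput(dir.createOutput(fileName));
        try {
          output.writeBytes(payload, 0, payload.length);  // digest is updated as the bytes go by
          final long checksum = output.getChecksum();     // must be fetched before writing it out
          output.writeLong(checksum);                     // trailing 8 bytes
        } finally {
          output.close();
        }
      }
    }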
diff --git a/src/java/org/apache/lucene/store/Directory.java b/src/java/org/apache/lucene/store/Directory.java
index fd715e5a48a..d28151bb6c7 100644
--- a/src/java/org/apache/lucene/store/Directory.java
+++ b/src/java/org/apache/lucene/store/Directory.java
@@ -83,6 +83,11 @@ public abstract class Directory {
Returns a stream writing this file. */
public abstract IndexOutput createOutput(String name) throws IOException;
+ /** Ensure that any writes to this file are moved to
+ * stable storage. Lucene uses this to properly commit
+ * changes to the index, to prevent a machine/OS crash
+ * from corrupting the index. */
+ public void sync(String name) throws IOException {}
/** Returns a stream reading an existing file. */
public abstract IndexInput openInput(String name)
diff --git a/src/java/org/apache/lucene/store/FSDirectory.java b/src/java/org/apache/lucene/store/FSDirectory.java
index e06216cf4a7..dc44c2020ae 100644
--- a/src/java/org/apache/lucene/store/FSDirectory.java
+++ b/src/java/org/apache/lucene/store/FSDirectory.java
@@ -435,6 +435,39 @@ public class FSDirectory extends Directory {
return new FSIndexOutput(file);
}
+ public void sync(String name) throws IOException {
+ File fullFile = new File(directory, name);
+ boolean success = false;
+ int retryCount = 0;
+ IOException exc = null;
+ while(!success && retryCount < 5) {
+ retryCount++;
+ RandomAccessFile file = null;
+ try {
+ try {
+ file = new RandomAccessFile(fullFile, "rw");
+ file.getFD().sync();
+ success = true;
+ } finally {
+ if (file != null)
+ file.close();
+ }
+ } catch (IOException ioe) {
+ if (exc == null)
+ exc = ioe;
+ try {
+ // Pause 5 msec
+ Thread.sleep(5);
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+ if (!success)
+ // Throw original exception
+ throw exc;
+ }
+
// Inherit javadoc
public IndexInput openInput(String name) throws IOException {
return openInput(name, BufferedIndexInput.BUFFER_SIZE);
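The retry loop above exists because a sync can fail transiently (for example, when another process briefly holds the file open on Windows), so the method retries a few times before rethrowing the first exception. For comparison, roughly the same durability guarantee can be expressed through java.nio; the sketch below is not part of the patch, and FileChannel.force(true) stands in for getFD().sync().

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;

    public class NioSyncSketch {

      // Flush a file's data (and metadata) to stable storage via java.nio.
      public static void sync(File indexDir, String name) throws IOException {
        RandomAccessFile file = new RandomAccessFile(new File(indexDir, name), "rw");
        try {
          FileChannel channel = file.getChannel();
          channel.force(true);   // true = also force metadata, comparable to getFD().sync()
        } finally {
          file.close();
        }
      }
    }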
diff --git a/src/site/src/documentation/content/xdocs/fileformats.xml b/src/site/src/documentation/content/xdocs/fileformats.xml
index 2fadcb7ddf9..a776abf36eb 100644
--- a/src/site/src/documentation/content/xdocs/fileformats.xml
+++ b/src/site/src/documentation/content/xdocs/fileformats.xml
@@ -819,18 +819,24 @@
IsCompoundFile>^SegCount
- 2.3 and above:
+ 2.3:
Segments --> Format, Version, NameCounter, SegCount, <SegName, SegSize, DelGen, DocStoreOffset, [DocStoreSegment, DocStoreIsCompoundFile], HasSingleNormFile, NumField,
NormGen^NumField,
IsCompoundFile>^SegCount
+
+ 2.4 and above:
+ Segments --> Format, Version, NameCounter, SegCount, <SegName, SegSize, DelGen, DocStoreOffset, [DocStoreSegment, DocStoreIsCompoundFile], HasSingleNormFile, NumField,
+ NormGen^NumField,
+ IsCompoundFile>^SegCount, Checksum
+
Format, NameCounter, SegCount, SegSize, NumField, DocStoreOffset --> Int32
- Version, DelGen, NormGen --> Int64
+ Version, DelGen, NormGen, Checksum --> Int64
@@ -842,7 +848,7 @@
- Format is -1 as of Lucene 1.4, -3 (SegmentInfos.FORMAT_SINGLE_NORM_FILE) as of Lucene 2.1 and 2.2, and -4 (SegmentInfos.FORMAT_SHARED_DOC_STORE) as of Lucene 2.3
+ Format is -1 as of Lucene 1.4, -3 (SegmentInfos.FORMAT_SINGLE_NORM_FILE) as of Lucene 2.1 and 2.2, -4 (SegmentInfos.FORMAT_SHARED_DOC_STORE) as of Lucene 2.3 and -5 (SegmentInfos.FORMAT_CHECKSUM) as of Lucene 2.4.
@@ -925,6 +931,13 @@
shares a single set of these files with other
segments.
+
+
+ Checksum contains the CRC32 checksum of all bytes
+ in the segments_N file up until the checksum.
+ This is used to verify integrity of the file on
+ opening the index.
+
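Because the format now documents the checksum as a trailing Int64 covering every preceding byte, a tool outside Lucene can validate a segments_N file with nothing more than CRC32 over the raw bytes. The following sketch is illustrative, not part of the patch; it assumes the big-endian Int64 encoding the file formats document specifies, and the class name is invented.

    import java.io.BufferedInputStream;
    import java.io.DataInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.zip.CRC32;

    public class SegmentsFileChecker {

      // Returns true if the trailing Int64 of a segments_N file matches the CRC32
      // of every byte that precedes it, per the 2.4 format described above.
      public static boolean check(File segmentsFile) throws IOException {
        long payload = segmentsFile.length() - 8;   // all bytes except the checksum itself
        DataInputStream in = new DataInputStream(
            new BufferedInputStream(new FileInputStream(segmentsFile)));
        try {
          CRC32 crc = new CRC32();
          for (long i = 0; i < payload; i++)
            crc.update(in.read());                  // feed each payload byte to the digest
          long stored = in.readLong();              // Int64 is big-endian, as DataInputStream expects
          return stored == crc.getValue();
        } finally {
          in.close();
        }
      }
    }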
diff --git a/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/src/test/org/apache/lucene/index/TestAtomicUpdate.java
index 27db24cc6e1..1693eb62af2 100644
--- a/src/test/org/apache/lucene/index/TestAtomicUpdate.java
+++ b/src/test/org/apache/lucene/index/TestAtomicUpdate.java
@@ -20,12 +20,8 @@ import org.apache.lucene.util.*;
import org.apache.lucene.store.*;
import org.apache.lucene.document.*;
import org.apache.lucene.analysis.*;
-import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
-import org.apache.lucene.util._TestUtil;
-
-import org.apache.lucene.util.LuceneTestCase;
import java.util.Random;
import java.io.File;
@@ -83,7 +79,6 @@ public class TestAtomicUpdate extends LuceneTestCase {
// Update all 100 docs...
for(int i=0; i<100; i++) {
Document d = new Document();
- int n = RANDOM.nextInt();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.TOKENIZED));
writer.updateDocument(new Term("id", Integer.toString(i)), d);
@@ -127,7 +122,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.TOKENIZED));
writer.addDocument(d);
}
- writer.flush();
+ writer.commit();
IndexerThread indexerThread = new IndexerThread(writer, threads);
threads[0] = indexerThread;
diff --git a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index f64ad651277..ad4309f3e83 100644
--- a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -349,7 +349,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setRAMBufferSizeMB(16.0);
- //IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
for(int i=0;i<35;i++) {
addDoc(writer, i);
}
@@ -390,12 +389,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase
expected = new String[] {"_0.cfs",
"_0_1.del",
"_0_1.s" + contentFieldIndex,
- "segments_4",
+ "segments_3",
"segments.gen"};
- if (!autoCommit)
- expected[3] = "segments_3";
-
String[] actual = dir.list();
Arrays.sort(expected);
Arrays.sort(actual);
diff --git a/src/test/org/apache/lucene/index/TestCrash.java b/src/test/org/apache/lucene/index/TestCrash.java
new file mode 100644
index 00000000000..1dd41953f9c
--- /dev/null
+++ b/src/test/org/apache/lucene/index/TestCrash.java
@@ -0,0 +1,181 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+public class TestCrash extends LuceneTestCase {
+
+ private IndexWriter initIndex() throws IOException {
+ return initIndex(new MockRAMDirectory());
+ }
+
+ private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
+ dir.setLockFactory(NoLockFactory.getNoLockFactory());
+
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+ //writer.setMaxBufferedDocs(2);
+ writer.setMaxBufferedDocs(10);
+ ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
+
+ Document doc = new Document();
+ doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("id", "0", Field.Store.YES, Field.Index.TOKENIZED));
+ for(int i=0;i<157;i++)
+ writer.addDocument(doc);
+
+ return writer;
+ }
+
+ private void crash(final IndexWriter writer) throws IOException {
+ final MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
+ ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getMergeScheduler();
+ dir.crash();
+ cms.sync();
+ dir.clearCrash();
+ }
+
+ public void testCrashWhileIndexing() throws IOException {
+ IndexWriter writer = initIndex();
+ MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
+ crash(writer);
+ IndexReader reader = IndexReader.open(dir);
+ assertTrue(reader.numDocs() < 157);
+ }
+
+ public void testWriterAfterCrash() throws IOException {
+ IndexWriter writer = initIndex();
+ MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
+ dir.setPreventDoubleWrite(false);
+ crash(writer);
+ writer = initIndex(dir);
+ writer.close();
+
+ IndexReader reader = IndexReader.open(dir);
+ assertTrue(reader.numDocs() < 314);
+ }
+
+ public void testCrashAfterReopen() throws IOException {
+ IndexWriter writer = initIndex();
+ MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
+ writer.close();
+ writer = initIndex(dir);
+ assertEquals(314, writer.docCount());
+ crash(writer);
+
+ /*
+ System.out.println("\n\nTEST: open reader");
+ String[] l = dir.list();
+ Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i]);
+    */
+
+    IndexReader reader = IndexReader.open(dir);
+    assertTrue(reader.numDocs() >= 157);
+ }
+
+ public void testCrashAfterClose() throws IOException {
+
+ IndexWriter writer = initIndex();
+ MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
+
+ writer.close();
+ dir.crash();
+
+ /*
+ String[] l = dir.list();
+ Arrays.sort(l);
+    for(int i=0;i<l.length;i++)
+      System.out.println("file " + i + " = " + l[i]);
+    */
+
+    IndexReader reader = IndexReader.open(dir);
+    assertEquals(157, reader.numDocs());
+  }
diff --git a/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/src/test/org/apache/lucene/index/TestDeletionPolicy.java
--- a/src/test/org/apache/lucene/index/TestDeletionPolicy.java
+++ b/src/test/org/apache/lucene/index/TestDeletionPolicy.java
-    if (autoCommit) {
-      assertTrue(policy.numOnCommit > 2);
- } else {
+ if (!autoCommit)
// If we are not auto committing then there should
// be exactly 2 commits (one per close above):
assertEquals(2, policy.numOnCommit);
- }
// Simplistic check: just verify all segments_N's still
// exist, and, I can open a reader on each:
@@ -334,13 +331,10 @@ public class TestDeletionPolicy extends LuceneTestCase
writer.close();
assertEquals(2, policy.numOnInit);
- if (autoCommit) {
- assertTrue(policy.numOnCommit > 2);
- } else {
+ if (!autoCommit)
// If we are not auto committing then there should
// be exactly 2 commits (one per close above):
assertEquals(2, policy.numOnCommit);
- }
// Simplistic check: just verify the index is in fact
// readable:
@@ -459,11 +453,8 @@ public class TestDeletionPolicy extends LuceneTestCase
writer.close();
assertEquals(2*(N+2), policy.numOnInit);
- if (autoCommit) {
- assertTrue(policy.numOnCommit > 2*(N+2)-1);
- } else {
+ if (!autoCommit)
assertEquals(2*(N+2)-1, policy.numOnCommit);
- }
IndexSearcher searcher = new IndexSearcher(dir);
Hits hits = searcher.search(query);
@@ -565,11 +556,8 @@ public class TestDeletionPolicy extends LuceneTestCase
}
assertEquals(1+3*(N+1), policy.numOnInit);
- if (autoCommit) {
- assertTrue(policy.numOnCommit > 3*(N+1)-1);
- } else {
+ if (!autoCommit)
assertEquals(2*(N+1), policy.numOnCommit);
- }
IndexSearcher searcher = new IndexSearcher(dir);
Hits hits = searcher.search(query);
diff --git a/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
index 7ff3fc44d19..b7beb19e1a0 100644
--- a/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
+++ b/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
@@ -18,17 +18,8 @@ package org.apache.lucene.index;
*/
import org.apache.lucene.util.LuceneTestCase;
-import java.util.Vector;
-import java.util.Arrays;
-import java.io.ByteArrayOutputStream;
-import java.io.ObjectOutputStream;
-import java.io.IOException;
-import java.io.File;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.Hits;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
@@ -77,8 +68,8 @@ public class TestIndexFileDeleter extends LuceneTestCase
String[] files = dir.list();
/*
-    for(int i=0;i<files.length;i++) {
diff --git a/src/test/org/apache/lucene/index/TestIndexWriter.java b/src/test/org/apache/lucene/index/TestIndexWriter.java
--- a/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriter.java
-    assertTrue(gen > lastGen);
- lastGen = gen;
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
} else if (j < 20) {
- assertTrue(gen > lastGen);
- lastGen = gen;
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
} else if (20 == j) {
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
- lastGen = gen;
+ lastFlushCount = flushCount;
} else if (j < 30) {
- assertEquals(gen, lastGen);
+ assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
} else if (j < 40) {
- assertTrue(gen> lastGen);
- lastGen = gen;
+ assertTrue(flushCount> lastFlushCount);
+ lastFlushCount = flushCount;
} else if (40 == j) {
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
- lastGen = gen;
+ lastFlushCount = flushCount;
} else if (j < 50) {
- assertEquals(gen, lastGen);
+ assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
- assertTrue(gen > lastGen);
+ assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
@@ -1334,46 +1339,46 @@ public class TestIndexWriter extends LuceneTestCase
writer.addDocument(doc);
}
- long lastGen = -1;
+ int lastFlushCount = -1;
for(int j=1;j<52;j++) {
writer.deleteDocuments(new Term("field", "aaa" + j));
_TestUtil.syncConcurrentMerges(writer);
- long gen = SegmentInfos.generationFromSegmentsFileName(SegmentInfos.getCurrentSegmentFileName(dir.list()));
+ int flushCount = writer.getFlushCount();
if (j == 1)
- lastGen = gen;
+ lastFlushCount = flushCount;
else if (j < 10) {
// No new files should be created
- assertEquals(gen, lastGen);
+ assertEquals(flushCount, lastFlushCount);
} else if (10 == j) {
- assertTrue(gen > lastGen);
- lastGen = gen;
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH);
} else if (j < 20) {
- assertTrue(gen > lastGen);
- lastGen = gen;
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
} else if (20 == j) {
writer.setRAMBufferSizeMB(16);
writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH);
- lastGen = gen;
+ lastFlushCount = flushCount;
} else if (j < 30) {
- assertEquals(gen, lastGen);
+ assertEquals(flushCount, lastFlushCount);
} else if (30 == j) {
writer.setRAMBufferSizeMB(0.000001);
writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH);
} else if (j < 40) {
- assertTrue(gen> lastGen);
- lastGen = gen;
+ assertTrue(flushCount> lastFlushCount);
+ lastFlushCount = flushCount;
} else if (40 == j) {
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
- lastGen = gen;
+ lastFlushCount = flushCount;
} else if (j < 50) {
- assertEquals(gen, lastGen);
+ assertEquals(flushCount, lastFlushCount);
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
} else if (50 == j) {
- assertTrue(gen > lastGen);
+ assertTrue(flushCount > lastFlushCount);
}
}
writer.close();
@@ -1831,11 +1836,18 @@ public class TestIndexWriter extends LuceneTestCase
public void eval(MockRAMDirectory dir) throws IOException {
if (doFail) {
StackTraceElement[] trace = new Exception().getStackTrace();
+ boolean sawAppend = false;
+ boolean sawFlush = false;
for (int i = 0; i < trace.length; i++) {
- if ("org.apache.lucene.index.DocumentsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()) && count++ == 30) {
- doFail = false;
- throw new IOException("now failing during flush");
- }
+ if ("org.apache.lucene.index.DocumentsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
+ sawAppend = true;
+ if ("doFlush".equals(trace[i].getMethodName()))
+ sawFlush = true;
+ }
+
+ if (sawAppend && sawFlush && count++ >= 30) {
+ doFail = false;
+ throw new IOException("now failing during flush");
}
}
}
@@ -2263,6 +2275,7 @@ public class TestIndexWriter extends LuceneTestCase
try {
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
} catch (IOException ioe) {
+ //ioe.printStackTrace(System.out);
if (ioe.getMessage().startsWith("fake disk full at") ||
ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
@@ -2282,6 +2295,7 @@ public class TestIndexWriter extends LuceneTestCase
break;
}
} catch (Throwable t) {
+ //t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
@@ -2300,7 +2314,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testCloseWithThreads() throws IOException {
int NUM_THREADS = 3;
- for(int iter=0;iter<50;iter++) {
+ for(int iter=0;iter<20;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2310,7 +2324,6 @@ public class TestIndexWriter extends LuceneTestCase
writer.setMergeFactor(4);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
- boolean diskFull = false;
for(int i=0;i<NUM_THREADS;i++)
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 1 but got " + gen, gen > 1);
+
+ final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
+ IndexInput in = dir.openInput(segmentsFileName);
+ IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
+ out.copyBytes(in, in.length()-1);
+ byte b = in.readByte();
+ out.writeByte((byte) (1+b));
+ out.close();
+ in.close();
+
+ IndexReader reader = null;
+ try {
+ reader = IndexReader.open(dir);
+ } catch (IOException e) {
+ e.printStackTrace(System.out);
+ fail("segmentInfos failed to retry fallback to correct segments_N file");
+ }
+ reader.close();
+ }
+
+ // LUCENE-1044: test writer.commit() when ac=false
+ public void testForceCommit() throws IOException {
+ Directory dir = new MockRAMDirectory();
+
+ IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+ writer.setMaxBufferedDocs(2);
+ writer.setMergeFactor(5);
+
+ for (int i = 0; i < 23; i++)
+ addDoc(writer);
+
+ IndexReader reader = IndexReader.open(dir);
+ assertEquals(0, reader.numDocs());
+ writer.commit();
+ IndexReader reader2 = reader.reopen();
+ assertEquals(0, reader.numDocs());
+ assertEquals(23, reader2.numDocs());
+ reader.close();
+
+ for (int i = 0; i < 17; i++)
+ addDoc(writer);
+ assertEquals(23, reader2.numDocs());
+ reader2.close();
+ reader = IndexReader.open(dir);
+ assertEquals(23, reader.numDocs());
+ reader.close();
+ writer.commit();
+
+ reader = IndexReader.open(dir);
+ assertEquals(40, reader.numDocs());
+ reader.close();
+ writer.close();
+ dir.close();
+ }
+
+ // Throws IOException during MockRAMDirectory.sync
+ private static class FailOnlyInSync extends MockRAMDirectory.Failure {
+ boolean didFail;
+ public void eval(MockRAMDirectory dir) throws IOException {
+ if (doFail) {
+ StackTraceElement[] trace = new Exception().getStackTrace();
+ for (int i = 0; i < trace.length; i++) {
+ if (doFail && "org.apache.lucene.store.MockRAMDirectory".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
+ didFail = true;
+ throw new IOException("now failing on purpose during sync");
+ }
+ }
+ }
+ }
+ }
+
+ // LUCENE-1044: test exception during sync
+ public void testExceptionDuringSync() throws IOException {
+ MockRAMDirectory dir = new MockRAMDirectory();
+ FailOnlyInSync failure = new FailOnlyInSync();
+ dir.failOn(failure);
+
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+ failure.setDoFail();
+
+ ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+ // We expect sync exceptions in the merge threads
+ cms.setSuppressExceptions();
+ writer.setMergeScheduler(cms);
+ writer.setMaxBufferedDocs(2);
+ writer.setMergeFactor(5);
+
+ for (int i = 0; i < 23; i++)
+ addDoc(writer);
+
+ cms.sync();
+ assertTrue(failure.didFail);
+ failure.clearDoFail();
+ writer.close();
+
+ IndexReader reader = IndexReader.open(dir);
+ assertEquals(23, reader.numDocs());
+ reader.close();
+ dir.close();
+ }
+
// LUCENE-1168
public void testTermVectorCorruption() throws IOException {
diff --git a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index 18d190f4ad8..7366ce703af 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -30,7 +30,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.store.RAMDirectory;
public class TestIndexWriterDelete extends LuceneTestCase {
@@ -45,7 +44,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for(int pass=0;pass<2;pass++) {
boolean autoCommit = (0==pass);
- Directory dir = new RAMDirectory();
+ Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir, autoCommit,
new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
modifier.setUseCompoundFile(true);
@@ -65,28 +64,17 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.addDocument(doc);
}
modifier.optimize();
-
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
Term term = new Term("city", "Amsterdam");
int hitCount = getHitCount(dir, term);
assertEquals(1, hitCount);
- if (!autoCommit) {
- modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
- modifier.setUseCompoundFile(true);
- }
modifier.deleteDocuments(term);
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
hitCount = getHitCount(dir, term);
assertEquals(0, hitCount);
- if (autoCommit) {
- modifier.close();
- }
+ modifier.close();
dir.close();
}
}
@@ -96,7 +84,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for(int pass=0;pass<2;pass++) {
boolean autoCommit = (0==pass);
- Directory dir = new RAMDirectory();
+ Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir, autoCommit,
new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
modifier.setMaxBufferedDocs(2);
@@ -108,38 +96,26 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < 7; i++) {
addDoc(modifier, ++id, value);
}
- modifier.flush();
+ modifier.commit();
assertEquals(0, modifier.getNumBufferedDocuments());
assertTrue(0 < modifier.getSegmentCount());
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
IndexReader reader = IndexReader.open(dir);
assertEquals(7, reader.numDocs());
reader.close();
- if (!autoCommit) {
- modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
- modifier.setMaxBufferedDocs(2);
- modifier.setMaxBufferedDeleteTerms(2);
- }
-
modifier.deleteDocuments(new Term("value", String.valueOf(value)));
modifier.deleteDocuments(new Term("value", String.valueOf(value)));
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
reader = IndexReader.open(dir);
assertEquals(0, reader.numDocs());
reader.close();
- if (autoCommit) {
- modifier.close();
- }
+ modifier.close();
dir.close();
}
}
@@ -148,7 +124,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
public void testRAMDeletes() throws IOException {
for(int pass=0;pass<2;pass++) {
boolean autoCommit = (0==pass);
- Directory dir = new RAMDirectory();
+ Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir, autoCommit,
new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
modifier.setMaxBufferedDocs(4);
@@ -169,9 +145,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
assertEquals(0, modifier.getSegmentCount());
modifier.flush();
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
IndexReader reader = IndexReader.open(dir);
assertEquals(1, reader.numDocs());
@@ -179,9 +153,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
assertEquals(1, hitCount);
reader.close();
- if (autoCommit) {
- modifier.close();
- }
+ modifier.close();
dir.close();
}
}
@@ -191,7 +163,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for(int pass=0;pass<2;pass++) {
boolean autoCommit = (0==pass);
- Directory dir = new RAMDirectory();
+ Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir, autoCommit,
new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
modifier.setMaxBufferedDocs(100);
@@ -208,23 +180,18 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < 5; i++) {
addDoc(modifier, ++id, value);
}
- modifier.flush();
+ modifier.commit();
for (int i = 0; i < 5; i++) {
addDoc(modifier, ++id, value);
}
modifier.deleteDocuments(new Term("value", String.valueOf(value)));
- modifier.flush();
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
IndexReader reader = IndexReader.open(dir);
assertEquals(5, reader.numDocs());
- if (autoCommit) {
- modifier.close();
- }
+ modifier.close();
}
}
@@ -232,7 +199,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
public void testBatchDeletes() throws IOException {
for(int pass=0;pass<2;pass++) {
boolean autoCommit = (0==pass);
- Directory dir = new RAMDirectory();
+ Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir, autoCommit,
new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
modifier.setMaxBufferedDocs(2);
@@ -244,29 +211,17 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < 7; i++) {
addDoc(modifier, ++id, value);
}
- modifier.flush();
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
IndexReader reader = IndexReader.open(dir);
assertEquals(7, reader.numDocs());
reader.close();
- if (!autoCommit) {
- modifier = new IndexWriter(dir, autoCommit,
- new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
- modifier.setMaxBufferedDocs(2);
- modifier.setMaxBufferedDeleteTerms(2);
- }
-
id = 0;
modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
reader = IndexReader.open(dir);
assertEquals(5, reader.numDocs());
@@ -276,23 +231,13 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < terms.length; i++) {
terms[i] = new Term("id", String.valueOf(++id));
}
- if (!autoCommit) {
- modifier = new IndexWriter(dir, autoCommit,
- new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
- modifier.setMaxBufferedDocs(2);
- modifier.setMaxBufferedDeleteTerms(2);
- }
modifier.deleteDocuments(terms);
- if (!autoCommit) {
- modifier.close();
- }
+ modifier.commit();
reader = IndexReader.open(dir);
assertEquals(2, reader.numDocs());
reader.close();
- if (autoCommit) {
- modifier.close();
- }
+ modifier.close();
dir.close();
}
}
@@ -338,7 +283,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
boolean autoCommit = (0==pass);
// First build up a starting index:
- RAMDirectory startDir = new RAMDirectory();
+ MockRAMDirectory startDir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(startDir, autoCommit,
new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 157; i++) {
@@ -444,38 +389,10 @@ public class TestIndexWriterDelete extends LuceneTestCase {
}
}
- // Whether we succeeded or failed, check that all
- // un-referenced files were in fact deleted (ie,
- // we did not create garbage). Just create a
- // new IndexFileDeleter, have it delete
- // unreferenced files, then verify that in fact
- // no files were deleted:
- String[] startFiles = dir.list();
- SegmentInfos infos = new SegmentInfos();
- infos.read(dir);
- new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
- String[] endFiles = dir.list();
-
- Arrays.sort(startFiles);
- Arrays.sort(endFiles);
-
- // for(int i=0;i