diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..51c89b74529 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +.gradle/ +elasticsearch.iws +.idea/workspace.xml +work/ +.DS_Store +build/ +test-output/ +modules/elasticsearch/build/ +modules/test/integration/build/ +modules/test/testng/build/ +modules/benchmark/micro/build/ diff --git a/.idea/ant.xml b/.idea/ant.xml new file mode 100644 index 00000000000..2581ca3fe84 --- /dev/null +++ b/.idea/ant.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/.idea/compiler.xml b/.idea/compiler.xml new file mode 100644 index 00000000000..7b9d051692a --- /dev/null +++ b/.idea/compiler.xml @@ -0,0 +1,31 @@ + + + + + + + + diff --git a/.idea/copyright/apache.xml b/.idea/copyright/apache.xml new file mode 100644 index 00000000000..54ae28930e3 --- /dev/null +++ b/.idea/copyright/apache.xml @@ -0,0 +1,9 @@ + + + + \ No newline at end of file diff --git a/.idea/copyright/profiles_settings.xml b/.idea/copyright/profiles_settings.xml new file mode 100644 index 00000000000..ed10bb5311a --- /dev/null +++ b/.idea/copyright/profiles_settings.xml @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/dictionaries/kimchy.xml b/.idea/dictionaries/kimchy.xml new file mode 100644 index 00000000000..28741f87650 --- /dev/null +++ b/.idea/dictionaries/kimchy.xml @@ -0,0 +1,56 @@ + + + + args + asciifolding + attr + banon + birthdate + bool + checksum + commitable + committable + configurator + coord + desc + deserialize + elasticsearch + failover + flushable + formatter + formatters + indices + inet + infos + jgroups + joda + jsonp + lifecycle + lucene + metadata + millis + mmap + multi + nanos + ngram + param + porterstem + rebalance + searchable + snapshotting + stopwords + streamable + successul + throwable + timestamp + translog + traslog + trie + tuple + unregister + uuid + versioned + yaml + + + \ No newline at end of file diff --git a/.idea/encodings.xml 
b/.idea/encodings.xml new file mode 100644 index 00000000000..a3ba2fdd5fe --- /dev/null +++ b/.idea/encodings.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/.idea/gradle.xml b/.idea/gradle.xml new file mode 100644 index 00000000000..15bc77f38dc --- /dev/null +++ b/.idea/gradle.xml @@ -0,0 +1,7 @@ + + + + + + diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 00000000000..a0a4b92eb02 --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,38 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 00000000000..3b312839bf2 --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,7 @@ + + + + \ No newline at end of file diff --git a/.idea/libraries/config.xml b/.idea/libraries/config.xml new file mode 100644 index 00000000000..2141031e292 --- /dev/null +++ b/.idea/libraries/config.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/google_collect.xml b/.idea/libraries/google_collect.xml new file mode 100644 index 00000000000..e7950c5b919 --- /dev/null +++ b/.idea/libraries/google_collect.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/guice.xml b/.idea/libraries/guice.xml new file mode 100644 index 00000000000..b62d5bb76e4 --- /dev/null +++ b/.idea/libraries/guice.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/hamcrest.xml b/.idea/libraries/hamcrest.xml new file mode 100644 index 00000000000..36a68821384 --- /dev/null +++ b/.idea/libraries/hamcrest.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/jackson.xml b/.idea/libraries/jackson.xml new file mode 100644 index 00000000000..c17528f9b19 --- /dev/null +++ b/.idea/libraries/jackson.xml @@ -0,0 
+1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/jgroups.xml b/.idea/libraries/jgroups.xml new file mode 100644 index 00000000000..8467733db7a --- /dev/null +++ b/.idea/libraries/jgroups.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/jline.xml b/.idea/libraries/jline.xml new file mode 100644 index 00000000000..f855f5eb30e --- /dev/null +++ b/.idea/libraries/jline.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/joda_time.xml b/.idea/libraries/joda_time.xml new file mode 100644 index 00000000000..a08f8135087 --- /dev/null +++ b/.idea/libraries/joda_time.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/logging.xml b/.idea/libraries/logging.xml new file mode 100644 index 00000000000..a7f99f2b116 --- /dev/null +++ b/.idea/libraries/logging.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/lucene.xml b/.idea/libraries/lucene.xml new file mode 100644 index 00000000000..e8261cc4ad5 --- /dev/null +++ b/.idea/libraries/lucene.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/netty.xml b/.idea/libraries/netty.xml new file mode 100644 index 00000000000..88fa2dabfec --- /dev/null +++ b/.idea/libraries/netty.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/snakeyaml.xml b/.idea/libraries/snakeyaml.xml new file mode 100644 index 00000000000..11f5511c837 --- /dev/null +++ b/.idea/libraries/snakeyaml.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/testng.xml b/.idea/libraries/testng.xml new file mode 100644 index 00000000000..6df8a2dbdad --- /dev/null +++ b/.idea/libraries/testng.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 
100644 index 00000000000..9c0c408b651 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 00000000000..8da55ea3b4f --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/.idea/modules/benchmark-micro.iml b/.idea/modules/benchmark-micro.iml new file mode 100644 index 00000000000..66de07d6156 --- /dev/null +++ b/.idea/modules/benchmark-micro.iml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/.idea/modules/elasticsearch-root.iml b/.idea/modules/elasticsearch-root.iml new file mode 100644 index 00000000000..4511acc6661 --- /dev/null +++ b/.idea/modules/elasticsearch-root.iml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/.idea/modules/elasticsearch.iml b/.idea/modules/elasticsearch.iml new file mode 100644 index 00000000000..18466024c3d --- /dev/null +++ b/.idea/modules/elasticsearch.iml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.idea/modules/test-integration.iml b/.idea/modules/test-integration.iml new file mode 100644 index 00000000000..845a88c7201 --- /dev/null +++ b/.idea/modules/test-integration.iml @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/.idea/modules/test-testng.iml b/.idea/modules/test-testng.iml new file mode 100644 index 00000000000..90b934048ec --- /dev/null +++ b/.idea/modules/test-testng.iml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/.idea/projectCodeStyle.xml b/.idea/projectCodeStyle.xml new file mode 100644 index 00000000000..f98da4cdac3 --- /dev/null +++ b/.idea/projectCodeStyle.xml @@ -0,0 +1,105 @@ + + + + + + + diff --git a/.idea/runConfigurations/Bootstrap.xml b/.idea/runConfigurations/Bootstrap.xml new file mode 100644 index 00000000000..6d8c9d38e12 --- /dev/null +++ b/.idea/runConfigurations/Bootstrap.xml @@ -0,0 +1,35 @@ + + + + + + + + + \ No newline at end of file 
diff --git a/.idea/runConfigurations/Elastic_Search_Tests.xml b/.idea/runConfigurations/Elastic_Search_Tests.xml new file mode 100644 index 00000000000..52513064bbd --- /dev/null +++ b/.idea/runConfigurations/Elastic_Search_Tests.xml @@ -0,0 +1,42 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/uiDesigner.xml b/.idea/uiDesigner.xml new file mode 100644 index 00000000000..3b000203088 --- /dev/null +++ b/.idea/uiDesigner.xml @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 00000000000..275077f8255 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000000..7b1b5dab229 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,563 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +ELASTICSEARCH THIRD-PARTY DEPENDENCIES + +Elasticsearch includes convenience copies of a number of third-party +dependencies that have separate copyright notices and license terms. Your +use of these libraries is subject to the terms and conditions of the +following licenses. + +For lib/flexjson-1.7.jar and lib/google-collect-1.0-rc1.jar: + +While developed outside the ASF, these projects are also licensed under +the Apache License 2.0. The full text of the Apache License 2.0 can be +found at top of this file. + +See NOTICE.txt for the respective copyright notices of these libraries. + + +For lib/antlr-3.1.3.jar: +------------------------ + +[The "BSD licence"] +Copyright (c) 2003-2006 Terence Parr +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +For lib/high-scale-lib.jar: +--------------------------- + +The person or persons who have associated work with this document (the +"Dedicator" or "Certifier") hereby either (a) certifies that, to the best +of his knowledge, the work of authorship identified is in the public +domain of the country from which the work is published, or (b) hereby +dedicates whatever copyright the dedicators holds in the work of +authorship identified below (the "Work") to the public domain. A +certifier, moreover, dedicates any copyright interest he may have in the +associated work, and for these purposes, is described as a "dedicator" +below. + +A certifier has taken reasonable steps to verify the copyright status of +this work. Certifier recognizes that his good faith efforts may not +shield him from liability if in fact the work certified is not in the +public domain. + +Dedicator makes this dedication for the benefit of the public at large and +to the detriment of the Dedicator's heirs and successors. Dedicator +intends this dedication to be an overt act of relinquishment in perpetuity +of all present and future rights under copyright law, whether vested or +contingent, in the Work. 
Dedicator understands that such relinquishment of +all rights includes the relinquishment of all rights to enforce (by +lawsuit or otherwise) those copyrights in the Work. + +Dedicator recognizes that, once placed in the public domain, the Work may +be freely reproduced, distributed, transmitted, used, modified, built +upon, or otherwise exploited by anyone for any purpose, commercial or +non-commercial, and in any way, including by methods that have not yet +been invented or conceived. + + +For lib/jline-0.9.94.jar: +------------------------- + +Copyright (c) 2002-2006, Marc Prud'hommeaux +All rights reserved. + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with +the distribution. + +Neither the name of JLine nor the names of its contributors +may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + + +For lib/junit-4.6.jar: +---------------------- + +Common Public License Version 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC +LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM +CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and +documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are +distributed by that particular Contributor. A Contribution 'originates' from a +Contributor if it was added to the Program by such Contributor itself or anyone +acting on such Contributor's behalf. Contributions do not include additions to +the Program which: (i) are separate modules of software distributed in +conjunction with the Program under their own license agreement, and (ii) are not +derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents " mean patent claims licensable by a Contributor which are +necessarily infringed by the use or sale of its Contribution alone or when +combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. 
+ +"Recipient" means anyone who receives the Program under this Agreement, +including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants +Recipient a non-exclusive, worldwide, royalty-free copyright license to +reproduce, prepare derivative works of, publicly display, publicly perform, +distribute and sublicense the Contribution of such Contributor, if any, and such +derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants +Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed +Patents to make, use, sell, offer to sell, import and otherwise transfer the +Contribution of such Contributor, if any, in source code and object code form. +This patent license shall apply to the combination of the Contribution and the +Program if, at the time the Contribution is added by the Contributor, such +addition of the Contribution causes such combination to be covered by the +Licensed Patents. The patent license shall not apply to any other combinations +which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses +to its Contributions set forth herein, no assurances are provided by any +Contributor that the Program does not infringe the patent or other intellectual +property rights of any other entity. Each Contributor disclaims any liability to +Recipient for claims brought by any other entity based on infringement of +intellectual property rights or otherwise. As a condition to exercising the +rights and licenses granted hereunder, each Recipient hereby assumes sole +responsibility to secure any other intellectual property rights needed, if any. 
+For example, if a third party patent license is required to allow Recipient to +distribute the Program, it is Recipient's responsibility to acquire that license +before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient +copyright rights in its Contribution, if any, to grant the copyright license set +forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its +own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and +conditions, express and implied, including warranties or conditions of title and +non-infringement, and implied warranties or conditions of merchantability and +fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for +damages, including direct, indirect, special, incidental and consequential +damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered +by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such +Contributor, and informs licensees how to obtain it in a reasonable manner on or +through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the +Program. + +Each Contributor must identify itself as the originator of its Contribution, if +any, in a manner that reasonably allows subsequent Recipients to identify the +originator of the Contribution. + +4. 
COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with +respect to end users, business partners and the like. While this license is +intended to facilitate the commercial use of the Program, the Contributor who +includes the Program in a commercial product offering should do so in a manner +which does not create potential liability for other Contributors. Therefore, if +a Contributor includes the Program in a commercial product offering, such +Contributor ("Commercial Contributor") hereby agrees to defend and indemnify +every other Contributor ("Indemnified Contributor") against any losses, damages +and costs (collectively "Losses") arising from claims, lawsuits and other legal +actions brought by a third party against the Indemnified Contributor to the +extent caused by the acts or omissions of such Commercial Contributor in +connection with its distribution of the Program in a commercial product +offering. The obligations in this section do not apply to any claims or Losses +relating to any actual or alleged intellectual property infringement. In order +to qualify, an Indemnified Contributor must: a) promptly notify the Commercial +Contributor in writing of such claim, and b) allow the Commercial Contributor to +control, and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may participate in +any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product +offering, Product X. That Contributor is then a Commercial Contributor. If that +Commercial Contributor then makes performance claims, or offers warranties +related to Product X, those performance claims and warranties are such +Commercial Contributor's responsibility alone. 
Under this section, the +Commercial Contributor would have to defend claims against the other +Contributors related to those performance claims and warranties, and if a court +requires any other Contributor to pay any damages as a result, the Commercial +Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each +Recipient is solely responsible for determining the appropriateness of using and +distributing the Program and assumes all risks associated with its exercise of +rights under this Agreement, including but not limited to the risks and costs of +program errors, compliance with applicable laws, damage to or loss of data, +programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY +CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS +GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable +law, it shall not affect the validity or enforceability of the remainder of the +terms of this Agreement, and without further action by the parties hereto, such +provision shall be reformed to the minimum extent necessary to make such +provision valid and enforceable. 
+ +If Recipient institutes patent litigation against a Contributor with respect to +a patent applicable to software (including a cross-claim or counterclaim in a +lawsuit), then any patent licenses granted by that Contributor to such Recipient +under this Agreement shall terminate as of the date such litigation is filed. In +addition, if Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the Program +itself (excluding combinations of the Program with other software or hardware) +infringes such Recipient's patent(s), then such Recipient's rights granted under +Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to +comply with any of the material terms or conditions of this Agreement and does +not cure such failure in a reasonable period of time after becoming aware of +such noncompliance. If all Recipient's rights under this Agreement terminate, +Recipient agrees to cease use and distribution of the Program as soon as +reasonably practicable. However, Recipient's obligations under this Agreement +and any licenses granted by Recipient relating to the Program shall continue and +survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in +order to avoid inconsistency the Agreement is copyrighted and may only be +modified in the following manner. The Agreement Steward reserves the right to +publish new versions (including revisions) of this Agreement from time to time. +No one other than the Agreement Steward has the right to modify this Agreement. +IBM is the initial Agreement Steward. IBM may assign the responsibility to serve +as the Agreement Steward to a suitable separate entity. Each new version of the +Agreement will be given a distinguishing version number. 
The Program (including +Contributions) may always be distributed subject to the version of the Agreement +under which it was received. In addition, after a new version of the Agreement +is published, Contributor may elect to distribute the Program (including its +Contributions) under the new version. Except as expressly stated in Sections +2(a) and 2(b) above, Recipient receives no rights or licenses to the +intellectual property of any Contributor under this Agreement, whether +expressly, by implication, estoppel or otherwise. All rights in the Program not +expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the +intellectual property laws of the United States of America. No party to this +Agreement will bring a legal action under this Agreement more than one year +after the cause of action arose. Each party waives its rights to a jury trial in +any resulting litigation. + + +For lib/slf4j-api-1.5.8.jar and lib/slf4j-log4j12-1.5.8.jar: +------------------------------------------------------------ + +Copyright (c) 2004-2008 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 00000000000..2179a9f245f --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,16 @@ +ElasticSearch +Copyright 2009-2010 Elastic Search and Shay Banon + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product makes use of the google-collections library from +http://code.google.com/p/google-collections/. +Copyright (C) 2008 Google Inc. + +Some alternate data structures provided by high-scale-lib from +http://sourceforge.net/projects/high-scale-lib/. +Written by Cliff Click and released as Public Domain. + +Logging abstraction provided by SLF4J (http://www.slf4j.org). +Copyright (c) 2004-2008 QOS.ch diff --git a/README.textile b/README.textile new file mode 100644 index 00000000000..9036416581a --- /dev/null +++ b/README.textile @@ -0,0 +1,188 @@ +h1. ElasticSearch + +h2. A Distributed RESTful Search Engine + +h3. "http://www.elasticsearch.com":http://www.elasticsearch.com + +ElasticSearch is a distributed RESTful search engine built for the cloud. Features include: + +* Distributed and Highly Available Search Engine. +** Each index is fully sharded with a configurable number of shards. +** Each shard can have one or more backups. +** Read / Search operations performed on either primary or backup shards. +* Multi Tenant with Multi Types. +** Support for more than one index. +** Support for more than one type per index. +** Index level configuration (number of shards, index storage, ...). +* Various set of APIs +** HTTP RESTful API +** Native Java API. +** All APIs perform automatic node operation rerouting. +* Document oriented +** No need for upfront schema definition. 
+** Schema can be defined per type for customization of the indexing process. +* Reliable, Asynchronous Write Behind for long term persistency. +* (Near) Real Time Search. +* Built on top of Lucene +** Each shard is a fully functional Lucene index +** All the power of Lucene easily exposed through simple configuration / plugins. +* Per operation consistency +** Single document level operations are atomic, consistent, isolated and durable. +* Open Source under Apache 2 License. + +h2. Getting Started + +First of all, DON'T PANIC. It will take 5 minutes to get the gist of what ElasticSearch is all about. + +h3. Installation + +* Download and unzip the ElasticSearch installation. +* Run @bin/elasticsearch -f@ on unix, or @bin/elasticsearch.bat@ on windows. +* Run @curl -X GET http://localhost:9200/@. +* Start more servers ... + +h3. Indexing + +Let's try to index some twitter-like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically): + +<pre>
+curl -XPUT http://localhost:9200/twitter/user/kimchy -d '{ name: "Shay Banon" }'
+
+curl -XPUT http://localhost:9200/twitter/tweet/1 -d \
+'{ user: "kimchy", postDate: "2009-11-15T13:12:00", message: "Trying out Elastic Search, so far so good?" }'
+
+curl -XPUT http://localhost:9200/twitter/tweet/2 -d \
+'{ user: "kimchy", postDate: "2009-11-15T14:12:12", message: "Another tweet, will it be indexed?" }'
+
+ +Now, lets see if the information was added by GETting it: + +
+curl -X GET http://localhost:9200/twitter/user/kimchy?pretty=true
+curl -X GET http://localhost:9200/twitter/tweet/1?pretty=true
+curl -X GET http://localhost:9200/twitter/tweet/2?pretty=true
+
+ +h3. Searching + +Mmm search..., shouldn't it be elastic? +Lets find all the tweets that @kimchy@ posted: + +
+curl -XGET http://localhost:9200/twitter/tweet/_search?q=user:kimchy\&pretty=true
+
+ +We can also use the JSON query language ElasticSearch provides instead of a query string: + +
+curl -XPOST http://localhost:9200/twitter/tweet/_search?pretty=true -d \
+'{ query : { term : { user: "kimchy" } } }'
+
+ +Just for kicks, lets get all the documents stored (we should see the user as well): + +
+curl -XGET http://localhost:9200/twitter/_search?pretty=true -d \
+'{ query : { matchAll : {} } }'
+
+ +We can also do range search (the @postDate@ was automatically identified as date) + +
+curl -XGET http://localhost:9200/twitter/_search?pretty=true -d \
+'{ query : { range : { postDate : { from : "2009-11-15T13:00:00", to : "2009-11-15T14:00:00" } } } }'
+
+</pre> +There are many more options to perform search, after all, it's a search product, no? All the familiar Lucene queries are available through the JSON query language, or through the query parser. + +h3. Multi Tenant - Indices and Types + +Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such a large amount of data. + +ElasticSearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@. + +Another way to define our simple twitter system is to have a different index per user. Here are the indexing curls in this case: + +<pre>
+curl -XPUT http://localhost:9200/kimchy/info/1 -d '{ name: "Shay Banon" }'
+
+curl -XPUT http://localhost:9200/kimchy/tweet/1 -d \
+'{ user: "kimchy", postDate: "2009-11-15T13:12:00", message: "Trying out Elastic Search, so far so good?" }'
+
+curl -XPUT http://localhost:9200/kimchy/tweet/2 -d \
+'{ user: "kimchy", postDate: "2009-11-15T14:12:12", message: "Another tweet, will it be indexed?" }'
+
+</pre> +The above indexes information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index. + +Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 backup per index, to only 1 shard with 1 backup per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well): + +<pre>
+curl -XPUT http://localhost:9200/another_user/ -d \
+'{ index : { numberOfShards : 1, numberOfReplicas : 1 } }'
+
+ +Search (and similar operations) are multi index aware. This means that we can easily search on more than one +index (twitter user), for example: + +
+curl -XGET http://localhost:9200/kimchy,another_user/_search?pretty=true -d \
+'{ query : { matchAll : {} } }'
+
+ +Or on all the indices: + +
+curl -XGET http://localhost:9200/_search?pretty=true -d \
+'{ query : { matchAll : {} } }'
+
+ +{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from my friends friends). + +h3. Distributed, Highly Available, and Write Behind + +Lets face it, things will fail.... + +ElasticSearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more backups. By default, an index is created with 5 shards and 1 backup per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards). + +In order to play with Elastic Search distributed nature, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed. + +If the whole cluster is brought down, all the indexed data will be lost (each shard local storage is temporal). For long term persistency, write behind should be enabled. This is as simple as configuring the @elasticsearch.yml@ configuration file (which effectively enables write behind to file system for all indices created unless configured otherwise when creating the index): + +
+gateway:
+  type: fs
+
+</pre> + +Alternatively, ElasticSearch can be started with the following command line: +@elasticsearch -f -Des.index.gateway.type=fs@. + +The above configuration will persist the indices created on ElasticSearch to a file system (path can be configured) in an asynchronous, reliable fashion. Other gateway implementations can be easily implemented and more will be provided out of the box in later versions (did someone say AmazonS3/Hadoop/Cassandra?). + +h3. Where to go from here? + +We have just covered a very small portion of what ElasticSearch is all about. For more information, please refer to: . + +h3. Building from Source + +ElasticSearch uses Gradle:http://www.gradle.org for its build system. In order to create a distribution, simply run @gradlew devRelease@, the distribution will be created under @build/distributions@. + +h1. License + +<pre>
+This software is licensed under the Apache 2 license, quoted below.
+
+Copyright 2009-2010 Elastic Search 
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not
+use this file except in compliance with the License. You may obtain a copy of
+the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations under
+the License.
+
\ No newline at end of file diff --git a/bin/elasticsearch b/bin/elasticsearch new file mode 100755 index 00000000000..0c088a88459 --- /dev/null +++ b/bin/elasticsearch @@ -0,0 +1,168 @@ +#!/bin/sh + +# OPTIONS: +# -f: start in the foreground +# -p : log the pid to a file (useful to kill it later) + +# CONTROLLING STARTUP: +# +# This script relies on few environment variables to determine startup +# behavior, those variables are: +# +# CLASSPATH -- A Java classpath containing everything necessary to run. +# JAVA_OPS -- Additional arguments to the JVM for heap size, etc +# ES_JAVA_OPS -- External Java Opts on top of the defaults set +# +# As a convenience, a fragment of shell is sourced in order to set one or +# more of these variables. This so-called `include' can be placed in a +# number of locations and will be searched for in order. The lowest +# priority search path is the same directory as the startup script, and +# since this is the location of the sample in the project tree, it should +# almost work Out Of The Box. +# +# Any serious use-case though will likely require customization of the +# include. For production installations, it is recommended that you copy +# the sample to one of /usr/share/elasticsearch/elasticsearch.in.sh, +# /usr/local/share/elasticsearch/elasticsearch.in.sh, or +# /opt/elasticsearch/elasticsearch.in.sh and make your modifications there. +# +# Another option is to specify the full path to the include file in the +# environment. For example: +# +# $ ES_INCLUDE=/path/to/in.sh elasticsearch -p /var/run/es.pid +# +# Note: This is particularly handy for running multiple instances on a +# single installation, or for quick tests. +# +# If you would rather configure startup entirely from the environment, you +# can disable the include by exporting an empty ES_INCLUDE, or by +# ensuring that no include files exist in the aforementioned search list. 
+# Be aware that you will be entirely responsible for populating the needed +# environment variables. + + +SCRIPT="$0" + +# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path. +while [ -h "$SCRIPT" ] ; do + ls=`ls -ld "$SCRIPT"` + # Drop everything prior to -> + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + SCRIPT="$link" + else + SCRIPT=`dirname "$SCRIPT"`/"$link" + fi +done + +# determine elasticsearch home +ES_HOME=`dirname "$SCRIPT"`/.. + +# make ELASTICSEARCH_HOME absolute +ES_HOME=`cd $ES_HOME; pwd` + + +if [ -x $JAVA_HOME/bin/java ]; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=`which java` +fi + +# If an include wasn't specified in the environment, then search for one... +if [ "x$ES_INCLUDE" = "x" ]; then + # Locations (in order) to use when searching for an include file. + for include in /usr/share/elasticsearch/elasticsearch.in.sh \ + /usr/local/share/elasticsearch/elasticsearch.in.sh \ + /opt/elasticsearch/elasticsearch.in.sh \ + ~/.elasticsearch.in.sh \ + `dirname $0`/elasticsearch.in.sh; do + if [ -r $include ]; then + . $include + break + fi + done +# ...otherwise, source the specified include. +elif [ -r $ES_INCLUDE ]; then + . $ES_INCLUDE +fi + +if [ -z $CLASSPATH ]; then + echo "You must set the CLASSPATH var" >&2 + exit 1 +fi + +# Special-case path variables. +case "`uname`" in + CYGWIN*) + CLASSPATH=`cygpath -p -w "$CLASSPATH"` + ;; +esac + +launch_service() +{ + pidpath=$1 + foreground=$2 + props=$3 + es_parms="-Delasticsearch -Des.path.home=$ES_HOME" + + if [ "x$pidpath" != "x" ]; then + es_parms="$es_parms -Des-pidfile=$pidpath" + fi + + # The es-daemon option will tell Bootstrap to close stdout/stderr, + # but it's up to us not to background. 
+ if [ "x$foreground" != "x" ]; then + es_parms="$es_parms -Des-foreground=yes" + $JAVA $JAVA_OPTS $ES_JAVA_OPTS $es_parms -cp $CLASSPATH $props \ + org.elasticsearch.bootstrap.Bootstrap + else + # Startup Bootstrap, background it, and write the pid. + exec $JAVA $JAVA_OPTS $ES_JAVA_OPTS $es_parms -cp $CLASSPATH $props \ + org.elasticsearch.bootstrap.Bootstrap <&- & + [ ! -z $pidpath ] && printf "%d" $! > $pidpath + fi + + return $? +} + +# Parse any command line options. +args=`getopt fhp:bD:X: "$@"` +eval set -- "$args" + +while true; do + case "$1" in + -p) + pidfile="$2" + shift 2 + ;; + -f) + foreground="yes" + shift + ;; + -h) + echo "Usage: $0 [-d] [-h] [-p pidfile]" + exit 0 + ;; + -D) + properties="$properties -D$2" + shift 2 + ;; + -X) + properties="$properties -X$2" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "Error parsing arguments!" >&2 + exit 1 + ;; + esac +done + +# Start up the service +launch_service "$pidfile" "$foreground" "$properties" + +exit $? diff --git a/bin/elasticsearch.bat b/bin/elasticsearch.bat new file mode 100644 index 00000000000..9cec02fc953 Binary files /dev/null and b/bin/elasticsearch.bat differ diff --git a/bin/elasticsearch.in.sh b/bin/elasticsearch.in.sh new file mode 100644 index 00000000000..0e51b8e9f36 --- /dev/null +++ b/bin/elasticsearch.in.sh @@ -0,0 +1,14 @@ +CLASSPATH=$CLASSPATH:$ES_HOME/lib/* + +# Arguments to pass to the JVM +# java.net.preferIPv4Stack=true: Better OOTB experience, especially with jgroups +JAVA_OPTS=" \ + -Xms128M \ + -Xmx1G \ + -Djline.enabled=true \ + -Djava.net.preferIPv4Stack=true \ + -XX:+AggressiveOpts \ + -XX:+UseParNewGC \ + -XX:+UseConcMarkSweepGC \ + -XX:+CMSParallelRemarkEnabled \ + -XX:+HeapDumpOnOutOfMemoryError" diff --git a/build.gradle b/build.gradle new file mode 100644 index 00000000000..079bae6a8c1 --- /dev/null +++ b/build.gradle @@ -0,0 +1,112 @@ +import java.text.SimpleDateFormat + +defaultTasks "clean", "devRelease" + +usePlugin BasePlugin + +archivesBaseName = 
'elasticsearch' + +buildTime = new Date() +SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); +sdf.setTimeZone(TimeZone.getTimeZone("UTC")); +buildTimeStr = sdf.format(buildTime) + +versionNumber = '0.4.0' +devBuild = true + +explodedDistDir = new File(distsDir, 'exploded') +explodedDistLibDir = new File(explodedDistDir, 'lib') +explodedDistBinDir = new File(explodedDistDir, 'bin') +explodedDistConfigDir = new File(explodedDistDir, 'config') + + +gradle.taskGraph.whenReady {graph -> + if (graph.hasTask(':release')) { + devBuild = false + } +} + +allprojects { + group = 'org.elasticsearch' + version = versionNumber + + plugins.withType(JavaPlugin).whenPluginAdded { + sourceCompatibility = 1.6 + targetCompatibility = 1.6 + } + + repositories { + mavenCentral() + mavenRepo urls: 'http://repository.jboss.com/maven2/' + mavenRepo urls: 'http://snakeyamlrepo.appspot.com/repository' // for snakeyaml + } +} + +configurations { + dists + distLib { + visible = false + } +} + +dependencies { + distLib project(':elasticsearch') +} + +task explodedDist(dependsOn: [configurations.distLib], description: 'Builds a minimal distribution image') << { + [explodedDistDir, explodedDistLibDir, explodedDistBinDir, explodedDistConfigDir]*.mkdirs() + // remove old elasticsearch files + ant.delete { fileset(dir: explodedDistLibDir, includes: "$archivesBaseName-*.jar") } + + copy { + from configurations.distLib + into explodedDistLibDir + } + + copy { from('bin'); into explodedDistBinDir } + copy { from('config'); into explodedDistConfigDir } + + copy { + from('.') + into explodedDistDir + include 'LICENSE.txt' + include 'NOTICE.txt' + include 'README.textile' + } + + ant.chmod(dir: "$explodedDistDir/bin", perm: "ugo+rx", includes: "**/*") +} + +task zip(type: Zip) { + dependsOn explodedDist +// classifier = 'all' +} + +zip.doFirst {task -> + zipRootFolder = "$archivesBaseName-${-> version}" + task.configure { + zipFileSet(dir: explodedDistDir, prefix: zipRootFolder) { + 
exclude 'bin/*' + } + zipFileSet(dir: explodedDistDir, prefix: zipRootFolder, fileMode: '775') { + include 'bin/*' + exclude 'bin/*.*' + } + zipFileSet(dir: explodedDistDir, prefix: zipRootFolder) { + include 'bin/*.*' + } + } +} + +task devRelease(dependsOn: [zip]) { + +} + +task release(dependsOn: [zip]) { + +} + +task wrapper(type: Wrapper) { + gradleVersion = '0.8' + jarPath = 'gradle' +} diff --git a/README b/config/elasticsearch.yml similarity index 100% rename from README rename to config/elasticsearch.yml diff --git a/config/logging.yml b/config/logging.yml new file mode 100644 index 00000000000..e87252e8ed1 --- /dev/null +++ b/config/logging.yml @@ -0,0 +1,19 @@ +rootLogger: INFO, console, file +logger: + jgroups: WARN + +appender: + console: + type: console + layout: + type: consolePattern + conversionPattern: "[%d{ABSOLUTE}][%-5p][%-25c] %m%n" + + file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ABSOLUTE}][%-5p][%-25c] %m%n" + \ No newline at end of file diff --git a/gradle/gradle-wrapper.jar b/gradle/gradle-wrapper.jar new file mode 100644 index 00000000000..66d374e9635 Binary files /dev/null and b/gradle/gradle-wrapper.jar differ diff --git a/gradle/gradle-wrapper.properties b/gradle/gradle-wrapper.properties new file mode 100644 index 00000000000..549c35e189b --- /dev/null +++ b/gradle/gradle-wrapper.properties @@ -0,0 +1,9 @@ +#Sun Dec 13 20:16:46 IST 2009 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +distributionVersion=0.8 +zipStorePath=wrapper/dists +urlRoot=http\://dist.codehaus.org/gradle +distributionName=gradle +distributionClassifier=bin diff --git a/gradlew b/gradlew new file mode 100755 index 00000000000..1fef679f431 --- /dev/null +++ b/gradlew @@ -0,0 +1,136 @@ +#!/bin/bash + +############################################################################## +## ## +## Gradle wrapper 
script for UN*X ## +## ## +############################################################################## + +# Uncomment those lines to set JVM options. GRADLE_OPTS and JAVA_OPTS can be used together. +# GRADLE_OPTS="$GRADLE_OPTS -Xmx512" +# JAVA_OPTS="$JAVA_OPTS -Xmx512" + +warn ( ) { + echo "${PROGNAME}: $*" +} + +die ( ) { + warn "$*" + exit 1 +} + + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# Attempt to set JAVA_HOME if it's not already set. +if [ -z "$JAVA_HOME" ] ; then + if $darwin ; then + [ -z "$JAVA_HOME" -a -d "/Library/Java/Home" ] && export JAVA_HOME="/Library/Java/Home" + [ -z "$JAVA_HOME" -a -d "/System/Library/Frameworks/JavaVM.framework/Home" ] && export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Home" + else + javaExecutable="`which javac`" + [ -z "$javaExecutable" -o "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ] && die "JAVA_HOME not set and cannot find javac to deduce location, please set JAVA_HOME." + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + [ `expr "$readLink" : '\([^ ]*\)'` = "no" ] && die "JAVA_HOME not set and readlink not available, please set JAVA_HOME." + javaExecutable="`readlink -f \"$javaExecutable\"`" + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + export JAVA_HOME="$javaHome" + fi +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVACMD" ] && JAVACMD=`cygpath --unix "$JAVACMD"` + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +STARTER_MAIN_CLASS=org.gradle.wrapper.WrapperMain +CLASSPATH=`dirname "$0"`/gradle/gradle-wrapper.jar +WRAPPER_PROPERTIES=`dirname "$0"`/gradle/gradle-wrapper.properties +# Determine the Java command to use to start the JVM. 
+if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="java" + fi +fi +if [ ! -x "$JAVACMD" ] ; then + die "JAVA_HOME is not defined correctly, can not execute: $JAVACMD" +fi +if [ -z "$JAVA_HOME" ] ; then + warn "JAVA_HOME environment variable is not set" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + JAVA_HOME=`cygpath --path --mixed "$JAVA_HOME"` + TOOLS_JAR=`cygpath --path --mixed "$TOOLS_JAR"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" 
"$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +"$JAVACMD" $JAVA_OPTS $GRADLE_OPTS \ + -classpath "$CLASSPATH" \ + -Dtools.jar="$TOOLS_JAR" \ + -Dorg.gradle.wrapper.properties="$WRAPPER_PROPERTIES" \ + $STARTER_MAIN_CLASS \ + "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000000..16298aa17ca Binary files /dev/null and b/gradlew.bat differ diff --git a/modules/benchmark/jmeter/jmx/index-count.jmx b/modules/benchmark/jmeter/jmx/index-count.jmx new file mode 100644 index 00000000000..0026e4b8d59 --- /dev/null +++ b/modules/benchmark/jmeter/jmx/index-count.jmx @@ -0,0 +1,210 @@ + + + + + + false + false + + + + + + + + + + host + localhost + = + + + numberOfThreads + 20 + = + + + numberOfLoops + 10000 + = + + + + + + + false + ${numberOfLoops} + + ${numberOfThreads} + 0 + 1260471148000 + 1260471148000 + false + continue + + + + + + 10000 + 0 + + true + + personId + + + + + personAge + + 1 + 50 + + true + + + + + + + ${host} + 9200 + + + + + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + diff --git a/modules/benchmark/jmeter/jmx/index-get.jmx b/modules/benchmark/jmeter/jmx/index-get.jmx new file mode 100644 index 00000000000..2e14e72f357 --- /dev/null +++ 
b/modules/benchmark/jmeter/jmx/index-get.jmx @@ -0,0 +1,211 @@ + + + + + + false + false + + + + + + + + + + host + localhost + = + + + numberOfThreads + 20 + = + + + numberOfLoops + 10000 + = + + + + + + + false + ${numberOfLoops} + + ${numberOfThreads} + 0 + 1260471148000 + 1260471148000 + false + continue + + + + + + 10000 + 0 + + true + + personId + + + + + personAge + + 1 + 50 + + true + + + + + + + ${host} + 9200 + + + + + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + + + + + + + + + /test/person/${personId} + GET + false + false + true + false + + + + false + + + + + + Assertion.response_code + false + 2 + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + diff --git a/modules/benchmark/jmeter/jmx/index-search.jmx b/modules/benchmark/jmeter/jmx/index-search.jmx new file mode 100644 index 00000000000..0026e4b8d59 --- /dev/null +++ b/modules/benchmark/jmeter/jmx/index-search.jmx @@ -0,0 +1,210 @@ + + + + + + false + false + + + + + + + + + + host + localhost + = + + + numberOfThreads + 20 + = + + + numberOfLoops + 10000 + = + + + + + + + false + ${numberOfLoops} + + ${numberOfThreads} + 0 + 1260471148000 + 1260471148000 + false + continue + + + + + + 10000 + 0 + + true + + personId + + + + + personAge + + 1 + 50 + + true + + + + + + + ${host} + 9200 + + + + + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + + + false + { name : 
"person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + diff --git a/modules/benchmark/jmeter/jmx/index.jmx b/modules/benchmark/jmeter/jmx/index.jmx new file mode 100644 index 00000000000..0026e4b8d59 --- /dev/null +++ b/modules/benchmark/jmeter/jmx/index.jmx @@ -0,0 +1,210 @@ + + + + + + false + false + + + + + + + + + + host + localhost + = + + + numberOfThreads + 20 + = + + + numberOfLoops + 10000 + = + + + + + + + false + ${numberOfLoops} + + ${numberOfThreads} + 0 + 1260471148000 + 1260471148000 + false + continue + + + + + + 10000 + 0 + + true + + personId + + + + + personAge + + 1 + 50 + + true + + + + + + + ${host} + 9200 + + + + + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + diff --git a/modules/benchmark/jmeter/jmx/ping-single.jmx b/modules/benchmark/jmeter/jmx/ping-single.jmx new file 
mode 100644 index 00000000000..0026e4b8d59 --- /dev/null +++ b/modules/benchmark/jmeter/jmx/ping-single.jmx @@ -0,0 +1,210 @@ + + + + + + false + false + + + + + + + + + + host + localhost + = + + + numberOfThreads + 20 + = + + + numberOfLoops + 10000 + = + + + + + + + false + ${numberOfLoops} + + ${numberOfThreads} + 0 + 1260471148000 + 1260471148000 + false + continue + + + + + + 10000 + 0 + + true + + personId + + + + + personAge + + 1 + 50 + + true + + + + + + + ${host} + 9200 + + + + + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + + + false + { name : "person${personId}", age : ${personAge} } + = + true + + + + + + + + + + /test/person/${personId} + PUT + false + false + true + false + + + + false + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + false + false + false + false + false + 0 + true + + + + + + + + diff --git a/modules/benchmark/micro/build.gradle b/modules/benchmark/micro/build.gradle new file mode 100644 index 00000000000..9aa6af68737 --- /dev/null +++ b/modules/benchmark/micro/build.gradle @@ -0,0 +1,26 @@ +dependsOn(':elasticsearch') + +usePlugin 'java' + +archivesBaseName = "$rootProject.archivesBaseName-$project.archivesBaseName" + +configurations.compile.transitive = true +configurations.testCompile.transitive = true + +// no need to use the resource dir +sourceSets.main.resources.srcDir 'src/main/java' +sourceSets.test.resources.srcDir 'src/test/java' + +dependencies { + compile project(':elasticsearch') + + testCompile('org.testng:testng:5.10:jdk15') { transitive = false } + testCompile 'org.hamcrest:hamcrest-all:1.1' +} + 
+test { + useTestNG() + options.suiteName = project.name + options.listeners = ["org.elasticsearch.util.testng.Listeners"] + options.systemProperties = ["es.test.log.conf": System.getProperty("es.test.log.conf", "log4j-gradle.properties")] +} diff --git a/modules/benchmark/micro/src/main/java/log4j.properties b/modules/benchmark/micro/src/main/java/log4j.properties new file mode 100644 index 00000000000..c7e207d81d5 --- /dev/null +++ b/modules/benchmark/micro/src/main/java/log4j.properties @@ -0,0 +1,18 @@ +log4j.rootLogger=INFO, out +log4j.logger.jgroups=WARN + +#log4j.logger.discovery=TRACE +#log4j.logger.cluster=TRACE +#log4j.logger.indices.cluster=DEBUG +#log4j.logger.index=TRACE +#log4j.logger.index.engine=DEBUG +#log4j.logger.index.shard=TRACE +#log4j.logger.index.cache=DEBUG +#log4j.logger.http=TRACE +log4j.logger.monitor.memory=TRACE +#log4j.logger.cluster.action.shard=TRACE +#log4j.logger.index.gateway=TRACE + +log4j.appender.out=org.apache.log4j.ConsoleAppender +log4j.appender.out.layout=org.apache.log4j.PatternLayout +log4j.appender.out.layout.ConversionPattern=[%d{ABSOLUTE}][%-5p][%-25c] %m%n diff --git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/index/engine/SimpleEngineBenchmark.java b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/index/engine/SimpleEngineBenchmark.java new file mode 100644 index 00000000000..95150c9ca15 --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/index/engine/SimpleEngineBenchmark.java @@ -0,0 +1,305 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.benchmark.index.engine; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LoadFirstFieldSelector; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy; +import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.robin.RobinEngine; +import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider; +import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.memory.MemoryStore; +import org.elasticsearch.index.translog.memory.MemoryTranslog; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.dynamic.DynamicThreadPool; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.lucene.Lucene; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.util.lucene.DocumentBuilder.*; +import 
static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleEngineBenchmark { + + private final Store store; + + private final Engine engine; + + + private final AtomicInteger idGenerator = new AtomicInteger(); + + private String[] contentItems = new String[]{"test1", "test2", "test3"}; + + private volatile int lastRefreshedId = 0; + + + private int searcherIterations = 10; + + private Thread[] searcherThreads = new Thread[1]; + + private int writerIterations = 10; + + private Thread[] writerThreads = new Thread[1]; + + private TimeValue refreshSchedule = new TimeValue(1, TimeUnit.SECONDS); + + private TimeValue flushSchedule = new TimeValue(1, TimeUnit.MINUTES); + + + private CountDownLatch latch; + private CyclicBarrier barrier1; + private CyclicBarrier barrier2; + + + // scheduled thread pool for both refresh and flush operations + private ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(2); + + public SimpleEngineBenchmark(Store store, Engine engine) { + this.store = store; + this.engine = engine; + } + + public SimpleEngineBenchmark numberOfContentItems(int numberOfContentItems) { + contentItems = new String[numberOfContentItems]; + for (int i = 0; i < contentItems.length; i++) { + contentItems[i] = "content" + i; + } + return this; + } + + public SimpleEngineBenchmark searcherThreads(int numberOfSearcherThreads) { + searcherThreads = new Thread[numberOfSearcherThreads]; + return this; + } + + public SimpleEngineBenchmark searcherIterations(int searcherIterations) { + this.searcherIterations = searcherIterations; + return this; + } + + public SimpleEngineBenchmark writerThreads(int numberOfWriterThreads) { + writerThreads = new Thread[numberOfWriterThreads]; + return this; + } + + public SimpleEngineBenchmark writerIterations(int writerIterations) { + this.writerIterations = writerIterations; + return this; + } + + public SimpleEngineBenchmark 
refreshSchedule(TimeValue refreshSchedule) { + this.refreshSchedule = refreshSchedule; + return this; + } + + public SimpleEngineBenchmark flushSchedule(TimeValue flushSchedule) { + this.flushSchedule = flushSchedule; + return this; + } + + public SimpleEngineBenchmark build() { + for (int i = 0; i < searcherThreads.length; i++) { + searcherThreads[i] = new Thread(new SearcherThread(), "Searcher[" + i + "]"); + } + for (int i = 0; i < writerThreads.length; i++) { + writerThreads[i] = new Thread(new WriterThread(), "Writer[" + i + "]"); + } + + latch = new CountDownLatch(searcherThreads.length + writerThreads.length); + barrier1 = new CyclicBarrier(searcherThreads.length + writerThreads.length + 1); + barrier2 = new CyclicBarrier(searcherThreads.length + writerThreads.length + 1); + + // warmup by indexing all content items + StopWatch stopWatch = new StopWatch(); + stopWatch.start(); + for (String contentItem : contentItems) { + int id = idGenerator.incrementAndGet(); + String sId = Integer.toString(id); + Document doc = doc().add(field("_id", sId)) + .add(field("content", contentItem)).build(); + engine.index(new Engine.Index(new Term("_id", sId), doc, Lucene.STANDARD_ANALYZER, "type", sId, "{ ... 
}")); + } + engine.refresh(true); + stopWatch.stop(); + System.out.println("Warmup of [" + contentItems.length + "] content items, took " + stopWatch.totalTime()); + + return this; + } + + public void run() throws Exception { + for (Thread t : searcherThreads) { + t.start(); + } + for (Thread t : writerThreads) { + t.start(); + } + barrier1.await(); + + Refresher refresher = new Refresher(); + scheduledExecutorService.scheduleWithFixedDelay(refresher, refreshSchedule.millis(), refreshSchedule.millis(), TimeUnit.MILLISECONDS); + Flusher flusher = new Flusher(); + scheduledExecutorService.scheduleWithFixedDelay(flusher, flushSchedule.millis(), flushSchedule.millis(), TimeUnit.MILLISECONDS); + + StopWatch stopWatch = new StopWatch(); + stopWatch.start(); + barrier2.await(); + + latch.await(); + stopWatch.stop(); + + System.out.println("Summary"); + System.out.println(" -- Readers [" + searcherThreads.length + "] with [" + searcherIterations + "] iterations"); + System.out.println(" -- Writers [" + writerThreads.length + "] with [" + writerIterations + "] iterations"); + System.out.println(" -- Took: " + stopWatch.totalTime()); + System.out.println(" -- Refresh [" + refresher.id + "] took: " + refresher.stopWatch.totalTime()); + System.out.println(" -- Flush [" + flusher.id + "] took: " + flusher.stopWatch.totalTime()); + System.out.println(" -- Store size " + store.estimateSize()); + + scheduledExecutorService.shutdown(); + + engine.refresh(true); + stopWatch = new StopWatch(); + stopWatch.start(); + Engine.Searcher searcher = engine.searcher(); + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), idGenerator.get() + 1); + stopWatch.stop(); + System.out.println(" -- Indexed [" + idGenerator.get() + "] docs, found [" + topDocs.totalHits + "] hits, took " + stopWatch.totalTime()); + searcher.release(); + } + + private String content(long number) { + return contentItems[((int) (number % contentItems.length))]; + } + + private class Flusher implements 
Runnable { + StopWatch stopWatch = new StopWatch(); + private int id; + + @Override public void run() { + stopWatch.start("" + ++id); + engine.flush(); + stopWatch.stop(); + } + } + + private class Refresher implements Runnable { + StopWatch stopWatch = new StopWatch(); + private int id; + + @Override public synchronized void run() { + stopWatch.start("" + ++id); + int lastId = idGenerator.get(); + engine.refresh(true); + lastRefreshedId = lastId; + stopWatch.stop(); + } + } + + private class SearcherThread implements Runnable { + @Override public void run() { + try { + barrier1.await(); + barrier2.await(); + for (int i = 0; i < searcherIterations; i++) { + Engine.Searcher searcher = engine.searcher(); + TopDocs topDocs = searcher.searcher().search(new TermQuery(new Term("content", content(i))), 10); + // read one + searcher.searcher().doc(topDocs.scoreDocs[0].doc, new LoadFirstFieldSelector()); + searcher.release(); + } + } catch (Exception e) { + System.out.println("Searcher thread failed"); + e.printStackTrace(); + } finally { + latch.countDown(); + } + } + } + + private class WriterThread implements Runnable { + @Override public void run() { + try { + barrier1.await(); + barrier2.await(); + for (int i = 0; i < writerIterations; i++) { + int id = idGenerator.incrementAndGet(); + String sId = Integer.toString(id); + Document doc = doc().add(field("_id", sId)) + .add(field("content", content(id))).build(); + engine.index(new Engine.Index(new Term("_id", sId), doc, Lucene.STANDARD_ANALYZER, "type", sId, "{ ... 
}")); + } + } catch (Exception e) { + System.out.println("Writer thread failed"); + e.printStackTrace(); + } finally { + latch.countDown(); + } + } + } + + public static void main(String[] args) throws Exception { + ShardId shardId = new ShardId(new Index("index"), 1); + Settings settings = EMPTY_SETTINGS; + +// Store store = new RamStore(shardId, settings); + Store store = new MemoryStore(shardId, settings); +// Store store = new NioFsStore(shardId, settings); + + store.deleteContent(); + + ThreadPool threadPool = new DynamicThreadPool(); + SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, settings)); + Engine engine = new RobinEngine(shardId, settings, store, deletionPolicy, new MemoryTranslog(shardId, settings), new LogByteSizeMergePolicyProvider(store), + new ConcurrentMergeSchedulerProvider(shardId, settings), new AnalysisService(shardId.index()), new SimilarityService(shardId.index())); + engine.start(); + + SimpleEngineBenchmark benchmark = new SimpleEngineBenchmark(store, engine) + .numberOfContentItems(1000) + .searcherThreads(50).searcherIterations(10000) + .writerThreads(10).writerIterations(10000) + .refreshSchedule(new TimeValue(1, TimeUnit.SECONDS)) + .flushSchedule(new TimeValue(1, TimeUnit.MINUTES)) + .build(); + + benchmark.run(); + + engine.close(); + store.close(); + threadPool.shutdown(); + } +} diff --git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/index/store/SimpleStoreBenchmark.java b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/index/store/SimpleStoreBenchmark.java new file mode 100644 index 00000000000..90ee69600c8 --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/index/store/SimpleStoreBenchmark.java @@ -0,0 +1,310 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.benchmark.index.store; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.bytebuffer.ByteBufferStore; +import org.elasticsearch.index.store.fs.MmapFsStore; +import org.elasticsearch.index.store.fs.NioFsStore; +import org.elasticsearch.index.store.fs.SimpleFsStore; +import org.elasticsearch.index.store.memory.MemoryStore; +import org.elasticsearch.index.store.ram.RamStore; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.settings.Settings; + +import java.lang.management.ManagementFactory; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicLong; + +import static java.util.concurrent.TimeUnit.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +/** + * @author kimchy + */ +public class SimpleStoreBenchmark { + + 
private final AtomicLong dynamicFilesCounter = new AtomicLong(); + + private final Store store; + + private String[] staticFiles = new String[10]; + + private SizeValue staticFileSize = new SizeValue(5, SizeUnit.MB); + + private SizeValue dynamicFileSize = new SizeValue(1, SizeUnit.MB); + + + private int readerIterations = 10; + + private int writerIterations = 10; + + private Thread[] readerThreads = new Thread[1]; + + private Thread[] writerThreads = new Thread[1]; + + private CountDownLatch latch; + private CyclicBarrier barrier1; + private CyclicBarrier barrier2; + + public SimpleStoreBenchmark(Store store) throws Exception { + this.store = store; + } + + public SimpleStoreBenchmark numberStaticFiles(int numberStaticFiles) { + this.staticFiles = new String[numberStaticFiles]; + return this; + } + + public SimpleStoreBenchmark staticFileSize(SizeValue staticFileSize) { + this.staticFileSize = staticFileSize; + return this; + } + + public SimpleStoreBenchmark dynamicFileSize(SizeValue dynamicFileSize) { + this.dynamicFileSize = dynamicFileSize; + return this; + } + + public SimpleStoreBenchmark readerThreads(int readerThreads) { + this.readerThreads = new Thread[readerThreads]; + return this; + } + + public SimpleStoreBenchmark readerIterations(int readerIterations) { + this.readerIterations = readerIterations; + return this; + } + + public SimpleStoreBenchmark writerIterations(int writerIterations) { + this.writerIterations = writerIterations; + return this; + } + + public SimpleStoreBenchmark writerThreads(int writerThreads) { + this.writerThreads = new Thread[writerThreads]; + return this; + } + + public SimpleStoreBenchmark build() throws Exception { + System.out.println("Creating [" + staticFiles.length + "] static files with size [" + staticFileSize + "]"); + for (int i = 0; i < staticFiles.length; i++) { + staticFiles[i] = "static" + i; + IndexOutput io = store.directory().createOutput(staticFiles[i]); + for (long sizeCounter = 0; sizeCounter < 
staticFileSize.bytes(); sizeCounter++) { + io.writeByte((byte) 1); + } + io.close(); + } + System.out.println("Using [" + dynamicFileSize + "] size for dynamic files"); + + // warmp + StopWatch stopWatch = new StopWatch("warmup"); + stopWatch.start(); + for (String staticFile : staticFiles) { + IndexInput ii = store.directory().openInput(staticFile); + // do a full read + for (long counter = 0; counter < ii.length(); counter++) { + byte result = ii.readByte(); + if (result != 1) { + System.out.println("Failure, read wrong value [" + result + "]"); + } + } + // do a list of the files + store.directory().listAll(); + } + stopWatch.stop(); + System.out.println("Warmup Took: " + stopWatch.shortSummary()); + + for (int i = 0; i < readerThreads.length; i++) { + readerThreads[i] = new Thread(new ReaderThread(), "Reader[" + i + "]"); + } + for (int i = 0; i < writerThreads.length; i++) { + writerThreads[i] = new Thread(new WriterThread(), "Writer[" + i + "]"); + } + + latch = new CountDownLatch(readerThreads.length + writerThreads.length); + barrier1 = new CyclicBarrier(readerThreads.length + writerThreads.length + 1); + barrier2 = new CyclicBarrier(readerThreads.length + writerThreads.length + 1); + + return this; + } + + public void run() throws Exception { + for (int i = 0; i < 3; i++) { + System.gc(); + MILLISECONDS.sleep(100); + } + + long emptyUsed = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed(); + + System.out.println("Running:"); + System.out.println(" -- Readers [" + readerThreads.length + "] with [" + readerIterations + "] iterations"); + System.out.println(" -- Writers [" + writerThreads.length + "] with [" + writerIterations + "] iterations"); + for (Thread t : readerThreads) { + t.start(); + } + for (Thread t : writerThreads) { + t.start(); + } + barrier1.await(); + + StopWatch stopWatch = new StopWatch(); + stopWatch.start(); + barrier2.await(); + + latch.await(); + stopWatch.stop(); + + System.out.println("Took: " + 
stopWatch.shortSummary()); + + for (int i = 0; i < 3; i++) { + System.gc(); + MILLISECONDS.sleep(100); + } + long bytesTaken = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() - emptyUsed; + System.out.println("Size of [" + staticFiles.length + "], each with size [" + staticFileSize + "], is " + new SizeValue(bytesTaken, SizeUnit.BYTES)); + } + + private class ReaderThread implements Runnable { + @Override public void run() { + try { + barrier1.await(); + barrier2.await(); + } catch (Exception e) { + e.printStackTrace(); + } + try { + for (int i = 0; i < readerIterations; i++) { + for (String staticFile : staticFiles) { + // do a list of the files + store.directory().listAll(); + + IndexInput ii = store.directory().openInput(staticFile); + // do a full read + for (long counter = 0; counter < ii.length(); counter++) { + byte result = ii.readByte(); + if (result != 1) { + System.out.println("Failure, read wrong value [" + result + "]"); + } + } + // do a list of the files + store.directory().listAll(); + + // do a seek and read some byes + ii.seek(ii.length() / 2); + ii.readByte(); + ii.readByte(); + + // do a list of the files + store.directory().listAll(); + } + } + } catch (Exception e) { + System.out.println("Reader Thread failed: " + e.getMessage()); + e.printStackTrace(); + } + latch.countDown(); + } + } + + private class WriterThread implements Runnable { + @Override public void run() { + try { + barrier1.await(); + barrier2.await(); + } catch (Exception e) { + e.printStackTrace(); + } + try { + for (int i = 0; i < writerIterations; i++) { + String dynamicFileName = "dynamic" + dynamicFilesCounter.incrementAndGet(); + IndexOutput io = store.directory().createOutput(dynamicFileName); + for (long sizeCounter = 0; sizeCounter < dynamicFileSize.bytes(); sizeCounter++) { + io.writeByte((byte) 1); + } + io.close(); + + store.directory().deleteFile(dynamicFileName); + } + } catch (Exception e) { + System.out.println("Writer thread failed: " + 
e.getMessage()); + e.printStackTrace(); + } + latch.countDown(); + } + } + + public static void main(String[] args) throws Exception { + Environment environment = new Environment(); + Settings settings = EMPTY_SETTINGS; + String localNodeId = "nodeId"; + ShardId shardId = new ShardId(new Index("index"), 1); + String type = args.length > 0 ? args[0] : "ram"; + Store store; + if (type.equalsIgnoreCase("ram")) { + store = new RamStore(shardId, settings); + } else if (type.equalsIgnoreCase("simple-fs")) { + store = new SimpleFsStore(shardId, settings, environment, localNodeId); + } else if (type.equalsIgnoreCase("mmap-fs")) { + store = new NioFsStore(shardId, settings, environment, localNodeId); + } else if (type.equalsIgnoreCase("nio-fs")) { + store = new MmapFsStore(shardId, settings, environment, localNodeId); + } else if (type.equalsIgnoreCase("bb")) { + Settings byteBufferSettings = settingsBuilder() + .putAll(settings) + .putBoolean("index.store.bytebuffer.direct", false) + .build(); + store = new ByteBufferStore(shardId, byteBufferSettings); + } else if (type.equalsIgnoreCase("bb-direct")) { + Settings byteBufferSettings = settingsBuilder() + .putAll(settings) + .putBoolean("index.store.bytebuffer.direct", true) + .build(); + store = new ByteBufferStore(shardId, byteBufferSettings); + } else if (type.equalsIgnoreCase("mem")) { + Settings memorySettings = settingsBuilder() + .putAll(settings) + .build(); + store = new MemoryStore(shardId, memorySettings); + } else { + throw new IllegalArgumentException("No type store [" + type + "]"); + } + System.out.println("Using Store [" + store + "]"); + store.deleteContent(); + SimpleStoreBenchmark simpleStoreBenchmark = new SimpleStoreBenchmark(store) + .numberStaticFiles(5).staticFileSize(new SizeValue(5, SizeUnit.MB)) + .dynamicFileSize(new SizeValue(1, SizeUnit.MB)) + .readerThreads(5).readerIterations(10) + .writerThreads(2).writerIterations(10) + .build(); + simpleStoreBenchmark.run(); + store.close(); + } +} diff 
--git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/micro/deps/jackson/JacksonTypesBenchmark.java b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/micro/deps/jackson/JacksonTypesBenchmark.java new file mode 100644 index 00000000000..825f1c570ee --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/micro/deps/jackson/JacksonTypesBenchmark.java @@ -0,0 +1,149 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.benchmark.micro.deps.jackson; + +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.map.ObjectMapper; +import org.elasticsearch.util.Preconditions; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.io.FastStringReader; +import org.elasticsearch.util.io.Streams; +import org.elasticsearch.util.io.StringBuilderWriter; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Map; + +import static org.elasticsearch.util.json.Jackson.*; + +/** + * A simple Jackson type benchmark to check how well it converts to different types it supports + * such as Map and JsonNode. 
+ * + * @author kimchy (Shay Banon) + */ +@SuppressWarnings({"unchecked"}) +public class JacksonTypesBenchmark { + + private final String jsonString; + + private final int factor; + + private final int cycles; + + private final ObjectMapper objectMapper; + + private final JsonType[] types; + + public JacksonTypesBenchmark(String jsonString) throws IOException { + Preconditions.checkNotNull(jsonString, "jsonString must have a value"); + this.jsonString = jsonString; + this.objectMapper = newObjectMapper(); + this.factor = 10; + this.cycles = 10000; + + // warm things up + JsonType[] types = buildTypes(); + for (JsonType type : types) { + type.runRead(1000); + type.runWrite(1000); + } + + this.types = buildTypes(); + } + + /** + * Runs the test. Will run factor * cycles iterations interleaving the + * different type operations by factor. + */ + public void run() throws IOException { + // interleave the type tests so GC won't be taken into account + for (int i = 0; i < factor; i++) { + for (JsonType type : types) { + type.runRead(cycles); + type.runWrite(cycles); + } + } + + System.out.println("Run [" + (cycles * factor) + "] iterations"); + System.out.println("=============================="); + for (JsonType type : types) { + System.out.println("------------------------------"); + System.out.println("Type [" + type.type.getSimpleName() + "]"); + System.out.println(type.readStopWatch.shortSummary()); + System.out.println(type.writeStopWatch.shortSummary()); + System.out.println("------------------------------"); + } + } + + /** + * Builds the types that we are going to test. 
+ */ + private JsonType[] buildTypes() throws IOException { + JsonType[] types = new JsonType[2]; + types[0] = new JsonType(jsonString, objectMapper, Map.class); + types[1] = new JsonType(jsonString, objectMapper, JsonNode.class); + return types; + } + + /** + * Represents a test for a specific type, allowing to runRead and runWrite + * on it and finally getting the results from the write/read stop watches. + */ + private static class JsonType { + final StopWatch readStopWatch = new StopWatch("read").keepTaskList(false); + final StopWatch writeStopWatch = new StopWatch("write").keepTaskList(false); + final String jsonString; + final ObjectMapper objectMapper; + final Class type; + final Object master; + + protected JsonType(String jsonString, ObjectMapper objectMapper, Class type) throws IOException { + this.jsonString = jsonString; + this.objectMapper = objectMapper; + this.type = type; + this.master = objectMapper.readValue(new FastStringReader(jsonString), type); + } + + void runRead(int cycles) throws IOException { + readStopWatch.start(); + for (int i = 0; i < cycles; i++) { + objectMapper.readValue(new FastStringReader(jsonString), type); + } + readStopWatch.stop(); + } + + void runWrite(int cycles) throws IOException { + writeStopWatch.start(); + for (int i = 0; i < cycles; i++) { + StringBuilderWriter builderWriter = StringBuilderWriter.Cached.cached(); + objectMapper.writeValue(builderWriter, master); + builderWriter.toString(); + } + writeStopWatch.stop(); + } + } + + public static void main(String[] args) throws Exception { + JacksonTypesBenchmark benchmark = new JacksonTypesBenchmark( + Streams.copyToString(new InputStreamReader(JacksonTypesBenchmark.class.getResourceAsStream("/org/elasticsearch/benchmark/micro/deps/jackson/test1.json")))); + benchmark.run(); + } +} diff --git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/micro/deps/jackson/test1.json 
b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/micro/deps/jackson/test1.json new file mode 100644 index 00000000000..28d39a58027 --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/micro/deps/jackson/test1.json @@ -0,0 +1,22 @@ +{ + glossary: { + "title": "example glossary", + "GlossDiv": { + "title": "S", + "GlossList": { + "GlossEntry": { + "ID": "SGML", + "SortAs": "SGML", + "GlossTerm": "Standard Generalized Markup Language", + "Acronym": "SGML", + "Abbrev": "ISO 8879:1986", + "GlossDef": { + "para": "A meta-markup language, used to create markup languages such as DocBook.", + "GlossSeeAlso": ["GML", "XML"] + }, + "GlossSee": "markup" + } + } + } + } +} \ No newline at end of file diff --git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/monitor/memory/SimpleMemoryMonitorBenchmark.java b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/monitor/memory/SimpleMemoryMonitorBenchmark.java new file mode 100644 index 00000000000..ded387f011d --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/monitor/memory/SimpleMemoryMonitorBenchmark.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.benchmark.monitor.memory; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.server.Server; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.settings.Settings; + +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.server.ServerBuilder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleMemoryMonitorBenchmark { + + public static void main(String[] args) throws Exception { + Random random = new Random(); + + Settings settings = settingsBuilder() + .putTime("cluster.routing.schedule", 200, TimeUnit.MILLISECONDS) + .putInt(SETTING_NUMBER_OF_SHARDS, 5) + .putInt(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + Server server1 = serverBuilder().settings(settingsBuilder().putAll(settings).put("name", "server1")).server(); + Server server2 = serverBuilder().settings(settingsBuilder().putAll(settings).put("name", "server2")).server(); + + Client client1 = server1.client(); + + Thread.sleep(1000); + client1.admin().indices().create(createIndexRequest("test")).actionGet(); + Thread.sleep(5000); + + StopWatch stopWatch = new StopWatch().start(); + int COUNT = 200000; + System.out.println("Indexing [" + COUNT + "] ..."); + for (int i = 0; i < COUNT; i++) { + client1.index( + indexRequest("test") + .type("type1") + .id(Integer.toString(i)) + .source(source(Integer.toString(i), "test" + i)) + .opType(IndexRequest.OpType.INDEX) + ).actionGet(); + if ((i % 10000) == 0) { + System.out.println("Indexed 10000, total " + (i + 10000) + " took " + stopWatch.stop().lastTaskTime()); + stopWatch.start(); + } + } + 
System.out.println("Indexing took " + stopWatch.stop().totalTime()); + + server1.close(); + server2.close(); + } + + private static String source(String id, String nameValue) { + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }"; + } +} diff --git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/util/lucene/versioned/VersionedMapBenchmark.java b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/util/lucene/versioned/VersionedMapBenchmark.java new file mode 100644 index 00000000000..684481dabc4 --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/util/lucene/versioned/VersionedMapBenchmark.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.benchmark.util.lucene.versioned; + +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.lucene.versioned.ConcurrentVersionedMap; +import org.elasticsearch.util.lucene.versioned.NativeVersionedMap; +import org.elasticsearch.util.lucene.versioned.NonBlockingVersionedMap; +import org.elasticsearch.util.lucene.versioned.VersionedMap; + +import java.lang.management.ManagementFactory; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; + +import static java.util.concurrent.TimeUnit.*; + +/** + * @author kimchy (Shay Banon) + */ +public class VersionedMapBenchmark { + + private final VersionedMap versionedMap; + + private final int readerIterations; + + private final int writerIterations; + + private final CountDownLatch latch; + + private final Thread[] readerThreads; + + private final Thread[] writerThreads; + + private final CyclicBarrier barrier1; + private final CyclicBarrier barrier2; + + public VersionedMapBenchmark(VersionedMap versionedMap, + int numberOfReaders, int readerIterations, + int numberOfWriters, int writerIterations) { + this.versionedMap = versionedMap; + this.readerIterations = readerIterations; + this.writerIterations = writerIterations; + + readerThreads = new Thread[numberOfReaders]; + for (int i = 0; i < numberOfReaders; i++) { + readerThreads[i] = new Thread(new ReaderThread(), "reader[" + i + "]"); + } + + writerThreads = new Thread[numberOfWriters]; + for (int i = 0; i < numberOfWriters; i++) { + writerThreads[i] = new Thread(new WriterThread(), "writer[" + i + "]"); + } + + latch = new CountDownLatch(numberOfReaders + numberOfWriters); + barrier1 = new CyclicBarrier(numberOfReaders + numberOfWriters + 1); + barrier2 = new CyclicBarrier(numberOfReaders + numberOfWriters + 1); + + // now, warm up a bit + StopWatch stopWatch = new StopWatch("warmup"); + stopWatch.start(); + int warmupSize = 1000000; + for (int i = 
0; i < warmupSize; i++) { + versionedMap.putVersion(i, i); + versionedMap.beforeVersion(i, i); + } + stopWatch.stop(); + System.out.println("Warmup up of [" + warmupSize + "]: " + stopWatch.totalTime()); + versionedMap.clear(); + } + + public void run() throws Exception { + for (int i = 0; i < 3; i++) { + System.gc(); + MILLISECONDS.sleep(100); + } + + long emptyUsed = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed(); + + for (Thread t : readerThreads) { + t.start(); + } + for (Thread t : writerThreads) { + t.start(); + } + barrier1.await(); + + StopWatch stopWatch = new StopWatch(); + stopWatch.start(); + barrier2.await(); + + latch.await(); + stopWatch.stop(); + + // verify that the writers wrote... + for (int i = 0; i < writerIterations; i++) { + if (versionedMap.beforeVersion(i, Integer.MAX_VALUE)) { + System.out.println("Wrong value for [" + i + ']'); + } + } + + System.out.println("Total:"); + System.out.println(" - [" + readerThreads.length + "] readers with [" + readerIterations + "] iterations"); + System.out.println(" - [" + writerThreads.length + "] writers with [" + writerIterations + "] iterations"); + System.out.println(" - Took: " + stopWatch.totalTime()); + + for (int i = 0; i < 3; i++) { + System.gc(); + MILLISECONDS.sleep(100); + } + + long bytesTaken = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() - emptyUsed; + System.out.println("Size of [" + writerIterations + "] entries is " + new SizeValue(bytesTaken)); + } + + private class ReaderThread implements Runnable { + @Override public void run() { + try { + barrier1.await(); + barrier2.await(); + } catch (Exception e) { + e.printStackTrace(); + } + for (int i = 0; i < readerIterations; i++) { + versionedMap.beforeVersion(i, i); + } + latch.countDown(); + } + } + + private class WriterThread implements Runnable { + @Override public void run() { + try { + barrier1.await(); + barrier2.await(); + } catch (Exception e) { + e.printStackTrace(); + } + for (int i = 0; 
i < writerIterations; i++) { + versionedMap.putVersionIfAbsent(i, i); + } + latch.countDown(); + } + } + + // Some results: Two cores machine, general average across 5 runs + +// VersionedMapBenchmark benchmark = new VersionedMapBenchmark( +// versionedMap, 30, 2000000, 10, 2000000 +// ); + +// Running [native] type +// Took StopWatch '': running time = 11.9s +// ----------------------------------------- +// ms % Task name +// ----------------------------------------- +// 11909 100% +// +// Size of [2000000] entries is 17.9mb + +// Running [nb] type +// Took StopWatch '': running time = 6.1s +// ----------------------------------------- +// ms % Task name +// ----------------------------------------- +// 06134 100% +// +// Size of [2000000] entries is 77.6mb + + public static void main(String[] args) throws Exception { + String type = args.length > 0 ? args[0] : "nb"; + VersionedMap versionedMap; + if ("nb".equalsIgnoreCase(type)) { + versionedMap = new NonBlockingVersionedMap(); + } else if ("native".equalsIgnoreCase(type)) { + versionedMap = new NativeVersionedMap(); + } else if ("concurrent".equalsIgnoreCase(type)) { + versionedMap = new ConcurrentVersionedMap(); + } else { + throw new IllegalArgumentException("Type [" + type + "] unknown"); + } + System.out.println("Running [" + type + "] type"); + VersionedMapBenchmark benchmark = new VersionedMapBenchmark( + versionedMap, 30, 2000000, 10, 2000000 + ); + benchmark.run(); + } +} diff --git a/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java new file mode 100644 index 00000000000..e8f523aa062 --- /dev/null +++ b/modules/benchmark/micro/src/main/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.benchmark.uuid; + +import org.elasticsearch.util.StopWatch; + +import java.util.UUID; +import java.util.concurrent.CountDownLatch; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleUuidBenchmark { + + private static long NUMBER_OF_ITERATIONS = 10000; + private static int NUMBER_OF_THREADS = 100; + + public static void main(String[] args) throws Exception { + StopWatch stopWatch = new StopWatch().start(); + System.out.println("Running " + NUMBER_OF_ITERATIONS); + for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) { + UUID.randomUUID().toString(); + } + System.out.println("Generated in " + stopWatch.stop().totalTime() + " TP Millis " + (stopWatch.totalTime().millisFrac() / NUMBER_OF_ITERATIONS)); + + System.out.println("Generating using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations"); + final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS); + Thread[] threads = new Thread[NUMBER_OF_THREADS]; + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(new Runnable() { + @Override public void run() { + for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) { + UUID.randomUUID().toString(); + } + latch.countDown(); + } + }); + } + stopWatch = new StopWatch().start(); + for 
(Thread thread : threads) { + thread.start(); + } + latch.await(); + stopWatch.stop(); + System.out.println("Generate in " + stopWatch.totalTime() + " TP Millis " + (stopWatch.totalTime().millisFrac() / (NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS))); + } +} diff --git a/modules/elasticsearch/build.gradle b/modules/elasticsearch/build.gradle new file mode 100644 index 00000000000..62c9c1e8705 --- /dev/null +++ b/modules/elasticsearch/build.gradle @@ -0,0 +1,67 @@ +import java.text.SimpleDateFormat + +dependsOn(':test-testng') + +usePlugin 'java' + +archivesBaseName = "$project.archivesBaseName" + +processResources.doLast { + Properties versionProps = new Properties(); + versionProps.setProperty("number", rootProject.version) + versionProps.setProperty("date", buildTimeStr) + versionProps.setProperty("devBuild", rootProject.devBuild.toString()) + + File versionFile = new File(sourceSets.main.classesDir, "/org/elasticsearch/version.properties") + versionFile.parentFile.mkdirs() + versionFile.withOutputStream { + versionProps.store(it, '') + } +} + + +manifest.mainAttributes("Implementation-Title": "ElasticSearch", "Implementation-Version": rootProject.version, "Implementation-Date": buildTimeStr) + +// no need to use the resource dir +sourceSets.main.resources.srcDirs 'src/main/java', rootProject.file('config') +sourceSets.test.resources.srcDir 'src/test/java' + +dependencies { + compile('jline:jline:0.9.94') { transitive = false } + + compile 'org.slf4j:slf4j-api:1.5.8' + compile('org.slf4j:slf4j-log4j12:1.5.8') { transitive = false } + compile('log4j:log4j:1.2.15') { transitive = false } + + compile 'joda-time:joda-time:1.6' + compile 'com.google.collections:google-collections:1.0' + + compile 'org.yaml:snakeyaml:1.5' + + compile 'org.codehaus.jackson:jackson-core-asl:1.4.2' + compile 'org.codehaus.jackson:jackson-mapper-asl:1.4.2' + + compile 'aopalliance:aopalliance:1.0' + compile 'com.google.inject:guice:2.0' + compile 
'com.google.inject.extensions:guice-assisted-inject:2.0' + compile 'com.google.inject.extensions:guice-multibindings:2.0' + + compile 'org.apache.lucene:lucene-core:3.0.0' + compile 'org.apache.lucene:lucene-analyzers:3.0.0' + compile 'org.apache.lucene:lucene-queries:3.0.0' + + compile('jgroups:jgroups:2.8.0.GA') { transitive = false } + compile('org.jboss.netty:netty:3.1.5.GA') { transitive = false } + + testCompile project(':test-testng') + testCompile('org.testng:testng:5.10:jdk15') { transitive = false } + testCompile 'org.hamcrest:hamcrest-all:1.1' +} + +test { + useTestNG() + jmvArgs = ["-ea", "-Xmx1024m"] + options.suiteName = project.name + options.listeners = ["org.elasticsearch.util.testng.Listeners"] + options.systemProperties = ["es.test.log.conf": System.getProperty("es.test.log.conf", "log4j-gradle.properties")] +} diff --git a/modules/elasticsearch/src/main/java/config/jgroups/tcp-nio.xml b/modules/elasticsearch/src/main/java/config/jgroups/tcp-nio.xml new file mode 100644 index 00000000000..c71406eb511 --- /dev/null +++ b/modules/elasticsearch/src/main/java/config/jgroups/tcp-nio.xml @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/modules/elasticsearch/src/main/java/config/jgroups/tcp.xml b/modules/elasticsearch/src/main/java/config/jgroups/tcp.xml new file mode 100644 index 00000000000..02d60d70af7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/config/jgroups/tcp.xml @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/config/jgroups/tcpgossip.xml b/modules/elasticsearch/src/main/java/config/jgroups/tcpgossip.xml new file mode 100644 index 00000000000..ad87ae618bd --- /dev/null +++ b/modules/elasticsearch/src/main/java/config/jgroups/tcpgossip.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + diff --git a/modules/elasticsearch/src/main/java/config/jgroups/udp.xml b/modules/elasticsearch/src/main/java/config/jgroups/udp.xml 
new file mode 100644 index 00000000000..a8b232210d8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/config/jgroups/udp.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/modules/elasticsearch/src/main/java/config/names.txt b/modules/elasticsearch/src/main/java/config/names.txt new file mode 100644 index 00000000000..e30a752439e --- /dev/null +++ b/modules/elasticsearch/src/main/java/config/names.txt @@ -0,0 +1,2937 @@ +Aardwolf +Abdol, Ahmet +Abner Little +Abominable Snowman +Abomination +Abominatrix +Abraxas +Absalom +Absorbing Man +Abyss +Access +Achebe +Achelous +Achilles +Acrobat +Adam II +Adam X +Adaptoid +Administrator +Adonis +Adrenazon +Adversary +Advisor +Aegis +Aero +Afari, Jamal +Aftershock +Agamemnon +Agamotto +Aged Genghis +Agent +Agent Axis +Agent Cheesecake +Agent X +Agent Zero +Aginar +Aggamon +Agon +Agron +Agony +El Aguila +Aguja +Ahab +Ahura +Air-Walker +Airborne +Aireo +Airstrike +Ajak +Ajax +Ajaxis +Akasha +Akhenaten +A'lars +Alaris +Albert +Albino +Albion +Alchemy +Alcmena +Aldebron +Alex +Alexander, Caleb +Alexander, Carrie +Algrim the Strong +Alhazred, Abdul +Alibar +Alistair Smythe +Alistaire Stuart +Aliyah Bishop +Alkhema +All-American +Allan, Liz +Allatou +Allerdyce, St. 
John +Alpha Ray +Alpha the Ultimate Mutant +Alraune, Marlene +Alysande Stuart +Alyssa Moy +Amalgam +Amanda Sefton +Amatsu-Mikaboshi +Amazon +Amber Hunt +Amelia Voght +Amergin +American Ace +American Dream +American Eagle +American Samurai +Americop +Ameridroid +Amiko Kobayashi +Amina Synge +Aminedi +Ammo +Amphibian +Amphibion +Amphibius +Amun +Anaconda +Anais +Analyzer +Anarchist +Ancient One +Anderssen, Tanya +Andreas von Strucker +Andrew Chord +Andrew Gervais +Android Man +Andromeda +Anelle +Angar the Screamer +The Angel +Angel +Angel Dust +Angel Face +Angel Salvadore +Angela Cairn +Angela Del Toro +Angler +Ani-Mator +Animus +Animus +Ankhi +Annalee +Anelle +Anne-Marie Cortez +Annex +Annie Ghazikhanian +Annihilus +Anole +Anomalito +Anomaloco +Anomaly +Answer +Ant-Man +Anthropomorpho +Anti-Cap +Anti-Phoenix Force +Anti-Venom +Anti-Vision +Antimatter +Antiphon the Overseer +Antonio +Anything +Anubis +Anvil +Apache Kid +Apalla +Ape +Ape-Man +Ape-X +Apocalypse +Apollo +Apryll +Aquarian +Aquarius +Aqueduct +Arabian Knight +Arachne +Aragorn +Araki +Aralune +Araña +Arc +Arcade +Arcademan +Arcanna +Archangel +Archenemy +Archer +Archie Corrigan +Archimage +Architect +Arclight +Arcturus Rann +Ardina +Ardroman +Arena +Ares +Argo +Argus +Ariann +Arides +Ariel +Aries +Arishem the Judge +Arizona Annie +Arize +Arkon +Arkus +Arliss, Todd +Arlok +Armadillo +Armageddon +Armand Martel +Armor +Armory +Arnim Zola +Aron, the Renegade Watcher +Arranger +Arsenal +Arsenic +Artemis +Artie +Arturo Falcones +Jackson Arvad +Asbestos Lady +Asbestos Man +Ashcan +Asmodeus +Asp +Assassin +Astaroth / Asteroth +Astra +Astrid Bloom +Astrovik, Vance +Astron +Astronomer +Asylum +Atalanta +Atalon +Atlas +Atlas +Atlas +Athena +Atleza +Atom Bob +Atom-Smasher +Att-Lass +Attuma +Atum +Aunt May Parker +Auntie Freeze +Auric +Aurora +Authority +Autolycus +Avalanche +Avarrish +Awesome Android +Axum +Azazel +Baal +Bailey, Gailyn +Bailey, Joey +Bailey, Paul +Sunset Bain +Baker, William +Balder +Balthakk +Balor 
+Bandit +Bantam +Banner, Betty Ross +Banner, Robert Bruce +Banshee +Baphomet +Barbarus +Barnacle +Baron Blood +Baron Brimstone +Baron Macabre +Baron Mordo +Baron Samedi +Baron Strucker +Baron Zemo +Baroness Blood +Barracuda +Barton, Clint +Base +Basilisk +Bast +Bastion +Ruth Bat-Seraph +Batragon +Batroc the Leaper +Battering Ram +Battlestar +Battleaxe +Battletide +Batwing +Beaubier, Jean-Paul +Beaubier, Jeanne-Marie +Beast +Beautiful Dreamer +Beckley, Benny +Bedlam +Bedlam II +Beechman, Jerome +Beetle +Beetle II +Behemoth +Bela +Belasco +Bella Donna +Belathauzer +Bench, Morris +Bengal +Bereet +Berzerker +Bes +Beta Ray Bill +Betty Ross Banner +Bevatron +Beyonder +Bi-Beast +Bible John +Big Bertha +Big Man +Big Wheel +Bill Foster +Binary +Bird-Brain +Bird-Man +Birely, Douglas +Bishop +Bishop, Kate +Bison +Bizarnage +Blackbird +Black, Carmilla +Black Bolt +Black Box +Black Cat +Black Crow +Black Death +Black Dragon +Black Fox +Black Goliath +Blackheart +Blackheath +Black Jack Tarr +Black King +Black Knight +Blacklash +Black Lama +Black Mamba +Black Marvel +Blackout +Black Panther +Black Queen +Black Talon +Black Tarantula +Black Tom Cassidy +Black Widow +Blackwing +Blackwulf +Blade +Blaire, Allison +The Blank +Blaquesmith +Brass +Blastaar +Blaze +Blaze, Johnny +Blaze, Siena +Blazing Skull +Blevins, Sally +Blind Faith +Blind Justice +Blindside +Blindspot +Bling +Blink +Blistik +Blitziana +Blitzkrieger +Blitzschlag, Baron Von +Blizzard +Blizzard II +Blob +Blockbuster +Bloke +Blonde Phantom +Blonsky, Emil +Blood Brothers +Blood Rose +Blood Spider +Bloodaxe +Bloodhawk +Bloodlust +Bloodlust II +Bloodscream +Bloodshed +Bloodsport +Bloodstorm +Bloodtide +Bloodwraith +Bloom, Astrid +Blowhard +Blue Bullet +Blue Marvel +Blue Diamond +Blue Shield +Blue Streak +Blur +Bob +Bob Diamond +Bobster +Bogan, Elias +Bogeyman +Bohusk, Tito +Bombshell +Boneyard +Bont, Alexander +Boobytrap +Book +Boomer +Boom Boom +Boom Boy +Boomerang +Boomslang +Boost +Bora +Bounty +Bounty Hunter +Bova +Box 
+Box IV +Braddock, Brian +Braddock, Elizabeth "Betsy" +Braddock, Jamie +Braddock, Meggan +Bradley, Isaiah +Brain Cell +Brain-Child +Brain Drain +Brainchild +Brand, Abigail +Brant, Betty +Brass +Bres +Bridge, George Washington +Brigade +Briquette +Eddie Brock +Brother Nature +Brother Tode +Brother Voodoo +Brothers Grimm +Brown, Abe +Bruiser +Brunnhilda +Brutacus +Brute I +Brute II +Brute III +Brynocki +Buckman, Edward "Ned" +Bucky +Bucky III +Bug +Bullet +Bullseye +Bulldozer +Bulldozer +Burner +Burstarr +Bushman +Bushmaster +Bushwacker +Butterball +Buzz +Buzzard +Byrrah +Bethany Cabe +Caber +Cable +Cadaver +Cage, Luke +Caiera +Caiman +Cagliostro +Cain +Caliban +Callisto +Godfrey Calthrop +Calypso +Heather Cameron +Canasta +Cancer +Candra +David Cannon +Cannonball +Cannonball I +Cap 'N Hawk +Caprice +Capricorn +Captain Atlas‎ +Captain America +Captain Barracuda +Captain Britain +Cap 'N Hawk +Captain Fate +Captain Germany +Captain Marvel +Captain Omen +Captain Savage +Captain Ultra +Captain UK +Captain Universe +Captain Wings +Captain Zero +Caregiver +Caretaker +Cardiac +Cardinal +Carnage +Carnivore +Carolyn Parmenter +Guido Carosella +Carrion +Joe Cartelli +Peggy Carter +Sharon Carter +Cassandra Nova +Cassidy, Sean +Cassidy, Theresa +Tom Cassidy +Cassiopea +Castle, Frank +Cat +Cat-Man +Catiana +Cayman +Cecilia Reyes +Celestial Madonna +Centennial +Centurion +Centurious‎ +Centurius +Century, Turner +Cerberus +Cerebra +Cerise +Cethlann +Ch'od +Chaka +Challenger +Chamber +Chameleon +Champion of the Universe +Chance +Changeling +Chaos +Charcoal +Charlie-27 +Charles Xavier +Charon +Cheetah +Chemistro +Lila Cheney +Chi Demon +Chief Examiner +Chimera +Christians, Isaac +Choice +Chondu the Mystic +Chrome +Chronos +Chthon +Chtylok +Citizen V +Clea +Clearcut +Clive +Cloak +Cloud 9 +Cloud +Clown +Coach +Coachwhip +Cobalt Man +Cobra +Cody Mushumanski gun Man aka: the hunter +Coldblood +Coldfire +Cold War +Kasper Cole +Colonel America +Collective Man +Collector +Colleen Wing 
+Rusty Collins +Colonel +Colossus +Comet +Comet Man +Commander Kraken +Commando +Conan the Barbarian +Condor +Connors, Curtis +Conquer Lord +Conquest +Conquistador +Constrictor +Contemplator +Contessa +Contrary +Controller +Cooper, Valerie +Copperhead +Copperhead +Copycat +Coral +Corbo, Adrian +Corbo, Jared +Cordelia Frost +Abraham Cornelius +Corona +Corruptor +Corsair +Corsi, Tom +Fabian Cortez +Cottonmouth +Courier +Count Abyss +Count Nefaria +Cowgirl +Crazy Eight +Graydon Creed +Creed, Victor +Creel, Carl "Crusher" +Crichton, Lady Jacqueline Falsworth +Crichton, Kenneth +Crime-Buster +Crimebuster +Crime Master +Crimson +Crimson and the Raven +Crimson Cavalier +Crimson Commando +Crimson Cowl +Crimson Craig +Crimson Daffodil +Crimson Dynamo +Crimson Dynamo V +Crippler +Crooked Man +Crossbones +Crossfire +Crown +Crucible +Crusader +Crusader +Crusher +Crystal +Cutthroat +Cybele +Cybelle +Cyber +Cyborg X +Cyclone +Cyclops +Cypher +Derrick Slegers speed aka: slegers +D'Ken +D'Spayre +Robert da Costa +Dagger +Dakimh the Enchanter +Damballah +Damian, Margo +Damon Dran +Lorna Dane +Danger +Danielle Moonstar +Dansen Macabre +Danvers Carol +Daredevil +Dark Angel +Dark Beast +Dark-Crawler +Dark Phoenix +Darkstar +Darkdevil +Darkhawk +Darkoth +Davis, Leila +Dawson, Tex +Day, Wilbur +Daytripper +Dazzler +Dead Girl +Deadhead +Deadly Ernest +Deadpool +Laura Dean +Death +Death Adder +Death's Head I&II +Death's-Head +Deathbird +Deathlok +Deathwatch +Death-Stalker +Deathurge +Deathstroke +Decay +Decay II +Defensor +De La Fontaine, Valentina Allegra +Delilah +Delphi +Delphine Courtney +Dementia +Demiurge +Demogoblin +Demogorge the God-Eater +Demolition Man +Destiny I +Destiny +Destroyer, The +Destroyer, The +Destroyer +Destroyer +Destroyer of Demons +Devastator +Devil Dinosaur +Devil-Slayer +Devos the Devastator +DeWolff, Jean +Diablo +Diamanda Nero +Diamond Lil +Diamondback +Diamondhead +Digitek +Dillon, Maxwell +Dionysus +Dirtnap +Discus +Dittomaster‎ +DJ +D-Man +Dmitri Bukharin 
+Doc Samson +Doctor Arthur Nagan +Doctor Bong +Doctor Demonicus +Doctor Doom +Doctor Dorcas +Doctor Droom +Doctor Druid +Doctor Faustus +Doctor Glitternight +Doctor Leery +Doctor Minerva‎ +Doctor Octopus +Doctor Spectrum +Doctor Strange +Doctor Sun +Doe, John +Domina +Dominic Fortune +Domino +Dominus +Domo +Donald Pierce +Donald & Deborah Ritter +Doorman +Doop +Doppelganger +Doppleganger +Dorcas, Dr. Lemuel +Dorma +Dormammu +Doug and Jerry +Dougboy +Doughboy +Douglas Ramsey +Douglock +Dracula +Dragon Lord +Dragon Man +Dragon of the Moon +Dragonfly +Dragoness +Dragonwing +Drake, Frank +Drake, Robert "Bobby" +Drax the Destroyer +Dreadknight +Dreadnought +Dream Weaver +Dreaming Celestial +Dreamqueen +Dredmund Druid +Drew, Jessica +Dromedan +Droom, Doctor Anthony +Double Helix +Druid +Druid +Druig +Drumm, Jericho +du Paris, Bennet +Dum-Dum Dugan +DuQuesne, Jacques +Dusk +Dust +Dvorak, Sybil +Dweller-in-Darkness +Dyna-Mite +Earth Lord +Earthquake +Paul Norbert Ebersol +Ebon Seeker +Ecstasy +Echo +Ectokid +Edwards, Ethan +Edwin Jarvis +Eel +Egghead +Ego the Living Planet +El Aguila +El Muerto +Elathan +Electric Eve +Electro +ElectroCute +Electron +Eleggua +Elektra +Elektro +Elf With A Gun +Elfqueen +Eliminator +Eliminator +Elixir +Elsie-Dee +Elven +Elysius +Emma Frost +Empath +Empathoid +Emplate +En Sabah Nur +Enchantress +Energizer +Enforcer +Enigma +Ent +The Entity +Entropic Man +Eon +Epoch +Equilibrius +Equinox +Ereshkigal +Eric the Red +Eric Slaughter +Erg +Ernst +Eros +Eshu +Eson the Searcher +Miguel Espinosa +[[Mister Sinister|Essex, Nathaniel +Eternal Brain +Eternity +Ev Teel Urizen +Evangeline Whedon +Ever +Everyman +Evilhawk‎ +Executioner +Exodus +Exploding Man +Exterminator +Ezekiel +Fafnir +Fagin +Falcon +Fallen One +Fantastic Four +Brian Falsworth +Jacqueline Falsworth +John Falsworth +Famine +Fan Boy +Fandral +Fang +Fantasia +Fantomex +Farallah +Hensley Fargus +Amahl Farouk +Fasaud +Fashima +Fatale +Fateball +Father Time +Fault Zone +Fearmaster‎ +Feedback 
+Feline +Fenris +Fenris Wolf +Feral +Fer-de-Lance +Feron +Fetter, Philip +Fever Pitch +Fight-Man +Fin +Fin Fang Foom +Firearm +Firebird +Firebolt +Firebrand +Firefrost +Firelord +Firepower +Firestar +Fisk, Richard +Fisk, Wilson +Trevor Fitzroy +Fixx +Fixer +Flag-Smasher +Flambe +Flash Thompson +Flatman +Flex +Flubber +Flumm, Marvin +Fly +Flygirl +Flying Tiger +Foggy Nelson +Fontanelle +Foolkiller +Forbush Man +Force +Forearm +Foreigner +Forge +Forgotten One +Forrester, Lee +Fortunato, Don +Fortune, Dominic +Foster, Bill +Foster, Jane +Foster, Tom +Foxfire +Frank Castle +Drake, Frank +Frankenstein's Monster +Frankie and Victoria +Frankie Raye +Franklin Richards +Freak +Freak of Science +Freakmaster +Freakshow +Freeman, Spike +Free Spirit +Freedom Ring +Frenzy +Frey +Frigga +Frog-Man +Frog-Man +Frost, Adrienne +Frost, Deacon +Frost, Cordelia +Frost, Emma +Fujikawa, Rumiko +Fury +Fury, Jacob "Jake" +Fury, Nick +Fusion +Futurist +G-Force +Gabriel, Devil Hunter +Gabriel the Air-Walker +Gaea +Gaia +Galactus +Galaxy Master +Gambit +Gammenon the Gatherer +Gamora +Ganymede +Gardener +Gargan, Mac +Gargantua +Gargantus +Gargouille +Gargoyle +Garokk the Petrified Man +Garrett, Jonathan "John" +Garrison, Sean +Gatecrasher +Gateway +Gauntlet +Gauntlet +Gavel +Gaza +Gazelle +Gazer +Geb +Gee +Geiger +Geirrodur +Gemini +Genis-Vell +Gertrude Yorkes +Ghaur +Carter Ghazikhanian +Ghazikhanian, Carter +Georgianna Castleberry +Ghost +Ghost Dancer +Ghost Girl +Ghost Girl +Ghost Maker +Ghost Rider +Ghost Rider +Ghost Rider 2099 +Ghoul +Ghoul +Giant-Man +Gibbon +Gibborim +Gibney, Kyle +Gideon +Gideon, Gregory +Gideon Mace +Giganto +Gigantus +Gill, Donald "Donny" +Gilmore, Ritchie +Gin Genie +Gladiator, Shi'ar +Gladiator +Gladiatrix +Glamor +Glitch +Glob +Glob Herman +Gog +Gloom +Glorian +Goblin Queen +Goblyn +Gold, Martin +Gold, Melissa +Golden Archer +Goldbug +Goldeneye +Golden Girl +Golden Oldie +Golem +Goliath +Golubev, Mikula +Gomi +Googam +Gorgeous George +Gorgilla +Gorgon +Gorilla 
Girl +Gorilla-Man +Gorr +Gosamyr +Grand Director +Grandmaster +Grant, Greer +Grappler +Grasshopper I&II +Graviton +Gravity +Great Gambonnos +Great Video +Green Goblin +Green Goblin IV +Gregory Gideon +Gremlin +Grenade +Grey, Elaine +Grey-Summers, Jean +Grey, Dr. John +Grey, Nate +Grey, Rachel +Grey Gargoyle +Grey, Sara +Grey King +Griffin +Grim Hunter +Grim Reaper +Grimm, Benjamin Jacob +The Grip +Grizzly +Grog the God-Crusher +Gronk +Grotesk +Groundhog +Growing Man +Guardsman +Gunthar of Rigel‎ +Guthrie, Jebediah +Guthrie, Joshua +Guthrie, Melody +Guthrie, Paige +Guthrie, Samuel +Gypsy Moth +Gyrich, Henry Peter +Hack +Hag +Hairbag +Halflife +Hall, Franklin +Halloway, Thomas +Halloween Jack +Hamilton, Bart +Hamilton Slade +Hammer Harrison +Hammer and Anvil +Hammer, Justin +Hammer, Justine +Hammerhead +Hammond, Jim +Hangman +Hank McCoy +Hank Pym +Hanna Levy +Hannibal King +gen Harada +Hardcase +Hardcore +Hardnose +Hardshell +Hardwire +Felicia Hardy +Hargen the Measurer +Harker, Quincy +Harkness, Agatha +Harmonica +Harness +Harold H. Harold +Harpoon +Harpy +Harrier +Jonas Harrow +Harry Leland +Harry Osborn +Hate-Monger +Haven +Havok +Hawkeye +Hawkeye II +Hawkshaw +Hayden, Alex +Hazard +Molly Hayes +Haywire +Hazmat +Headknocker +Headlok +Heart Attack +Hebe +Hecate +Hector +Heimdall +Hela +Helio +Hellcat +Helleyes +Hellfire +Hellion +Hellion +Hellrazor +Hellstrom, Damion +Hellstrom, Patsy +Hephaestus +Hepzibah +Her +Hera +H.E.R.B.I.E. +Hercules +Hermes +Hermod +Hero +Hero for Hire +Herr Kleiser +Hideko Takata +High Evolutionary +High-Tech +Hijacker +Hildegarde +Him +Hindsight Lad +Hit-Maker +Hitman +Hobgoblin +Hobgoblin II +Cameron Hodge +Hogan, Harold "Happy" +Hoder +Hogun +Holocaust +Holly +Honcho +Honey Lemon +Hood +Horrocks, Ned +Hornet +Phineas T. 
Horton +Horus +Howard the Duck +James Howlett +Hrimhari +Hub +Hugh Jones +Hulk +Hulk 2099 +Hulkling +Human Cannonball +Human Fly +Human Robot +Human Top I +Human Top II +Human Torch I +Human Torch II +Humbug +Humus Sapien +Huntara +Robert Hunter +Stevie Hunter +Hurricane +Husk +Hussar +Hybrid +Hybrid II +Hyde +Hydro +Hydro-Man +Hydron +Hyperion +Hyperkind +Hyperstorm +Hypnotia +Hyppokri +Hippolyta +Icarus +Iceman +Icemaster +Hisako Ichiki +Idunn +Iguana +Ikaris +Ikonn +Ikthalon +Illusion +Immortus +Impala +Imperial Hydra +Impossible Man +Impulse +In-Betweener +Indra +Indech +Inertia +Infamnia +Infant Terrible +Infectia +Inferno +Inferno +Inferno +Infinity +Shola Inkosi +Interloper +Invisible Girl +Invisible Woman +Inza +Ion +Iridia +Ironclad +Iron Cross +Iron Fist +Iron Lad +Iron Maiden +Iron Man +Iron Man 2020 +Iron Monger +ISAAC +Isbisa +Ishihara, Shirow +Isis +It, the Living Colossus +J2 +Jackal +Jackdaw +Jack Flag +Jack Frost +Jackhammer +Jack-in-the-Box +Jack Kirby +Jack of Hearts +Jack O'Lantern +Jackpot +Jade Dragon +Jaeger +Harald Jaekelsson +Jaguar +Jameson, J. Jonah +Jameson, John +Jameson, Dr. 
Marla +Jaspers, James +Jann +Janus +Jarella +Jaren +Jarvis, Edwin +Jason +Jawynn Dueck The Iron christian of Faith +Jazz +Jean Grey +Jeffries, Madison +Jeffrey Mace +Jekyll +Jenkins, Abner +Jerry Jaxon +Jessica Jones +Jennifer Walters +Jens Meilleur slap shot +Jester +Jigsaw +Jim Hammond +Jimmy Woo +Jocasta +Joe Fixit +Daisy Johnson +John Sublime +Johnny Ohm +Jolt +Jon Spectre +Jones, Angelica +jordan merasty seberius +Jones, Gabe +Jones, Hugh +Jones, Rick +Joseph +Josten, Conrad +Josten, Erik +Joyce, Madeline +Joystick +Juarez, Bonita +Jubilee +Judas Traveller +Judd, Eugene +Jude the Entropic Man +Juggernaut +Jumbo Carnation +Junkpile +Junta +Justice +Justice +Justin Hammer +Kaine +Kala +Jennifer Kale +Kaluu +Kamal +Kamo Tharnn +Kamuu +Garrison Kane +Kang the Conqueror +Kangaroo +Karima Shapandar +Karkas +Karla Sofen +Karma +Karnak +Karnilla +Karolina Dean +Karthon the Quester +Cletus Kasady +Kate Neville +Katu +Kaur, Benazir +Ka-Zar +Kehl of Tauran +Keith Kilham +Kem Horkus +Ketch, Dan +Robert Kelly +Key +Khaos +Khonshu +Khoryphos +Kiber the Cruel +Kick-Ass +Kid Colt +Kid Nova +Kierrok +Killer Shrike +Zebediah Killgrave +Killmonger, Erik +Killpower +Killraven +Kilmer +Kimura +Cessily Kincaid +Jane Kincaid +Kine, Benedict +Hannibal King +Kingpin +King Bedlam +Kingo Sunen +Kirigi +Kirtsyn Perrin Short Stop +Kismet +Kismet Deadly +Kiss +Kiwi Black +Kkallakku +Kl'rt +Klaatu +Klaw +Kleinstocks +Knickknack +Misty Knight +Kofi Whitemane +Kogar +Kohl Harder Boulder Man +Korath the Pursuer +Korg +Kormok +Korrek +Korvac +Korvus +Kosmos +Kragoff, Ivan +Kraken +Krakkan +Krang +Kraven the Hunter +Kravinoff, Alyosha +Sergei Kravinoff +Kristoff Vernard +Kristoff von Doom +Kro +Krystalin +Kubik +Kukulcan +Kurse +Kwannon +Kylun +Kymaera +Lacuna +Lady Deathstrike +Lady Killer +Lady Lark +Lady Lotus +Lady Mandarin +Lady Mastermind +Lady Octopus +La Lunatica +Lancer +Landslide +Lang, Cassie +Lang, Steven +La Nuit +Larry Bodine +Lament +Lasher +Laughton, Ebenezer +Layla Miller 
+Lazarus +Leader +Leap-Frog +Leash +LeBeau, Remy +Leech +Ned Leeds +Lee Forrester +Leeds, Betty Brant +Left Hand +Left-Winger +Legacy +Legion +Lehnsherr, Erik Magnus +Leir +Lemuel Dorcas +Leo +Leonus +Letha +Levan +Levy, Hannah +Lexington, Alexander +Lianda +Libra +Lifeforce +Lifeguard +Lifter +Lightbright +Lightmaster +Lighting Rod +Lightspeed +Lila Cheney +Lilandra Neramani +Lilith, the daughter of Dracula +Lin Sun +Lincoln, Lonnie Thompson +Link +Lionheart +Luichow, Chan +Live Wire +Living Brain +Living Colossus +Living Diamond +Living Eraser +Living Hulk +Living Laser +Living Lightning +Living Monolith +Living Mummy +Living Pharaoh +Living Pharaoh +Living Planet +Living Tribunal +Living Totem +Lizard +Llan the Sorcerer +Lloigoroth +Llyra +Llyron +Loa +Lobo, Carlos +Lockdown +Lockheed +Lockjaw +Locksmith +Locus +Locust +Lodestone +Logan +Loki +Longneck +Longshot +Looter +Lord Chaos +Lord Dark Wind +Lord Pumpkin +Lorelei +Lorelei II +Lorelei Travis +Lorna Dane +Lorvex +Loss +Lu, Chen +Lucas Brand +Lucifer +Ludi +Lukin, Aleksander +Lumpkin, Willie +Luna +Lunatica +Lunatik +Lupa +Lupo +Lurking Unknown +Lyja +Lykos, Karl +Lynx +M +MacLain, Myron +Mace, Gideon +Mace, Jeffrey +Mach-IV +Machine Man +Machinesmith +Machine Teen +MacKenzie, Al +MacPherran, Mary "Skeeter" +MacTaggert, Moira +Mad-Dog +Mad Dog Rassitano +Mad Jack +Mad Jim Jaspers +Mad Thinker +Mad Thinker’s Awesome Android +Madam Slay +Madame Hydra +Madame MacEvil +Madame Masque +Madame Menace +Madame Web +Madcap +Maddicks, Artie +Madrox, James +Madelyne Pryor +Maelstrom +Maestro +Magdalena +Magdalene +Maggott +Magician +Magik +Magilla +Magma +Magneto +Magnum I +Magnum, Moses +Magnus +Magus +Magus +Maha Yogi +Mahkizmo +Major Mapleleaf +Makkari +Malekith the Accursed +Malice +Malus, Karl +Mammomax +Man-Ape +Man-Beast +Man-Brute +Man-Bull +Man-Eater +Man-Elephant +Man-Killer +Man-Spider +Man-Thing +Man-Wolf +Manbot +Mandarin +Mandrill +Mandroid +Mangle +Mangog +Manikin +Man Mountain Marko +Manslaughter +Manta 
+Mantis +Mantra +Marc Spector +Mark Gervaisnight shade +Marduk Kurios +Margali Szardos +Maria Hill +Mariko Yashida +Marks, James "Jimmy" +Marko, Cain +Marko, Flint +Marlene Alraune +Marlow, Keen +Marrina +Marrow +Martha Johansson +Martinex +Marvel Boy +Marvel Boy +Marvel Boy +Marvel Girl +Marvel Man +Mar-Vell +Marwan, Krista +Mary Zero +Masaryk, Milos +Masked Marauder +Masked Marvel +Masked Rose +Mason, Louise +Masque +Mass Master +Master +Master of Vengeance +Master Khan +Master Man +Master Menace +Mastermind +Mastermind of the UK +Mastermind +Master Mold +Master Order +Master Pandemonium +Masters, Alicia +Matador +Match +Matsu'o Tsurayaba +Matt Murdock +Mauler +Maur-Konn +Mauvais +Maverick +Max +Maxam +Maximoff, Pietro +Maximoff, Wanda +Maximus +May Parker +May "Mayday" Parker +Mayhem +McCoy, Henry "Hank" +McKenzie, Namor +Meanstreak +Meathook +Mechamage +Medusa +Mekano +Meggan +Meld +Melee +Seamus Mellencamp +Meltdown +Melter +Mentallo +Mentor +Mentor +Mentus +Mephisto +Mercurio +Mercury +Mercy +Merlin +Mesmero +Metal Master +Metalhead +Meteor Man +Meteorite +Meteor Man +Meteorite II +Micro +Microchip +Micromax +Midas +Midgard Serpent +Midnight +Midnight Man +Midnight Sun +Miek +Mikado & Mosha +Mikey +Mikhail Rasputin +Milan +Miller, Layla +Mimic +Mimic +Mimir +Mindmeld +Mindworm +Miracle Man +Mirage I +Mirage II +Misfit +Miss America +Missing Link +Mister Buda +Mister Doll +Mister Fear +Mister Hyde +Mister Jip +Mr. 
M +Mister Machine +Mister One and Mister Two +Mister Sensitive +Mister Sinister +Mister X +Misty Knight +Mist Mistress +Mockingbird +MODAM +MODOK +Modred the Mystic +Mogul of the Mystic Mountain +Moira Brandon +Moira MacTaggert +Mojo +Mole Man +Molecule Man +Molly Hayes +Molten Man +Mondo +Mongoose +Monroe, Trip +Monsoon +Monstra +Monstro the Mighty +Moon-Boy +Moon Knight +Moondark +Moondragon +Moonhunter +Moonstar, Danielle +Moonstone +Mop Man +Morbius +Mordo, Karl +Mordred +Morg +Morgan Le Fay +Morlun +Morning Star +Morph +Morpheus +Moses Magnum +Mother Earth/Mother Nature +Mother Night +Mother Superior +Motormouth +Mountjoy +Mr. Fish +Mr. Justice +Ms. Marvel +Ms. MODOK +Ms. Steed +M-Twins +Multiple Man +Mundi, Rex +Munroe, Ororo +Murdock, Matt +Murmur +Murmur II +Mutant Master +Mutant X +Myers, Fred +Mys-Tech +Mysterio +Mystique +N'astirh +N'Gabthoth +N'Garai +Naga +Nameless One +Namor the Sub-Mariner +Namora +Namorita +Nanny +Natchios, Elektra +Nate Grey +Nathaniel Essex +Nathaniel Richards +Native +Nebula +Nebulo +Nebulon +Nebulos +Necrodamus +Necromantra +Needle +Nefaria, Luchino +Nefarius +Negasonic Teenage Warhead +Nekra +Nelson, Foggy +Nelson, Greer Grant +Nemesis +Neophyte +Neptune +Network +Neuronne +Neurotap +Neville, Kate +Newell, Walter +New Goblin +Nezarr the Calculator +NFL Superpro +Nicholas Scratch +Nicholas maunderRed Claw +Nick Fury +Nico Minoru +Nightcrawler +Nighthawk +Night Nurse +The Night Man +Nightmare +Night Rider +Nightshade +Nightside +Night Thrasher +Nightwatch +Nightwind +Nikki +Niles Van Roekel +Nimrod +Ningal +Nital, Adri +Nital, Taj +Nitro +Kiden Nixon +MN-E (Ultraverse) +Nobilus +Noble, Peter +Nocturne +Nocturne +Noh-Varr +Nomad +Norman Osborn +Norns +Norrin Radd +North, Dakota +Northstar +Nosferata +Nova +Nova-Prime +Nova, Cassandra +Novs +Nowman, Michael +Nox +Nth Man: the Ultimate Ninja +Nth Man +Nuke - Frank Simpson +Nuke - Squadron Supreme member +Nuklo +Null, the Living Darkness +Numinus +Nut +O'Meggan, Alfie +O'Sullivan, 
Solomon +Obituary +Obliterator +Oblivion +Occulus +Ocean +Ocelot +Octavius, Dr. Otto +Oddball +Odin +Ogord, Aleta +Ogre +Ogress +O'Hara, Miguel +Omen +Omega Red +Omega I +Omega the Unknown +Omerta +One Above All +Oneg the Prober +Onslaught +Onyxx +Ooze +Optoman +Oracle +Orator +Orb +Orbit +Orchid +Ord +Order +Orikal +Orka +Orphan +Orphan-Maker +Osborn, Harry +Osborn, Norman +Osiris +Outlaw +Outrage +Overkill +Overmind +Overrider +Ozone +Owl +Ox +Ozymandias +Page, Karen +Paibo +Paladin +Paradigm +Paragon +Paralyzer +Paris +Parker, Ben +Parker, Mary Jane +Parker, May +Parker, Richard +Parker, Peter +Parks, Arthur +Pasco +Paste-Pot Pete +Patch +Pathway +Patriot I +Patriot II +Paul Patterson +Payback +Payge, Reeva +Payne, Frank +Peace Monger +Peepers +Penance +Penance II +Peregrine +Perfection +Perseus +Persuader +Persuasion +Perun +Pete Wisdom +Peter Criss +Petros, Dominic +Petruski, Peter +Phade +Phantazia +Phantom Blonde +Phantom Eagle +Phantom Rider +Phalanx +Phage +Phastos +Phat +Phimster, Ellie +Phoenix +Photon +Phyla-Vell +Pierce, Alexander Goodwin +Piledriver +Pipeline +Piper +Pip the Troll +Piper +Piranha +Pisces +Pistol +Pitt, Desmond +Pixie +Pixx +Plague +Plantman +Plasma +Plazm +Plug +Plunderer +Pluto +Poison +Polaris +Poltergeist +Porcupine +Portal +Possessor +Postman +Postmortem +Potts, Virginia "Pepper" +Poundcakes +Powderkeg +Power, Alex +Power, Jack +Power, James Dr. 
+Power, Julie +Power, Katie +Power, Margaret +Power Broker +Power Man +Power Princess +Power Skrull +Powerpax +Powerhouse +Presence +Pressure +Prester John +Preston, Martin +Pretty Persuasions +Preview +Primal +Prime +Prime Mover +Primevil +Primus +Princess Python +Proctor +Prodigy +Professor Power +Professor X +The Profile +Projector +Prometheus +Protector +Proteus +Prototype +Proudstar, James +Proudstar, John +Prowler +Pryde, Katherine "Kitty" +Psi-Lord +Psyche +Psycho-Man +Psyklop +Psylocke +Puck +Puff Adder +Puishannt +Pulse +Puma +Punchout +Punisher +Punisher I +Punisher 2099 +Puppet Master +Purge +Purple Girl +Purple Man +Pyre +Pyro +Quagmire +Quantum +Quasar +Quasar II +Quasimodo +Quicksand +Quicksilver +Quincy Harker +Quentin Beck +Quentin Quire +Raa of the Caves +Rachel van Helsing +Radd, Norrin +Radioactive Man +Radian +Radion the Atomic Man +Radius +Rafferty +Rage +Raggadorr +Rainbow +Rama-Tut +Raman +Ramrod +Ramsey, Doug +Ramshot +Rancor +Rand, Daniel +Randall Shire +Random +Ranger +Rankin, Calvin +Ransak the Reject +Monica Rappaccini +Rasputin, Illyana +Rasputin, Mikhail +Rasputin, Piotr +Rattler +Ravage 2099 +Raving Beauty +Rawhide Kid +Rax +Raxton, Mark +Raye, Frankie +Raza +Razor Fist +Razorback +Rebel +Reaper +Recorder +Red Ghost +Red Guardian +Red Lotus +Redneck +Red Nine +Red Raven +Red Ronin +Red Shift +Red Skull +Red Skull II +Red Wolf +Redeemer +Redwing +Reignfire +Reilly, Ben +Reject +Remnant +Remy LeBeau +Reptyl +Revanche +Reyes, Cecelia +Reynolds, Katherine +Rhiannon +Rhino +Rhodes, James +Ricadonna +Richard Rider +Richards, Jonathan +Richards, Susan +Rick Jones +Ricochet +Rictor +Rider, Richard +Rigellian Recorder +Right-Winger +Ringer +Ringleader +Ringmaster +Ringo Kid +Rintrah +Riot +Riot Grrl +Ripfire +Rl'nnd +Robertson, Robbie +Robert Kelly +Rocket Raccoon +Rocket Racer +Rock +Rock Python +Rodstvow +Rogers, Steve +Rogue +Roma +Rom the Spaceknight +Ronan the Accuser +Rose +Rosenberg, Marsha +Rossovich, Arkady +Roughhouse +Roulette 
+Royal Roy +Ruby Thursday +Ruckus +Rune +Runner +Rush +The Russian +Rusty Collins +Ryder +John Ryker +S'byll +Sabra +Sabreclaw +Sabretooth +Sack +Sage +Sagittarius +Saint Anna +Saint Elmo +Sally Floyd +Salvo +Samson, Leonard +Sanders, James +Sandhurst, Basil +Sandman +Sangre +Santos, Miguel +Sasquatch +Satana +Satannish +Saturnyne +Sauron +Savage Steel +Sawyer, Sam +Saxon, Samuel "Starr" +Sayge +Scaleface +Scalphunter +Scanner +Scarecrow +Scarecrow II +Scarlet Beetle +Scarlet Centurion +Scarlet Scarab +Scarlet Spider +Scarlet Spiders +Scarlet Witch +Scarlotti, Mark +Schemer +Schmidt, Johann +Schultz, Herman +Silver Scorpion +Scimitar +Scintilla +Scorcher +Scorpia +Scorpio +Scorpion +Scourge of the Underworld +Scrambler +Scratch, Nicholas +Scream +Screaming Mimi +Screech +Scrier +Sea Urchin +Sebastian Shaw +Seeker +Sefton, Amanda +Sekhmet +Selene +Senor Muerte / Senor Suerte +Sentry +Senator Robert Kelly +Sepulchre, also known as Shadowoman +Sergeant Fury +Serpentina +Sersi +Set +Seth +Shadow-Hunter +Shadow King +Shadow Slasher +Shadowcat +Shadowmage +Shadrac +Shalla-Bal +Shaman +Shamrock +Shang-Chi +Shanga +Shanna the She-Devil +Shapanka, Gregor +Shaper of Worlds +Shard +Sharon Friedlander +Shathra +Shatter +Shatterfist +Shatterstar +Shaw, Sebastian +Shaw, Shinobi +She-Hulk +She-Thing +She-Venom +Shellshock +Shen Kuei +Shinchuko Lotus +Shriker +Shingen Harada +Shiva +Shiver Man +Shocker +Shockwave +Shola Inkosi +Shooting Star +Shotgun +Shriek +Shroud +Shrunken Bones +Shuma-Gorath +Sidewinder +Siege +Sif +Sigmar +Sigyn +Sikorsky +Sikorsky, Raymond +Silke, Samuel +Silly Seal +Silhouette +Silver +Silver Dagger +Silver Fox +Silver Sable +Silver Samurai +Silver Squire +Silver Surfer +Silverclaw +Silvermane +Simpson, Frank +Sims, Ezekiel +Sin +Sin-Eater +Sinclar, Nekra +Sinclair, Rahne +Sinister +Sir Steel +Siryn +Sise-Neg +Skein +Skids +Skin +Skinhead +Skull the Slayer +Skullfire +Skullcrusher +Skunge the Laxidazian Troll +Skyhawk +Skywalker +Slab +Slade, Frederick 
+Slade, Hamilton +Slapstick +Sleek +Sleeper +Sleepwalker +Slick +Slipstream +Sligguth +Slither +Sludge +Slug +Sluggo +Sluk +Slyde +Smallwood, Marrina +Smart Alec +Smartship Friday +Smasher +Smerdyakov, Dmitri +Smith, Tabitha +Smithers, Samuel +Smuggler I +Smuggler II +Smythe, Alistair +Smythe, Spencer +Snowbird +Snowfall +Sofen, Karla +Solara +Solarman +Solarr +Solo +Soldier X +Solitaire +Solo +Songbird +Son of Satan +Soulfire +Space Phantom +Space Turnip +Spector, Marc +Spectra +Spectral +Specialist +Speed +Speed Demon +Speedball +Speedo +Spellbinder +Spellcheck +Sphinx +Sphinxor +Spider Doppelganger +Spider-Girl +Spider-Ham +Spider-Man +Spider-Slayer +Spider-Woman +Spidercide +Spike +Spike Freeman +Spinnerette +Spiral +Spirit of '76 +Spitfire +Spoilsport +Spoor +Spot +Sprite +Sputnik +Spyder +Spymaster +Spyne +Squirrel Girl +Squidboy +St. Croix, Claudette +St. Croix, Marius +St. Croix, Monet +St. Croix, Nicole +Stacy, George +Stacy, Gwen +Stacy X +Stained Glass Scarlet +Stakar +Stallior +Stane, Ezekiel +Stane, Obadiah +Star-Dancer +Star-Lord +Star Stalker +Star Thief‎ +Starbolt +Stardust +Starfox +Starhawk +Starlight +Stark, Arno +Stark, Tony +Starr the Slayer +Starshine +Starsmore, Jonothon +Starstreak +Stature +Steel Raven +Steel Serpent +Steel Spider +Stegron +Stein, Chase +Stellaris +Stem Cell +Stentor +The Amazing Tanwir Ahmed +The Stepford Cuckoos +Stephen Colbert +Stevie Hunter +Steven Lang +Stewart, Stanley +Stick +Stiletto +Stilt-Man +Stinger +Stingray +Stitch +Stone +Stonecutter +Stonewall +Storm +Storm, Franklin +Storm, Johnny +Storm, Susan +Strange, Stephen +Strange, Victor +Stranger +Stratosfire +Straw Man +Strobe +Strong Guy +Strongarm +Strucker, Baron +Stryfe +Stryker, William +Stunner +Stuntmaster +Stygorr +Stygyro +Styx and Stone +Sublime, John +Sub-Mariner +Sugar Man +Suicide +Sultan +Summers, Alexander +Summers, Christopher +Summers, Gabriel +Summers, Rachel +Summers, Scott +Sunder +Sundragon +Sunfire +Sunpyre +Sunset Bain +Sunspot +Sunstreak 
+Sunstroke +Sunturion +Sun Girl +Super Rabbit +Super-Adaptoid +Supercharger +Superia +Super-Nova +SuperPro +Super Rabbit +Super Sabre +Super-Skrull +Supernalia +Suprema +Supreme Intelligence +Supremor +Surge +Surtur +Sushi +Svarog +Swarm +Sweetface +Swordsman +S'ym +Sybil Dorn +The Symbiote +Synch +Sytsevich, Aleksei +Szardos, Jimaine +Szardos, Margali +Tag +Tagak the Leopard Lord +Tailhook +Tantra +Talbot, Glenn +Talisman +Tamara Rahn +Tana Nile +Tarantula +Tarleton, George +Tarr, Black Jack +Tarot +Tartarus +Taskmaster +Tatterdemalion +Tattletale +Tattoo +Taurus +Taylor, General Orwell +Techno +Tefral the Surveyor +Tempest +Tempest +Tempo +Tempus +Temugin +Tenpin +Termagaira +Terminator +Terminatrix +Terminus +Terrax the Tamer +Terraxia +Terror +Tess-One +Tessa +Tether +Tethlam +Texas Twister +Thakos +Thane Ector +Thanos +Thena +Thermo +Thing +Thinker +Thin Man +Thirty-Three +Thog +Thomas, Everett +Thompson, Flash +Thor +Thor Girl +Thornn +3-D Man +Threnody +Thumb, Tom +Thumbelina +Thunderball +Thunderbird +Thunderbolt +Thunderclap +Thunderfist +Thunderstrike +Thundra +Tiboldt, Maynard +Tiboro +Tiger Shark +Tigra +Timberius +Time Bomb +Timeshadow +Timeslip +Tinkerer +Titan +Titania +Titanium Man +Toad +Toad-In-Waiting +Todd, Mark +Tom Thumb +Tomazooma +Tombstone +Tommy +Tommy Lightning +Tomorrow Man +Toomes, Adrian +Topaz +Topolov, Yuri +Topspin +Torgo the Vampire +Torgo of Mekka +Toro +Torpedo +Torrent +Torso +Tower +Toxin +Toynbee, Mortimer +Trader +Trapper +Trapster +Trainer, Carolyn +Tran, Chloe +T-Ray +Tremolo +Trevor Fitzroy +Tri-Man +Triathlon +Trick Shot +Trioccula +Triton +Troll +Truffaut, Arlette +Trump +Tugun +Tuc +Tumbler +Tundra +Turac +Turbo +Turner D. 
Century +Tusk +Tutinax the Mountain-Mover +Two-Gun Kid +Twoyoungmen, Elizabeth +Twoyoungmen, Michael +Tyger Tiger +Typeface +Typhoid +Typhoid Mary +Typhon +Tyr +Tyrak +Tyrannosaur +Tyrannus +Tyrant +Tzabaoth +Uatu +U-Go Girl +Ulik +Ultimo +Ultimus +Ultra-Marine +Ultragirl +Ultron +Umbo +U-Man +Umar +Uncle Ben Parker +Unicorn +Uni-Mind +Union Jack +Unseen +Unthinnk +Unus the Untouchable +Unuscione +Unuscione, Carmella +Unuscione, Angelo +Ursa Major +Urthona +USAgent +Urich, Ben +Urich, Phil +Ulysses +Utgard-Loki +Vagabond +Vague +Vakume +Valentina Allegra de Fontaine +Valerie Cooper +Valinor +Valkin +Valkyrie +Valtorr +Vamp +Vampire by Night +van Dyne, Janet +van Horne, Katrina Luisa +van Lunt, Cornelius +Vance Astro +Vanguard +Vanisher +Vapor +Vargas +Varnae +Vashti +Vaughn, Wendell +Vavavoom +Vector +Vegas +Veil +Veil +Velsing, Bram +Vengeance +Venom +Venom +Venomm +Ventura, Sharon +Venus +Venus Dee Milo +Veritas +Vermin +Vertigo +Vesta +Vibraxas +Vibro +Victor Mancha +Victor von Doom +Victorius +Vidar +Vincente +Vindaloo +Vindicator +Viper +Virako +Virgo +Vishanti +Visimajoris +Vision +Vivisector +Vixen +Volcana +Volla +Volpan +Volstagg +Voyant, Claire +Vulcan +Vulture +Wagner, Kurt +Wagner, Talia Josephine +John Walker +Mary Walker +Patsy Walker +Wallflower +Jennifer Walters +War +War V +War Eagle +War Machine +Warbird +Warhawk +Warlock +Warlock, Adam +Warpath +Miles Warren +Warrior Woman +Warstar +Warstrike +Warwolves +Scott Washington +Washout +Wasp +Watcher +Water Wizard +Watoomb +Watson, Mary Jane +Weapon X +Wendigo +Werewolf by Night +Western Kid +Whiplash +Whirlwind +Whistler +White Fang +Whitemane, Aelfyre +Whitemane, Kofi +Whiteout +White Pilgrim +White Rabbit +White Queen +White Tiger +Whitman, Debra +Whizzer +Wiccan +Wicked +Widget +Wild Child +Wildboys +Wilder, Alex +Wildpride +Wildside +Wild Thing +Will o' the Wisp +Williams, Eric +Williams, Simon +Wilson, Sam +Wilson, Wade +Wind Dancer +Wind Warrior +Windeagle +Windshear +Wing, Colleen +Wingfoot, 
Wyatt +Winter Soldier +The Wink +Winky Man +Wisdom, Pete +Wisdom, Romany +Witchfire +Wittman, Bentley +Wizard +Wiz Kid +Wolf +Wolfsbane +Wolverine +Wonder Man +Wong +Woo, Jimmy +Woodgod +Worm +Worthington, Warren III +Wraith +Wrath +Wrecker +Wreckage +Mr. Wu +Wundarr the Aquarian +Wyndham, Herbert Edgar +Wysper +X-23 +X-Cutioner +X-Man +X-Treme +Xandu +Charles Xavier +Xavin +Xemnu the Titan +Xemu +Xi'an Chi Xan +Xorn +Xorr the God-Jewel +X-Ray +Y'Garon +Yandroth +Yashida, Mariko +Yellow Claw +Yellowjacket +Yeti +Yith +Ymir +Yondu +Yrial +Yukio +Yuriko Oyama +Yukon Jack +Zabu +Zach +Zaladane +Zarathos +Zarek +Zarrko, the Tomorrow Man +Zartra +Zebediah Killgrave +Zeitgeist +Zemo, Heinrich +Zemo, Helmut +Zero +Zero-G +Zeus +Ziggy Pig +Zip-Zap +Zodiak +Arnim Zola +Zom +Zombie +Zuras +Zzzax diff --git a/modules/elasticsearch/src/main/java/org/apache/lucene/search/ShardFieldDocSortedHitQueue.java b/modules/elasticsearch/src/main/java/org/apache/lucene/search/ShardFieldDocSortedHitQueue.java new file mode 100644 index 00000000000..7d3324ef884 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/apache/lucene/search/ShardFieldDocSortedHitQueue.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.lucene.search; + +/** + * @author kimchy (Shay Banon) + */ +// LUCENE TRACK +public class ShardFieldDocSortedHitQueue extends FieldDocSortedHitQueue { + + public ShardFieldDocSortedHitQueue(SortField[] fields, int size) { + super(size); + setFields(fields); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchException.java new file mode 100644 index 00000000000..6990f2c5180 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchException.java @@ -0,0 +1,133 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) (Shay Banon) + */ +public class ElasticSearchException extends RuntimeException { + + /** + * Construct a NestedRuntimeException with the specified detail message. + * + * @param msg the detail message + */ + public ElasticSearchException(String msg) { + super(msg); + } + + /** + * Construct a NestedRuntimeException with the specified detail message + * and nested exception. 
+ * + * @param msg the detail message + * @param cause the nested exception + */ + public ElasticSearchException(String msg, Throwable cause) { + super(msg, cause); + } + + public Throwable unwrapCause() { + return ExceptionsHelper.unwrapCause(this); + } + + /** + * Return the detail message, including the message from the nested exception + * if there is one. + */ + public String getDetailedMessage() { + if (getCause() != null) { + StringBuilder sb = new StringBuilder(); + if (super.getMessage() != null) { + sb.append(super.getMessage()).append("; "); + } + sb.append("nested exception is ").append(getCause()); + return sb.toString(); + } else { + return super.getMessage(); + } + } + + + /** + * Retrieve the innermost cause of this exception, if any. + * + * @return the innermost exception, or null if none + * @since 2.0 + */ + public Throwable getRootCause() { + Throwable rootCause = null; + Throwable cause = getCause(); + while (cause != null && cause != rootCause) { + rootCause = cause; + cause = cause.getCause(); + } + return rootCause; + } + + /** + * Retrieve the most specific cause of this exception, that is, + * either the innermost cause (root cause) or this exception itself. + *

Differs from {@link #getRootCause()} in that it falls back + * to the present exception if there is no root cause. + * + * @return the most specific cause (never null) + * @since 2.0.3 + */ + public Throwable getMostSpecificCause() { + Throwable rootCause = getRootCause(); + return (rootCause != null ? rootCause : this); + } + + /** + * Check whether this exception contains an exception of the given type: + * either it is of the given class itself or it contains a nested cause + * of the given type. + * + * @param exType the exception type to look for + * @return whether there is a nested exception of the specified type + */ + public boolean contains(Class exType) { + if (exType == null) { + return false; + } + if (exType.isInstance(this)) { + return true; + } + Throwable cause = getCause(); + if (cause == this) { + return false; + } + if (cause instanceof ElasticSearchException) { + return ((ElasticSearchException) cause).contains(exType); + } else { + while (cause != null) { + if (exType.isInstance(cause)) { + return true; + } + if (cause.getCause() == cause) { + break; + } + cause = cause.getCause(); + } + return false; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchIllegalArgumentException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchIllegalArgumentException.java new file mode 100644 index 00000000000..eac49adbe9e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchIllegalArgumentException.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) + */ +public class ElasticSearchIllegalArgumentException extends ElasticSearchException { + + public ElasticSearchIllegalArgumentException() { + super(null); + } + + public ElasticSearchIllegalArgumentException(String msg) { + super(msg); + } + + public ElasticSearchIllegalArgumentException(String msg, Throwable cause) { + super(msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchIllegalStateException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchIllegalStateException.java new file mode 100644 index 00000000000..356c02fc7ae --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchIllegalStateException.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) + */ +public class ElasticSearchIllegalStateException extends ElasticSearchException { + + public ElasticSearchIllegalStateException() { + super(null); + } + + public ElasticSearchIllegalStateException(String msg) { + super(msg); + } + + public ElasticSearchIllegalStateException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchInterruptedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchInterruptedException.java new file mode 100644 index 00000000000..32177d82635 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchInterruptedException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) (Shay Banon) + */ +public class ElasticSearchInterruptedException extends ElasticSearchException { + + public ElasticSearchInterruptedException(String message) { + super(message); + } + + public ElasticSearchInterruptedException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchNullPointerException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchNullPointerException.java new file mode 100644 index 00000000000..55859d71269 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchNullPointerException.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) + */ +public class ElasticSearchNullPointerException extends ElasticSearchException { + + public ElasticSearchNullPointerException() { + super(null); + } + + public ElasticSearchNullPointerException(String msg) { + super(msg); + } + + public ElasticSearchNullPointerException(String msg, Throwable cause) { + super(msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchParseException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchParseException.java new file mode 100644 index 00000000000..9129252f8fe --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchParseException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) + */ +public class ElasticSearchParseException extends ElasticSearchException { + + public ElasticSearchParseException(String msg) { + super(msg); + } + + public ElasticSearchParseException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchWrapperException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchWrapperException.java new file mode 100644 index 00000000000..28694071f1a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ElasticSearchWrapperException.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) + */ +public interface ElasticSearchWrapperException { + + Throwable getCause(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/ExceptionsHelper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/ExceptionsHelper.java new file mode 100644 index 00000000000..8cc5de8e91f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch; + +/** + * @author kimchy (Shay Banon) + */ +public final class ExceptionsHelper { + + public static Throwable unwrapCause(Throwable t) { + Throwable result = t; + while (result instanceof ElasticSearchWrapperException) { + result = t.getCause(); + } + return result; + } + + public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) { + int counter = initialCounter + 1; + if (t.getCause() != null) { + StringBuilder sb = new StringBuilder(); + while (t != null) { + if (t.getMessage() != null) { + sb.append(t.getMessage()); + if (!newLines) { + sb.append("; "); + } + } + t = t.getCause(); + if (t != null) { + if (newLines) { + sb.append("\n"); + for (int i = 0; i < counter; i++) { + sb.append("\t"); + } + } else { + sb.append("nested: "); + } + } + counter++; + } + return sb.toString(); + } else { + return t.getMessage(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/Version.java b/modules/elasticsearch/src/main/java/org/elasticsearch/Version.java new file mode 100644 index 00000000000..60aaf0280e2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/Version.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch; + +import java.io.InputStream; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Properties; +import java.util.TimeZone; + +/** + * @author kimchy (Shay Banon) + */ +public class Version { + + private static final String number; + private static final String date; + private static final boolean devBuild; + + + static { + Properties props = new Properties(); + try { + InputStream stream = Version.class.getClassLoader().getResourceAsStream("org/elasticsearch/version.properties"); + props.load(stream); + stream.close(); + } catch (Exception e) { + // ignore + } + + number = props.getProperty("number", "0.0.0"); + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); + sdf.setTimeZone(TimeZone.getTimeZone("UTC")); + date = props.getProperty("date", sdf.format(new Date())); + devBuild = Boolean.parseBoolean(props.getProperty("devBuild", "false")); + } + + public static String number() { + return number; + } + + public static String date() { + return date; + } + + public static boolean devBuild() { + return devBuild; + } + + public static String full() { + StringBuilder sb = new StringBuilder("ElasticSearch/"); + sb.append(number); + if (devBuild) { + sb.append("/").append(date); + sb.append("/dev"); + } + return sb.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/Action.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/Action.java new file mode 100644 index 00000000000..9481f51f838 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/Action.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.util.Nullable; + +/** + * @author kimchy (Shay Banon) + */ +public interface Action { + + ActionFuture submit(Request request) throws ElasticSearchException; + + ActionFuture submit(Request request, @Nullable ActionListener listener); + + void execute(Request request, ActionListener listener); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionFuture.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionFuture.java new file mode 100644 index 00000000000..097cc30db22 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionFuture.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchException; + +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * @author kimchy (Shay Banon) + */ +public interface ActionFuture extends Future { + + T actionGet() throws ElasticSearchException; + + T actionGet(long timeoutMillis) throws ElasticSearchException, TimeoutException; + + T actionGet(long timeout, TimeUnit unit) throws ElasticSearchException, TimeoutException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionListener.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionListener.java new file mode 100644 index 00000000000..6e5e3d23834 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionListener.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +/** + * @author kimchy (Shay Banon) + */ +public interface ActionListener { + + void onResponse(Response response); + + void onFailure(Throwable e); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionRequest.java new file mode 100644 index 00000000000..fa23ebfcd1b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionRequest.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.util.io.Streamable; + +/** + * @author kimchy (Shay Banon) + */ +public interface ActionRequest extends Streamable { + + ActionRequestValidationException validate(); + + /** + * Should the response listener be executed on a thread or not. + * + *

When not executing on a thread, it will either be executed on the calling thread, or + * on an expensive, IO based, thread. + */ + boolean listenerThreaded(); + + /** + * Sets if the response listener be executed on a thread or not. + */ + ActionRequest listenerThreaded(boolean listenerThreaded); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java new file mode 100644 index 00000000000..27c9b3fa90a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchException; + +import java.util.ArrayList; +import java.util.List; + +/** + * @author kimchy (Shay Banon) + */ +public class ActionRequestValidationException extends ElasticSearchException { + + private final List validationErrors = new ArrayList(); + + public ActionRequestValidationException() { + super(null); + } + + public void addValidationError(String error) { + validationErrors.add(error); + } + + public List validationErrors() { + return validationErrors; + } + + @Override public String getMessage() { + StringBuilder sb = new StringBuilder(); + sb.append("Validation Failed: "); + int index = 0; + for (String error : validationErrors) { + sb.append(++index).append(": ").append(error).append(";"); + } + return sb.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionResponse.java new file mode 100644 index 00000000000..54f380318a9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ActionResponse.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.util.io.Streamable; + +/** + * @author kimchy (Shay Banon) + */ +public interface ActionResponse extends Streamable { +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/Actions.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/Actions.java new file mode 100644 index 00000000000..19cbc61bb00 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/Actions.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.cluster.ClusterState; + +/** + * @author kimchy (Shay Banon) + */ +public class Actions { + + public static String[] processIndices(ClusterState state, String[] indices) { + if (indices == null || indices.length == 0) { + return state.routingTable().indicesRouting().keySet().toArray(new String[state.routingTable().indicesRouting().keySet().size()]); + } + if (indices.length == 1) { + if (indices[0].length() == 0) { + return state.routingTable().indicesRouting().keySet().toArray(new String[state.routingTable().indicesRouting().keySet().size()]); + } + if (indices[0].equals("_all")) { + return state.routingTable().indicesRouting().keySet().toArray(new String[state.routingTable().indicesRouting().keySet().size()]); + } + } + return indices; + } + + public static ActionRequestValidationException addValidationError(String error, ActionRequestValidationException validationException) { + if (validationException == null) { + validationException = new ActionRequestValidationException(); + } + validationException.addValidationError(error); + return validationException; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/FailedNodeException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/FailedNodeException.java new file mode 100644 index 00000000000..622182b2d19 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/FailedNodeException.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class FailedNodeException extends ElasticSearchException { + + private final String nodeId; + + public FailedNodeException(String nodeId, String msg, Throwable cause) { + super(msg, cause); + this.nodeId = nodeId; + } + + public String nodeId() { + return this.nodeId; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java new file mode 100644 index 00000000000..41dedc96980 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class NoShardAvailableActionException extends IndexShardException { + + public NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/NoSuchNodeException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/NoSuchNodeException.java new file mode 100644 index 00000000000..414da698b98 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/NoSuchNodeException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +/** + * @author kimchy (Shay Banon) + */ +public class NoSuchNodeException extends FailedNodeException { + + public NoSuchNodeException(String nodeId) { + super(nodeId, "No such node [" + nodeId + "]", null); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java new file mode 100644 index 00000000000..1dd03db884c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class PrimaryMissingActionException extends ElasticSearchException { + + public PrimaryMissingActionException(String message) { + super(message); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/PrimaryNotStartedActionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/PrimaryNotStartedActionException.java new file mode 100644 index 00000000000..fc73c3875da --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/PrimaryNotStartedActionException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class PrimaryNotStartedActionException extends IndexShardException { + + public PrimaryNotStartedActionException(ShardId shardId, String message) { + super(shardId, message); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ShardNotActiveException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ShardNotActiveException.java new file mode 100644 index 00000000000..9386dfaf3e6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ShardNotActiveException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardNotActiveException extends ShardOperationFailedException { + + public ShardNotActiveException(ShardId shardId) { + super(shardId, "not active", null); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java new file mode 100644 index 00000000000..d95c124fecf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchWrapperException; +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * An exception indicating that a failure occurred performing an operation on the shard. 
+ * + * @author kimchy (Shay Banon) + */ +public class ShardOperationFailedException extends IndexShardException implements ElasticSearchWrapperException { + + public ShardOperationFailedException(ShardId shardId, Throwable cause) { + super(shardId, "", cause); + } + + public ShardOperationFailedException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/ThreadingModel.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ThreadingModel.java new file mode 100644 index 00000000000..a04f2e76239 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/ThreadingModel.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; + +/** + * @author kimchy (Shay Banon) + */ +public enum ThreadingModel { + NONE((byte) 0), + OPERATION((byte) 1), + LISTENER((byte) 2), + OPERATION_LISTENER((byte) 3); + + private byte id; + + ThreadingModel(byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + + /** + * true if the actual operation the action represents will be executed + * on a different thread than the calling thread (assuming it will be executed + * on the same node). + */ + public boolean threadedOperation() { + return this == OPERATION || this == OPERATION_LISTENER; + } + + /** + * true if the invocation of the action result listener will be executed + * on a different thread (than the calling thread or an "expensive" thread, like the + * IO thread). + */ + public boolean threadedListener() { + return this == LISTENER || this == OPERATION_LISTENER; + } + + public ThreadingModel addListener() { + if (this == NONE) { + return LISTENER; + } + if (this == OPERATION) { + return OPERATION_LISTENER; + } + return this; + } + + public ThreadingModel removeListener() { + if (this == LISTENER) { + return NONE; + } + if (this == OPERATION_LISTENER) { + return OPERATION; + } + return this; + } + + public ThreadingModel addOperation() { + if (this == NONE) { + return OPERATION; + } + if (this == LISTENER) { + return OPERATION_LISTENER; + } + return this; + } + + public ThreadingModel removeOperation() { + if (this == OPERATION) { + return NONE; + } + if (this == OPERATION_LISTENER) { + return LISTENER; + } + return this; + } + + public static ThreadingModel fromId(byte id) { + if (id == 0) { + return NONE; + } else if (id == 1) { + return OPERATION; + } else if (id == 2) { + return LISTENER; + } else if (id == 3) { + return OPERATION_LISTENER; + } else { + throw new ElasticSearchIllegalArgumentException("No threading model for [" + id + "]"); + } + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/action/TransportActionModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/TransportActionModule.java new file mode 100644 index 00000000000..bf26f536731 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/TransportActionModule.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import com.google.inject.AbstractModule; +import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfo; +import org.elasticsearch.action.admin.cluster.ping.broadcast.TransportBroadcastPingAction; +import org.elasticsearch.action.admin.cluster.ping.replication.TransportIndexReplicationPingAction; +import org.elasticsearch.action.admin.cluster.ping.replication.TransportReplicationPingAction; +import org.elasticsearch.action.admin.cluster.ping.replication.TransportShardReplicationPingAction; +import org.elasticsearch.action.admin.cluster.ping.single.TransportSinglePingAction; +import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; +import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportIndexFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; +import org.elasticsearch.action.admin.indices.gateway.snapshot.TransportGatewaySnapshotAction; +import org.elasticsearch.action.admin.indices.gateway.snapshot.TransportIndexGatewaySnapshotAction; +import org.elasticsearch.action.admin.indices.gateway.snapshot.TransportShardGatewaySnapshotAction; +import org.elasticsearch.action.admin.indices.mapping.create.TransportCreateMappingAction; +import org.elasticsearch.action.admin.indices.refresh.TransportIndexRefreshAction; +import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; +import org.elasticsearch.action.admin.indices.status.TransportIndicesStatusAction; +import org.elasticsearch.action.count.TransportCountAction; +import org.elasticsearch.action.delete.TransportDeleteAction; +import 
org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction; +import org.elasticsearch.action.deletebyquery.TransportIndexDeleteByQueryAction; +import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction; +import org.elasticsearch.action.get.TransportGetAction; +import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; +import org.elasticsearch.action.search.type.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportActionModule extends AbstractModule { + + @Override protected void configure() { + + bind(TransportNodesInfo.class).asEagerSingleton(); + bind(TransportClusterStateAction.class).asEagerSingleton(); + + bind(TransportSinglePingAction.class).asEagerSingleton(); + bind(TransportBroadcastPingAction.class).asEagerSingleton(); + bind(TransportShardReplicationPingAction.class).asEagerSingleton(); + bind(TransportIndexReplicationPingAction.class).asEagerSingleton(); + bind(TransportReplicationPingAction.class).asEagerSingleton(); + + bind(TransportIndicesStatusAction.class).asEagerSingleton(); + bind(TransportCreateIndexAction.class).asEagerSingleton(); + bind(TransportCreateMappingAction.class).asEagerSingleton(); + bind(TransportDeleteIndexAction.class).asEagerSingleton(); + + bind(TransportShardGatewaySnapshotAction.class).asEagerSingleton(); + bind(TransportIndexGatewaySnapshotAction.class).asEagerSingleton(); + bind(TransportGatewaySnapshotAction.class).asEagerSingleton(); + + bind(TransportShardRefreshAction.class).asEagerSingleton(); + bind(TransportIndexRefreshAction.class).asEagerSingleton(); + bind(TransportRefreshAction.class).asEagerSingleton(); + + bind(TransportShardFlushAction.class).asEagerSingleton(); + bind(TransportIndexFlushAction.class).asEagerSingleton(); + bind(TransportFlushAction.class).asEagerSingleton(); + + bind(TransportIndexAction.class).asEagerSingleton(); 
+ + bind(TransportGetAction.class).asEagerSingleton(); + + bind(TransportDeleteAction.class).asEagerSingleton(); + + bind(TransportShardDeleteByQueryAction.class).asEagerSingleton(); + bind(TransportIndexDeleteByQueryAction.class).asEagerSingleton(); + bind(TransportDeleteByQueryAction.class).asEagerSingleton(); + + bind(TransportCountAction.class).asEagerSingleton(); + + bind(TransportSearchCache.class).asEagerSingleton(); + bind(TransportSearchDfsQueryThenFetchAction.class).asEagerSingleton(); + bind(TransportSearchQueryThenFetchAction.class).asEagerSingleton(); + bind(TransportSearchDfsQueryAndFetchAction.class).asEagerSingleton(); + bind(TransportSearchQueryAndFetchAction.class).asEagerSingleton(); + bind(TransportSearchAction.class).asEagerSingleton(); + + bind(TransportSearchScrollQueryThenFetchAction.class).asEagerSingleton(); + bind(TransportSearchScrollAction.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/TransportActions.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/TransportActions.java new file mode 100644 index 00000000000..039ea2b440a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/TransportActions.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/**
 * Constants for the transport-layer action names used to register and
 * dispatch handlers between nodes. The nested classes group the admin
 * actions by scope (indices vs. cluster), mirroring the client API layout.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportActions {

    // Document-level and query-level actions.
    public static final String INDEX = "indices/index/shard/index";

    public static final String COUNT = "indices/count";

    public static final String DELETE = "indices/index/shard/delete";

    public static final String DELETE_BY_QUERY = "indices/deleteByQuery";

    public static final String GET = "indices/get";

    public static final String SEARCH = "indices/search";

    public static final String SEARCH_SCROLL = "indices/searchScroll";

    /** Administrative actions. */
    public static class Admin {

        /** Index-scoped admin actions. */
        public static class Indices {
            public static final String CREATE = "indices/createIndex";
            public static final String DELETE = "indices/deleteIndex";
            public static final String FLUSH = "indices/flush";
            public static final String REFRESH = "indices/refresh";
            public static final String STATUS = "indices/status";

            public static class Gateway {
                public static final String SNAPSHOT = "indices/gateway/snapshot";
            }

            public static class Mapping {
                public static final String CREATE = "indices/createMapping";
            }
        }

        /** Cluster-scoped admin actions. */
        public static class Cluster {

            // NOTE(review): cluster action names carry a leading '/' while the
            // indices ones do not; kept as-is since handlers register under
            // these exact strings.
            public static final String STATE = "/cluster/state";

            public static class Node {
                public static final String INFO = "/cluster/nodes/info";
            }

            public static class Ping {
                public static final String SINGLE = "/cluster/ping/single";
                public static final String REPLICATION = "/cluster/ping/replication";
                public static final String BROADCAST = "/cluster/ping/broadcast";
            }
        }
    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.info; + +import org.elasticsearch.action.support.nodes.NodeOperationResponse; +import org.elasticsearch.cluster.node.Node; + +import java.io.DataInput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class NodeInfo extends NodeOperationResponse { + + protected NodeInfo() { + } + + public NodeInfo(Node node) { + super(node); + } + + public static NodeInfo readNodeInfo(DataInput in) throws ClassNotFoundException, IOException { + NodeInfo nodeInfo = new NodeInfo(); + nodeInfo.readFrom(in); + return nodeInfo; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java new file mode 100644 index 00000000000..457d3dc306e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic 
Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.info; + +import org.elasticsearch.action.support.nodes.NodesOperationRequest; + +/** + * @author kimchy (Shay Banon) + */ +public class NodesInfoRequest extends NodesOperationRequest { + + protected NodesInfoRequest() { + } + + public NodesInfoRequest(String... nodesIds) { + super(nodesIds); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java new file mode 100644 index 00000000000..a0b9fba250a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.info; + +import org.elasticsearch.action.support.nodes.NodesOperationResponse; +import org.elasticsearch.cluster.ClusterName; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class NodesInfoResponse extends NodesOperationResponse { + + public NodesInfoResponse() { + } + + public NodesInfoResponse(ClusterName clusterName, NodeInfo[] nodes) { + super(clusterName, nodes); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + nodes = new NodeInfo[in.readInt()]; + for (int i = 0; i < nodes.length; i++) { + nodes[i] = NodeInfo.readNodeInfo(in); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeInt(nodes.length); + for (NodeInfo node : nodes) { + node.writeTo(out); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfo.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfo.java new file mode 100644 index 00000000000..6e2f99a79d5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfo.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.info; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.nodes.NodeOperationRequest; +import org.elasticsearch.action.support.nodes.TransportNodesOperationAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportNodesInfo extends TransportNodesOperationAction { + + @Inject public TransportNodesInfo(Settings settings, ClusterName clusterName, ThreadPool threadPool, + ClusterService clusterService, TransportService transportService) { + super(settings, clusterName, threadPool, clusterService, transportService); + } + + @Override protected String transportAction() { + return TransportActions.Admin.Cluster.Node.INFO; + } + + @Override protected String transportNodeAction() { + return "/cluster/nodes/info/node"; + } 
+ + @Override protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, AtomicReferenceArray responses) { + final List nodesInfos = new ArrayList(); + for (int i = 0; i < responses.length(); i++) { + Object resp = responses.get(i); + if (resp instanceof NodeInfo) { + nodesInfos.add((NodeInfo) resp); + } + } + return new NodesInfoResponse(clusterName, nodesInfos.toArray(new NodeInfo[nodesInfos.size()])); + } + + @Override protected NodesInfoRequest newRequest() { + return new NodesInfoRequest(); + } + + @Override protected NodeInfoRequest newNodeRequest() { + return new NodeInfoRequest(); + } + + @Override protected NodeInfoRequest newNodeRequest(String nodeId, NodesInfoRequest request) { + return new NodeInfoRequest(nodeId); + } + + @Override protected NodeInfo newNodeResponse() { + return new NodeInfo(); + } + + @Override protected NodeInfo nodeOperation(NodeInfoRequest nodeInfoRequest) throws ElasticSearchException { + return new NodeInfo(clusterService.state().nodes().localNode()); + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + protected static class NodeInfoRequest extends NodeOperationRequest { + + private NodeInfoRequest() { + } + + private NodeInfoRequest(String nodeId) { + super(nodeId); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastPingRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastPingRequest.java new file mode 100644 index 00000000000..2727ec62077 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastPingRequest.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.ping.broadcast; + +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; +import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class BroadcastPingRequest extends BroadcastOperationRequest { + + BroadcastPingRequest() { + } + + public BroadcastPingRequest(String index) { + super(new String[]{index}, null); + } + + public BroadcastPingRequest(String... 
indices) { + super(indices, null); + } + + @Override public BroadcastPingRequest operationThreading(BroadcastOperationThreading operationThreading) { + super.operationThreading(operationThreading); + return this; + } + + @Override public BroadcastPingRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + public BroadcastPingRequest queryHint(String queryHint) { + this.queryHint = queryHint; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastPingResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastPingResponse.java new file mode 100644 index 00000000000..69d1eede6bb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastPingResponse.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.ping.broadcast; + +import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class BroadcastPingResponse extends BroadcastOperationResponse { + + BroadcastPingResponse() { + + } + + public BroadcastPingResponse(int successfulShards, int failedShards) { + super(successfulShards, failedShards); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastShardPingRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastShardPingRequest.java new file mode 100644 index 00000000000..df3566ac289 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastShardPingRequest.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.ping.broadcast; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class BroadcastShardPingRequest extends BroadcastShardOperationRequest { + + BroadcastShardPingRequest() { + + } + + public BroadcastShardPingRequest(String index, int shardId) { + super(index, shardId); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastShardPingResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastShardPingResponse.java new file mode 100644 index 00000000000..567a722fd9d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/BroadcastShardPingResponse.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.ping.broadcast; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class BroadcastShardPingResponse extends BroadcastShardOperationResponse { + + BroadcastShardPingResponse() { + + } + + public BroadcastShardPingResponse(String index, int shardId) { + super(index, shardId); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/TransportBroadcastPingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/TransportBroadcastPingAction.java new file mode 100644 index 00000000000..5c8f7acf0ba --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/broadcast/TransportBroadcastPingAction.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.broadcast;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.action.TransportActions;
import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.atomic.AtomicReferenceArray;

/**
 * Transport action for the broadcast ping: fans a no-op request out to every
 * shard of the targeted indices and reports how many shards responded.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportBroadcastPingAction extends TransportBroadcastOperationAction {

    @Inject
    public TransportBroadcastPingAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                        TransportService transportService, IndicesService indicesService) {
        super(settings, threadPool, clusterService, transportService, indicesService);
    }

    @Override
    protected String transportAction() {
        return TransportActions.Admin.Cluster.Ping.BROADCAST;
    }

    @Override
    protected String transportShardAction() {
        return "/cluster/ping/broadcast/shard";
    }

    @Override
    protected BroadcastPingRequest newRequest() {
        return new BroadcastPingRequest();
    }

    @Override
    protected BroadcastPingResponse newResponse(BroadcastPingRequest request, AtomicReferenceArray shardsResponses) {
        // A null slot means the shard-level operation failed (exceptions are not
        // accumulated, see accumulateExceptions() below).
        int total = shardsResponses.length();
        int successful = 0;
        for (int i = 0; i < total; i++) {
            if (shardsResponses.get(i) != null) {
                successful++;
            }
        }
        return new BroadcastPingResponse(successful, total - successful);
    }

    @Override
    protected boolean accumulateExceptions() {
        return false;
    }

    @Override
    protected BroadcastShardPingRequest newShardRequest() {
        return new BroadcastShardPingRequest();
    }

    @Override
    protected BroadcastShardPingRequest newShardRequest(ShardRouting shard, BroadcastPingRequest request) {
        return new BroadcastShardPingRequest(shard.index(), shard.id());
    }

    @Override
    protected BroadcastShardPingResponse newShardResponse() {
        return new BroadcastShardPingResponse();
    }

    @Override
    protected BroadcastShardPingResponse shardOperation(BroadcastShardPingRequest request) throws ElasticSearchException {
        // Pinging a shard requires no work; successfully reaching it is the answer.
        return new BroadcastShardPingResponse();
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest;
import org.elasticsearch.util.TimeValue;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Per-index request of a replication ping, derived from the multi-index
 * {@link ReplicationPingRequest} (one instance per targeted index).
 *
 * @author kimchy (Shay Banon)
 */
public class IndexReplicationPingRequest extends IndexReplicationOperationRequest {

    public IndexReplicationPingRequest(String index) {
        this.index = index;
    }

    // Built by the transport action when splitting a multi-index request;
    // inherits the caller's timeout.
    IndexReplicationPingRequest(ReplicationPingRequest request, String index) {
        this.index = index;
        this.timeout = request.timeout();
    }

    // Used by the transport layer when deserializing an incoming request.
    IndexReplicationPingRequest() {
    }

    public IndexReplicationPingRequest timeout(TimeValue timeout) {
        this.timeout = timeout;
        return this;
    }

    // @Override added for consistency with the sibling ping request/response
    // classes; both methods merely delegate to the base class.
    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        super.readFrom(in);
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        super.writeTo(out);
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Per-index result of a replication ping: how many of the index's shards
 * answered successfully and how many failed.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexReplicationPingResponse implements ActionResponse, Streamable {

    private String index;

    private int successfulShards;

    private int failedShards;

    IndexReplicationPingResponse(String index, int successfulShards, int failedShards) {
        this.index = index;
        this.successfulShards = successfulShards;
        this.failedShards = failedShards;
    }

    // Used by the transport layer when deserializing an incoming response.
    IndexReplicationPingResponse() {
    }

    public String index() {
        return index;
    }

    public int successfulShards() {
        return successfulShards;
    }

    public int failedShards() {
        return failedShards;
    }

    public int totalShards() {
        return successfulShards + failedShards;
    }

    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        // Field order must mirror writeTo().
        index = in.readUTF();
        successfulShards = in.readInt();
        failedShards = in.readInt();
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(index);
        out.writeInt(successfulShards);
        out.writeInt(failedShards);
    }
}
+ */ + +package org.elasticsearch.action.admin.cluster.ping.replication; + +import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +/** + * @author kimchy (Shay Banon) + */ +public class ReplicationPingRequest extends IndicesReplicationOperationRequest { + + public ReplicationPingRequest(String index) { + this(new String[]{index}); + } + + public ReplicationPingRequest(String... indices) { + this.indices = indices; + } + + ReplicationPingRequest() { + + } + + @Override public ReplicationPingRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + public ReplicationPingRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/replication/ReplicationPingResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/replication/ReplicationPingResponse.java new file mode 100644 index 00000000000..c872aa3f23e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/replication/ReplicationPingResponse.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * Aggregated result of a replication ping: one {@link IndexReplicationPingResponse}
 * per targeted index, keyed by index name.
 *
 * @author kimchy (Shay Banon)
 */
public class ReplicationPingResponse implements ActionResponse, Streamable {

    // Parameterized (was raw): index(String) returns a typed value, which
    // requires the map to be Map<String, IndexReplicationPingResponse>.
    private final Map<String, IndexReplicationPingResponse> responses =
            new HashMap<String, IndexReplicationPingResponse>();

    // Used by the transport layer when deserializing an incoming response.
    ReplicationPingResponse() {
    }

    public Map<String, IndexReplicationPingResponse> indices() {
        return responses;
    }

    public IndexReplicationPingResponse index(String index) {
        return responses.get(index);
    }

    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        int size = in.readInt();
        for (int i = 0; i < size; i++) {
            IndexReplicationPingResponse response = new IndexReplicationPingResponse();
            response.readFrom(in);
            responses.put(response.index(), response);
        }
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        // Size-prefixed so readFrom() knows how many entries to expect.
        out.writeInt(responses.size());
        for (IndexReplicationPingResponse response : responses.values()) {
            response.writeTo(out);
        }
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Per-shard request of a replication ping, derived from the per-index
 * {@link IndexReplicationPingRequest}.
 *
 * @author kimchy (Shay Banon)
 */
public class ShardReplicationPingRequest extends ShardReplicationOperationRequest {

    private int shardId;

    public ShardReplicationPingRequest(IndexReplicationPingRequest request, int shardId) {
        // Inherit the index and the caller's timeout from the index-level request.
        this(request.index(), shardId);
        timeout = request.timeout();
    }

    public ShardReplicationPingRequest(String index, int shardId) {
        this.index = index;
        this.shardId = shardId;
    }

    // Used by the transport layer when deserializing an incoming request.
    ShardReplicationPingRequest() {
    }

    public int shardId() {
        return this.shardId;
    }

    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        super.readFrom(in);
        shardId = in.readInt();
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        super.writeTo(out);
        out.writeInt(shardId);
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Per-shard response of a replication ping. Intentionally empty: the mere
 * arrival of the response signals the shard is reachable, so nothing is
 * serialized.
 *
 * @author kimchy (Shay Banon)
 */
public class ShardReplicationPingResponse implements ActionResponse, Streamable {

    ShardReplicationPingResponse() {
    }

    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        // No state to read.
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        // No state to write.
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import com.google.inject.Inject;
import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.index.Index;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.atomic.AtomicReferenceArray;

/**
 * Index-level transport action of the replication ping: resolves the index's
 * shard groups, delegates each to the shard-level action, and aggregates the
 * per-shard outcomes into an {@link IndexReplicationPingResponse}.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportIndexReplicationPingAction extends TransportIndexReplicationOperationAction {

    private final ClusterService clusterService;

    @Inject
    public TransportIndexReplicationPingAction(Settings settings, ClusterService clusterService,
                                               TransportService transportService, ThreadPool threadPool,
                                               TransportShardReplicationPingAction shardReplicationPingAction) {
        super(settings, transportService, threadPool, shardReplicationPingAction);
        this.clusterService = clusterService;
    }

    @Override
    protected IndexReplicationPingRequest newRequestInstance() {
        return new IndexReplicationPingRequest();
    }

    @Override
    protected IndexReplicationPingResponse newResponseInstance(IndexReplicationPingRequest request, AtomicReferenceArray shardsResponses) {
        // A null slot means the shard-level operation failed (exceptions are not
        // accumulated, see accumulateExceptions() below).
        int total = shardsResponses.length();
        int successful = 0;
        for (int i = 0; i < total; i++) {
            if (shardsResponses.get(i) != null) {
                successful++;
            }
        }
        return new IndexReplicationPingResponse(request.index(), successful, total - successful);
    }

    @Override
    protected boolean accumulateExceptions() {
        return false;
    }

    @Override
    protected String transportAction() {
        return "ping/replication/index";
    }

    @Override
    protected GroupShardsIterator shards(IndexReplicationPingRequest indexRequest) {
        IndexRoutingTable indexRouting = clusterService.state().routingTable().index(indexRequest.index());
        if (indexRouting == null) {
            throw new IndexMissingException(new Index(indexRequest.index()));
        }
        return indexRouting.groupByShardsIt();
    }

    @Override
    protected ShardReplicationPingRequest newShardRequestInstance(IndexReplicationPingRequest indexRequest, int shardId) {
        return new ShardReplicationPingRequest(indexRequest, shardId);
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.replication;

import com.google.inject.Inject;
import org.elasticsearch.action.TransportActions;
import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.atomic.AtomicReferenceArray;

/**
 * Top-level transport action of the replication ping: splits the multi-index
 * request into per-index requests and merges the per-index responses into a
 * single {@link ReplicationPingResponse}.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportReplicationPingAction extends TransportIndicesReplicationOperationAction {

    @Inject
    public TransportReplicationPingAction(Settings settings, TransportService transportService,
                                          ClusterService clusterService, ThreadPool threadPool,
                                          TransportIndexReplicationPingAction indexAction) {
        super(settings, transportService, clusterService, threadPool, indexAction);
    }

    @Override
    protected ReplicationPingRequest newRequestInstance() {
        return new ReplicationPingRequest();
    }

    @Override
    protected ReplicationPingResponse newResponseInstance(ReplicationPingRequest request, AtomicReferenceArray indexResponses) {
        ReplicationPingResponse response = new ReplicationPingResponse();
        for (int i = 0; i < indexResponses.length(); i++) {
            // Null slots are failed indices; only successful ones are reported.
            Object slot = indexResponses.get(i);
            if (slot != null) {
                IndexReplicationPingResponse indexResponse = (IndexReplicationPingResponse) slot;
                response.indices().put(indexResponse.index(), indexResponse);
            }
        }
        return response;
    }

    @Override
    protected boolean accumulateExceptions() {
        return false;
    }

    @Override
    protected String transportAction() {
        return TransportActions.Admin.Cluster.Ping.REPLICATION;
    }

    @Override
    protected IndexReplicationPingRequest newIndexRequestInstance(ReplicationPingRequest request, String index) {
        return new IndexReplicationPingRequest(request, index);
    }
}
a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/replication/TransportShardReplicationPingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/replication/TransportShardReplicationPingAction.java new file mode 100644 index 00000000000..33a9ab0d57f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/ping/replication/TransportShardReplicationPingAction.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.ping.replication; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportShardReplicationPingAction extends TransportShardReplicationOperationAction { + + @Inject public TransportShardReplicationPingAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction) { + super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction); + } + + @Override protected ShardReplicationPingRequest newRequestInstance() { + return new ShardReplicationPingRequest(); + } + + @Override protected ShardReplicationPingResponse newResponseInstance() { + return new ShardReplicationPingResponse(); + } + + @Override protected String transportAction() { + return "ping/replication/shard"; + } + + @Override protected ShardReplicationPingResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + return new ShardReplicationPingResponse(); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + } + + @Override protected ShardsIterator shards(ShardReplicationPingRequest request) { + return clusterService.state().routingTable().index(request.index()).shard(request.shardId()).shardsIt(); + } +} \ No newline at end of file diff --git 
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.single;

import org.elasticsearch.action.support.single.SingleOperationRequest;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Request for a single-shard ping, routed by index plus optional type/id.
 *
 * @author kimchy (Shay Banon)
 */
public class SinglePingRequest extends SingleOperationRequest {

    public SinglePingRequest(String index) {
        // Type and id may be filled in later via the fluent setters below.
        super(index, null, null);
    }

    public SinglePingRequest(String index, String type, String id) {
        super(index, type, id);
    }

    // Used by the transport layer when deserializing an incoming request.
    SinglePingRequest() {
    }

    public SinglePingRequest type(String type) {
        this.type = type;
        return this;
    }

    public SinglePingRequest id(String id) {
        this.id = id;
        return this;
    }

    // Covariant-style overrides so fluent call chains keep the concrete type.
    @Override
    public SinglePingRequest listenerThreaded(boolean threadedListener) {
        super.listenerThreaded(threadedListener);
        return this;
    }

    @Override
    public SinglePingRequest threadedOperation(boolean threadedOperation) {
        super.threadedOperation(threadedOperation);
        return this;
    }

    // No local state: serialization is delegated entirely to the base class.
    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        super.readFrom(in);
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        super.writeTo(out);
    }
}
/* Licensed to Elastic Search and Shay Banon under the Apache License, Version 2.0. */

package org.elasticsearch.action.admin.cluster.ping.single;

import org.elasticsearch.action.ActionResponse;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Response of a single-shard ping. Intentionally empty: receiving it at all
 * is the positive signal, so nothing is serialized.
 *
 * @author kimchy (Shay Banon)
 */
public class SinglePingResponse implements ActionResponse {

    @Override
    public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        // No state to read.
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        // No state to write.
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.ping.single; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.single.TransportSingleOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSinglePingAction extends TransportSingleOperationAction { + + @Inject public TransportSinglePingAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService) { + super(settings, threadPool, clusterService, transportService, indicesService); + } + + @Override protected String transportAction() { + return TransportActions.Admin.Cluster.Ping.SINGLE; + } + + @Override protected String transportShardAction() { + return "/cluster/ping/single/shard"; + } + + @Override protected SinglePingResponse shardOperation(SinglePingRequest request, int shardId) throws ElasticSearchException { + return new SinglePingResponse(); + } + + @Override protected SinglePingRequest newRequest() { + return new SinglePingRequest(); + } + + @Override protected SinglePingResponse newResponse() { + return new SinglePingResponse(); + } +} \ No newline at end of file diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java new file mode 100644 index 00000000000..398726f2d8f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.state; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeOperationRequest; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ClusterStateRequest extends MasterNodeOperationRequest { + + public ClusterStateRequest() { + } + + @Override public ActionRequestValidationException validate() { + return null; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java new file mode 100644 index 00000000000..c89ea59f72e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.state; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.ClusterState; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ClusterStateResponse implements ActionResponse { + + private ClusterState clusterState; + + ClusterStateResponse() { + } + + ClusterStateResponse(ClusterState clusterState) { + this.clusterState = clusterState; + } + + public ClusterState state() { + return this.clusterState; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + clusterState = ClusterState.Builder.readFrom(in, null, null); + } + + @Override public void writeTo(DataOutput out) throws IOException { + ClusterState.Builder.writeTo(clusterState, out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java new file mode 100644 index 00000000000..4089e36411a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.state; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportClusterStateAction extends TransportMasterNodeOperationAction { + + @Inject public TransportClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + super(settings, transportService, clusterService, threadPool); + } + + @Override protected String transportAction() { + return TransportActions.Admin.Cluster.STATE; + } + + @Override protected ClusterStateRequest newRequest() { + return new ClusterStateRequest(); + } + + @Override protected ClusterStateResponse newResponse() { + return new ClusterStateResponse(); + } + + @Override protected ClusterStateResponse masterOperation(ClusterStateRequest request) throws ElasticSearchException { + return new ClusterStateResponse(clusterService.state()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java new file mode 100644 index 00000000000..d633bf9e1aa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.create; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeOperationRequest; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.settings.Settings; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.Actions.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +/** + * @author kimchy (Shay Banon) + */ +public class CreateIndexRequest extends MasterNodeOperationRequest { + + private String index; + + private Settings settings = EMPTY_SETTINGS; + + private TimeValue timeout = new TimeValue(10, TimeUnit.SECONDS); + + public CreateIndexRequest(String index) { + this(index, EMPTY_SETTINGS); + } + + public CreateIndexRequest(String index, Settings settings) { + this.index = index; + this.settings = settings; + } + + CreateIndexRequest() { + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + String index() { + return index; + } + + Settings settings() { + return settings; + } + + public CreateIndexRequest settings(Settings settings) { + this.settings = settings; + return this; + } + + TimeValue timeout() { + return timeout; + } + + public CreateIndexRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + settings = readSettingsFromStream(in); + timeout = readTimeValue(in); + } + + @Override 
public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + writeSettingsToStream(settings, out); + timeout.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java new file mode 100644 index 00000000000..d4d9578194d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.create; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class CreateIndexResponse implements ActionResponse, Streamable { + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java new file mode 100644 index 00000000000..62fc2c169cd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.create; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.MetaDataService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportCreateIndexAction extends TransportMasterNodeOperationAction { + + private final MetaDataService metaDataService; + + @Inject public TransportCreateIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataService metaDataService) { + super(settings, transportService, clusterService, threadPool); + this.metaDataService = metaDataService; + } + + @Override protected String transportAction() { + return TransportActions.Admin.Indices.CREATE; + } + + @Override protected CreateIndexRequest newRequest() { + return new CreateIndexRequest(); + } + + @Override protected CreateIndexResponse newResponse() { + return new CreateIndexResponse(); + } + + @Override protected CreateIndexResponse masterOperation(CreateIndexRequest request) throws ElasticSearchException { + metaDataService.createIndex(request.index(), request.settings(), request.timeout()); + return new CreateIndexResponse(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java new file mode 100644 index 00000000000..7e69dc3cd9b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -0,0 +1,79 
@@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.delete; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeOperationRequest; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; +import static org.elasticsearch.util.TimeValue.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteIndexRequest extends MasterNodeOperationRequest { + + private String index; + + private TimeValue timeout = timeValueSeconds(10); + + public DeleteIndexRequest(String index) { + this.index = index; + } + + DeleteIndexRequest() { + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + String index() { + return index; + } + + TimeValue timeout() { + return timeout; + } + + public DeleteIndexRequest timeout(TimeValue timeout) { + this.timeout = 
timeout; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + timeout = readTimeValue(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + timeout.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java new file mode 100644 index 00000000000..36d8b1aacea --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.delete; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteIndexResponse implements ActionResponse, Streamable { + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java new file mode 100644 index 00000000000..1719887778e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.delete; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.MetaDataService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportDeleteIndexAction extends TransportMasterNodeOperationAction { + + private final MetaDataService metaDataService; + + @Inject public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataService metaDataService) { + super(settings, transportService, clusterService, threadPool); + this.metaDataService = metaDataService; + } + + @Override protected String transportAction() { + return TransportActions.Admin.Indices.DELETE; + } + + @Override protected DeleteIndexRequest newRequest() { + return new DeleteIndexRequest(); + } + + @Override protected DeleteIndexResponse newResponse() { + return new DeleteIndexResponse(); + } + + @Override protected DeleteIndexResponse masterOperation(DeleteIndexRequest request) throws ElasticSearchException { + metaDataService.deleteIndex(request.index(), request.timeout()); + return new DeleteIndexResponse(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java new file mode 100644 index 00000000000..e9e07237630 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and 
Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +/** + * @author kimchy (Shay Banon) + */ +public class FlushRequest extends IndicesReplicationOperationRequest { + + public FlushRequest(String index) { + this(new String[]{index}); + } + + public FlushRequest(String... 
indices) { + this.indices = indices; + } + + @Override public FlushRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + public FlushRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + FlushRequest() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java new file mode 100644 index 00000000000..505331d6541 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class FlushResponse implements ActionResponse, Streamable { + + private Map indices = new HashMap(); + + FlushResponse() { + + } + + public Map indices() { + return indices; + } + + public IndexFlushResponse index(String index) { + return indices.get(index); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + for (int i = 0; i < size; i++) { + IndexFlushResponse indexFlushResponse = new IndexFlushResponse(); + indexFlushResponse.readFrom(in); + indices.put(indexFlushResponse.index(), indexFlushResponse); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(indices.size()); + for (IndexFlushResponse indexFlushResponse : indices.values()) { + indexFlushResponse.writeTo(out); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/IndexFlushRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/IndexFlushRequest.java new file mode 100644 index 00000000000..6447ae2aef0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/IndexFlushRequest.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexFlushRequest extends IndexReplicationOperationRequest { + + public IndexFlushRequest(String index) { + this.index = index; + } + + IndexFlushRequest(FlushRequest request, String index) { + this.index = index; + this.timeout = request.timeout(); + } + + IndexFlushRequest() { + } + + public IndexFlushRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/IndexFlushResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/IndexFlushResponse.java new file mode 100644 index 00000000000..a5965ab7f6e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/IndexFlushResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexFlushResponse implements ActionResponse, Streamable { + + private String index; + + private int successfulShards; + + private int failedShards; + + IndexFlushResponse(String index, int successfulShards, int failedShards) { + this.index = index; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + } + + IndexFlushResponse() { + + } + + public String index() { + return index; + } + + public int successfulShards() { + return successfulShards; + } + + public int failedShards() { + return failedShards; + } + + public int totalShards() { + return successfulShards + failedShards; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + successfulShards = in.readInt(); + failedShards = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeInt(successfulShards); + out.writeInt(failedShards); + } +} \ No newline 
at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java new file mode 100644 index 00000000000..0cb8eede57c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardFlushRequest extends ShardReplicationOperationRequest { + + private int shardId; + + public ShardFlushRequest(IndexFlushRequest indexFlushRequest, int shardId) { + this(indexFlushRequest.index(), shardId); + timeout = indexFlushRequest.timeout(); + } + + public ShardFlushRequest(String index, int shardId) { + this.index = index; + this.shardId = shardId; + } + + ShardFlushRequest() { + } + + public int shardId() { + return this.shardId; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + shardId = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeInt(shardId); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java new file mode 100644 index 00000000000..d09e0876733 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Shard-level response for a flush operation. A flush carries no per-shard
 * result data, so this response is an empty marker: its presence (non-null
 * slot in the responses array) signals that the shard flushed successfully.
 *
 * @author kimchy (Shay Banon)
 */
public class ShardFlushResponse implements ActionResponse, Streamable {

    // No-arg constructor used by the transport layer before deserialization.
    ShardFlushResponse() {

    }

    // Nothing to deserialize: the response carries no state.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
    }

    // Nothing to serialize: the response carries no state.
    @Override public void writeTo(DataOutput out) throws IOException {
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportFlushAction extends TransportIndicesReplicationOperationAction { + + @Inject public TransportFlushAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, TransportIndexFlushAction indexFlushAction) { + super(settings, transportService, clusterService, threadPool, indexFlushAction); + } + + @Override protected FlushRequest newRequestInstance() { + return new FlushRequest(); + } + + @Override protected FlushResponse newResponseInstance(FlushRequest request, AtomicReferenceArray indexResponses) { + FlushResponse response = new FlushResponse(); + for (int i = 0; i < indexResponses.length(); i++) { + IndexFlushResponse indexFlushResponse = (IndexFlushResponse) indexResponses.get(i); + if (indexFlushResponse != null) { + response.indices().put(indexFlushResponse.index(), indexFlushResponse); + } + } + return response; + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + @Override protected String 
transportAction() { + return TransportActions.Admin.Indices.FLUSH; + } + + @Override protected IndexFlushRequest newIndexRequestInstance(FlushRequest request, String index) { + return new IndexFlushRequest(request, index); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportIndexFlushAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportIndexFlushAction.java new file mode 100644 index 00000000000..5470b7c9ec4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportIndexFlushAction.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.index.Index; +import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportIndexFlushAction extends TransportIndexReplicationOperationAction { + + private final ClusterService clusterService; + + @Inject public TransportIndexFlushAction(Settings settings, ClusterService clusterService, + TransportService transportService, ThreadPool threadPool, + TransportShardFlushAction shardFlushAction) { + super(settings, transportService, threadPool, shardFlushAction); + this.clusterService = clusterService; + } + + @Override protected IndexFlushRequest newRequestInstance() { + return new IndexFlushRequest(); + } + + @Override protected IndexFlushResponse newResponseInstance(IndexFlushRequest indexFlushReqest, AtomicReferenceArray shardsResponses) { + int successfulShards = 0; + int failedShards = 0; + for (int i = 0; i < shardsResponses.length(); i++) { + if (shardsResponses.get(i) == null) { + failedShards++; + } else { + successfulShards++; + } + } + return new IndexFlushResponse(indexFlushReqest.index(), successfulShards, failedShards); + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + @Override protected String transportAction() { + return "indices/index/flush"; + } + + @Override protected GroupShardsIterator shards(IndexFlushRequest indexFlushRequest) { + IndexRoutingTable 
indexRouting = clusterService.state().routingTable().index(indexFlushRequest.index()); + if (indexRouting == null) { + throw new IndexMissingException(new Index(indexFlushRequest.index())); + } + return indexRouting.groupByShardsIt(); + } + + @Override protected ShardFlushRequest newShardRequestInstance(IndexFlushRequest indexFlushRequest, int shardId) { + return new ShardFlushRequest(indexFlushRequest, shardId); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java new file mode 100644 index 00000000000..c71e8168e93 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportShardFlushAction extends TransportShardReplicationOperationAction { + + @Inject public TransportShardFlushAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction) { + super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction); + } + + @Override protected ShardFlushRequest newRequestInstance() { + return new ShardFlushRequest(); + } + + @Override protected ShardFlushResponse newResponseInstance() { + return new ShardFlushResponse(); + } + + @Override protected String transportAction() { + return "indices/index/shard/flush"; + } + + @Override protected ShardFlushResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + ShardFlushRequest request = shardRequest.request; + indexShard(shardRequest).flush(); + return new ShardFlushResponse(); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + ShardFlushRequest request = shardRequest.request; + indexShard(shardRequest).flush(); + } + + @Override protected ShardsIterator shards(ShardFlushRequest request) { + return clusterService.state().routingTable().index(request.index()).shard(request.shardId()).shardsIt(); + } +} \ No newline at end of file diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java new file mode 100644 index 00000000000..807c88808e5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +/** + * @author kimchy (Shay Banon) + */ +public class GatewaySnapshotRequest extends IndicesReplicationOperationRequest { + + public GatewaySnapshotRequest(String index) { + this(new String[]{index}); + } + + public GatewaySnapshotRequest(String... 
indices) { + this.indices = indices; + } + + GatewaySnapshotRequest() { + + } + + @Override public GatewaySnapshotRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + public GatewaySnapshotRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java new file mode 100644 index 00000000000..19213be17ce --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class GatewaySnapshotResponse implements ActionResponse, Streamable { + + private Map indexResponses = new HashMap(); + + GatewaySnapshotResponse() { + + } + + public Map indices() { + return indexResponses; + } + + public IndexGatewaySnapshotResponse index(String index) { + return indexResponses.get(index); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + for (int i = 0; i < size; i++) { + IndexGatewaySnapshotResponse response = new IndexGatewaySnapshotResponse(); + response.readFrom(in); + indexResponses.put(response.index(), response); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(indexResponses.size()); + for (IndexGatewaySnapshotResponse indexGatewaySnapshotResponse : indexResponses.values()) { + indexGatewaySnapshotResponse.writeTo(out); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/IndexGatewaySnapshotRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/IndexGatewaySnapshotRequest.java new file mode 100644 index 00000000000..1ab7b36d0f5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/IndexGatewaySnapshotRequest.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexGatewaySnapshotRequest extends IndexReplicationOperationRequest { + + public IndexGatewaySnapshotRequest(String index) { + this.index = index; + } + + IndexGatewaySnapshotRequest(GatewaySnapshotRequest request, String index) { + this.index = index; + this.timeout = request.timeout(); + } + + IndexGatewaySnapshotRequest() { + } + + public IndexGatewaySnapshotRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/IndexGatewaySnapshotResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/IndexGatewaySnapshotResponse.java new file mode 100644 index 00000000000..72010cd47fb --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/IndexGatewaySnapshotResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexGatewaySnapshotResponse implements ActionResponse, Streamable { + + private String index; + + private int successfulShards; + + private int failedShards; + + IndexGatewaySnapshotResponse(String index, int successfulShards, int failedShards) { + this.index = index; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + } + + IndexGatewaySnapshotResponse() { + + } + + public String index() { + return index; + } + + public int successfulShards() { + return successfulShards; + } + + public int failedShards() { + return failedShards; + } + + public int totalShards() { + return successfulShards + failedShards; + } + + @Override public void readFrom(DataInput in) throws IOException, 
ClassNotFoundException { + index = in.readUTF(); + successfulShards = in.readInt(); + failedShards = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeInt(successfulShards); + out.writeInt(failedShards); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java new file mode 100644 index 00000000000..245d0ec0a35 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardGatewaySnapshotRequest extends ShardReplicationOperationRequest { + + private int shardId; + + public ShardGatewaySnapshotRequest(IndexGatewaySnapshotRequest request, int shardId) { + this(request.index(), shardId); + timeout = request.timeout(); + } + + public ShardGatewaySnapshotRequest(String index, int shardId) { + this.index = index; + this.shardId = shardId; + } + + ShardGatewaySnapshotRequest() { + } + + public int shardId() { + return this.shardId; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + shardId = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeInt(shardId); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java new file mode 100644 index 00000000000..2c93b7d9069 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.elasticsearch.action.admin.indices.gateway.snapshot;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Shard-level response for a gateway snapshot. The operation carries no
 * per-shard result data, so this response is an empty marker: a non-null slot
 * in the responses array signals that the shard snapshot succeeded.
 *
 * @author kimchy (Shay Banon)
 */
public class ShardGatewaySnapshotResponse implements ActionResponse, Streamable {

    // No-arg constructor used by the transport layer before deserialization.
    ShardGatewaySnapshotResponse() {

    }

    // Nothing to deserialize: the response carries no state.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
    }

    // Nothing to serialize: the response carries no state.
    @Override public void writeTo(DataOutput out) throws IOException {
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportGatewaySnapshotAction extends TransportIndicesReplicationOperationAction { + + @Inject public TransportGatewaySnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, TransportIndexGatewaySnapshotAction indexAction) { + super(settings, transportService, clusterService, threadPool, indexAction); + } + + @Override protected GatewaySnapshotRequest newRequestInstance() { + return new GatewaySnapshotRequest(); + } + + @Override protected GatewaySnapshotResponse newResponseInstance(GatewaySnapshotRequest request, AtomicReferenceArray indexResponses) { + GatewaySnapshotResponse response = new GatewaySnapshotResponse(); + for (int i = 0; i < indexResponses.length(); i++) { + IndexGatewaySnapshotResponse indexResponse = (IndexGatewaySnapshotResponse) indexResponses.get(i); + if (indexResponse != null) { + response.indices().put(indexResponse.index(), indexResponse); + } + } + return response; + } + + @Override 
protected boolean accumulateExceptions() { + return false; + } + + @Override protected String transportAction() { + return TransportActions.Admin.Indices.Gateway.SNAPSHOT; + } + + @Override protected IndexGatewaySnapshotRequest newIndexRequestInstance(GatewaySnapshotRequest request, String index) { + return new IndexGatewaySnapshotRequest(request, index); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportIndexGatewaySnapshotAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportIndexGatewaySnapshotAction.java new file mode 100644 index 00000000000..426021b054a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportIndexGatewaySnapshotAction.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.index.Index; +import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportIndexGatewaySnapshotAction extends TransportIndexReplicationOperationAction { + + private final ClusterService clusterService; + + @Inject public TransportIndexGatewaySnapshotAction(Settings settings, ClusterService clusterService, + TransportService transportService, ThreadPool threadPool, + TransportShardGatewaySnapshotAction shardGatewaySnapshotAction) { + super(settings, transportService, threadPool, shardGatewaySnapshotAction); + this.clusterService = clusterService; + } + + @Override protected IndexGatewaySnapshotRequest newRequestInstance() { + return new IndexGatewaySnapshotRequest(); + } + + @Override protected IndexGatewaySnapshotResponse newResponseInstance(IndexGatewaySnapshotRequest request, AtomicReferenceArray shardsResponses) { + int successfulShards = 0; + int failedShards = 0; + for (int i = 0; i < shardsResponses.length(); i++) { + if (shardsResponses.get(i) == null) { + failedShards++; + } else { + successfulShards++; + } + } + return new IndexGatewaySnapshotResponse(request.index(), successfulShards, failedShards); + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + @Override protected String transportAction() { + return "indices/index/gateway/snapshot"; + } + + @Override 
protected GroupShardsIterator shards(IndexGatewaySnapshotRequest request) { + IndexRoutingTable indexRouting = clusterService.state().routingTable().index(request.index()); + if (indexRouting == null) { + throw new IndexMissingException(new Index(request.index())); + } + return indexRouting.groupByShardsIt(); + } + + @Override protected ShardGatewaySnapshotRequest newShardRequestInstance(IndexGatewaySnapshotRequest request, int shardId) { + return new ShardGatewaySnapshotRequest(request, shardId); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportShardGatewaySnapshotAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportShardGatewaySnapshotAction.java new file mode 100644 index 00000000000..7c230691d44 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportShardGatewaySnapshotAction.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.gateway.snapshot; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.index.gateway.IndexShardGatewayService; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportShardGatewaySnapshotAction extends TransportShardReplicationOperationAction { + + @Inject public TransportShardGatewaySnapshotAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction) { + super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction); + } + + @Override protected ShardGatewaySnapshotRequest newRequestInstance() { + return new ShardGatewaySnapshotRequest(); + } + + @Override protected ShardGatewaySnapshotResponse newResponseInstance() { + return new ShardGatewaySnapshotResponse(); + } + + @Override protected String transportAction() { + return "indices/index/shard/gateway/snapshot"; + } + + @Override protected ShardGatewaySnapshotResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + IndexShardGatewayService shardGatewayService = indicesService.indexServiceSafe(shardRequest.request.index()) + .shardInjectorSafe(shardRequest.shardId).getInstance(IndexShardGatewayService.class); + shardGatewayService.snapshot(); + return new ShardGatewaySnapshotResponse(); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + // silently ignore, we 
disable it with #ignoreBackups anyhow + } + + @Override protected ShardsIterator shards(ShardGatewaySnapshotRequest request) { + return clusterService.state().routingTable().index(request.index()).shard(request.shardId()).shardsIt(); + } + + /** + * Snapshot should only happen on primary shards. + */ + @Override protected boolean ignoreBackups() { + return true; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/CreateMappingRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/CreateMappingRequest.java new file mode 100644 index 00000000000..770c648f642 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/CreateMappingRequest.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.mapping.create; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.util.Required; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class CreateMappingRequest implements ActionRequest, Streamable { + + private String[] indices; + + private String mappingType; + + private String mappingSource; + + CreateMappingRequest() { + } + + public CreateMappingRequest(String... indices) { + this.indices = indices; + } + + public CreateMappingRequest(String index, String mappingType, String mappingSource) { + this(new String[]{index}, mappingType, mappingSource); + } + + public CreateMappingRequest(String[] indices, String mappingType, String mappingSource) { + this.indices = indices; + this.mappingType = mappingType; + this.mappingSource = mappingSource; + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (mappingSource == null) { + validationException = addValidationError("mapping source is missing", validationException); + } + return validationException; + } + + @Override public boolean listenerThreaded() { + // we don't really care about this... + return true; + } + + @Override public CreateMappingRequest listenerThreaded(boolean threadedListener) { + return this; + } + + String[] indices() { + return indices; + } + + String mappingType() { + return mappingType; + } + + /** + * The type of the mappings. Not required since it can be defined explicitly within the mapping source. + * If it is not defined within the mapping source, then it is required. 
+ */ + public CreateMappingRequest mappingType(String mappingType) { + this.mappingType = mappingType; + return this; + } + + String mappingSource() { + return mappingSource; + } + + @Required public CreateMappingRequest mappingSource(String mappingSource) { + this.mappingSource = mappingSource; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + indices = new String[in.readInt()]; + for (int i = 0; i < indices.length; i++) { + indices[i] = in.readUTF(); + } + if (in.readBoolean()) { + mappingType = in.readUTF(); + } + mappingSource = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + if (indices == null) { + out.writeInt(0); + } else { + out.writeInt(indices.length); + for (String index : indices) { + out.writeUTF(index); + } + } + if (mappingType == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(mappingType); + } + out.writeUTF(mappingSource); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/CreateMappingResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/CreateMappingResponse.java new file mode 100644 index 00000000000..741a8bd7070 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/CreateMappingResponse.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.mapping.create; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class CreateMappingResponse implements ActionResponse, Streamable { + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/TransportCreateMappingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/TransportCreateMappingAction.java new file mode 100644 index 00000000000..efbed765596 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/mapping/create/TransportCreateMappingAction.java @@ -0,0 +1,122 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.mapping.create; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.Actions; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.MetaDataService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.io.VoidStreamable; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportCreateMappingAction extends BaseAction { + + private final TransportService transportService; + + private final ClusterService clusterService; + + private final MetaDataService metaDataService; + + private final ThreadPool threadPool; + + @Inject public TransportCreateMappingAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataService metaDataService) { + super(settings); + this.transportService = transportService; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.metaDataService = metaDataService; + + transportService.registerHandler(TransportActions.Admin.Indices.Mapping.CREATE, new TransportHandler()); + } + + @Override protected void doExecute(final CreateMappingRequest request, final ActionListener listener) { + final String[] indices = 
Actions.processIndices(clusterService.state(), request.indices()); + if (clusterService.state().nodes().localNodeMaster()) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + metaDataService.addMapping(indices, request.mappingType(), request.mappingSource()); + listener.onResponse(new CreateMappingResponse()); + } catch (Exception e) { + listener.onFailure(e); + } + } + }); + } else { + transportService.sendRequest(clusterService.state().nodes().masterNode(), TransportActions.Admin.Indices.Mapping.CREATE, request, + new VoidTransportResponseHandler() { + @Override public void handleResponse(VoidStreamable response) { + listener.onResponse(new CreateMappingResponse()); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + }); + } + } + + private class TransportHandler extends BaseTransportRequestHandler { + + + @Override public CreateMappingRequest newInstance() { + return new CreateMappingRequest(); + } + + @Override public void messageReceived(final CreateMappingRequest request, final TransportChannel channel) throws Exception { + String[] indices = Actions.processIndices(clusterService.state(), request.indices()); + if (clusterService.state().nodes().localNodeMaster()) { + // handle the actual creation of a new index + metaDataService.addMapping(indices, request.mappingType(), request.mappingSource()); + channel.sendResponse(VoidStreamable.INSTANCE); + } else { + transportService.sendRequest(clusterService.state().nodes().masterNode(), TransportActions.Admin.Indices.Mapping.CREATE, request, new VoidTransportResponseHandler() { + + @Override public void handleResponse(VoidStreamable response) { + try { + channel.sendResponse(response); + } catch (IOException e) { + logger.error("Failed to send response", e); + } + } + + @Override public void handleException(RemoteTransportException exp) { + try { + channel.sendResponse(exp); + } catch (IOException e) { + logger.error("Failed to 
send response", e); + } + } + }); + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/IndexRefreshRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/IndexRefreshRequest.java new file mode 100644 index 00000000000..15d3e6c648f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/IndexRefreshRequest.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexRefreshRequest extends IndexReplicationOperationRequest { + + private boolean waitForOperations = true; + + public IndexRefreshRequest(String index) { + this.index = index; + } + + IndexRefreshRequest(RefreshRequest request, String index) { + this.index = index; + this.timeout = request.timeout(); + this.waitForOperations = request.waitForOperations(); + } + + IndexRefreshRequest() { + } + + public IndexRefreshRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public boolean waitForOperations() { + return waitForOperations; + } + + public IndexRefreshRequest waitForOperations(boolean waitForOperations) { + this.waitForOperations = waitForOperations; + return this; + } + + public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + waitForOperations = in.readBoolean(); + } + + public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(waitForOperations); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/IndexRefreshResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/IndexRefreshResponse.java new file mode 100644 index 00000000000..0146094cfff --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/IndexRefreshResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexRefreshResponse implements ActionResponse, Streamable { + + private String index; + + private int successfulShards; + + private int failedShards; + + IndexRefreshResponse(String index, int successfulShards, int failedShards) { + this.index = index; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + } + + IndexRefreshResponse() { + + } + + public String index() { + return index; + } + + public int successfulShards() { + return successfulShards; + } + + public int failedShards() { + return failedShards; + } + + public int totalShards() { + return successfulShards + failedShards; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + successfulShards = in.readInt(); + failedShards = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeInt(successfulShards); + out.writeInt(failedShards); + } +} \ No 
newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java new file mode 100644 index 00000000000..33689bb7b20 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class RefreshRequest extends IndicesReplicationOperationRequest { + + private boolean waitForOperations = true; + + public RefreshRequest(String index) { + this(new String[]{index}); + } + + public RefreshRequest(String... 
indices) { + this.indices = indices; + } + + public RefreshRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + RefreshRequest() { + + } + + @Override public RefreshRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + public boolean waitForOperations() { + return waitForOperations; + } + + public RefreshRequest waitForOperations(boolean waitForOperations) { + this.waitForOperations = waitForOperations; + return this; + } + + public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + waitForOperations = in.readBoolean(); + } + + public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(waitForOperations); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java new file mode 100644 index 00000000000..080904dfdea --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class RefreshResponse implements ActionResponse, Streamable { + + private Map indices = new HashMap(); + + RefreshResponse() { + + } + + public Map indices() { + return indices; + } + + public IndexRefreshResponse index(String index) { + return indices.get(index); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + for (int i = 0; i < size; i++) { + IndexRefreshResponse response = new IndexRefreshResponse(); + response.readFrom(in); + indices.put(response.index(), response); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(indices.size()); + for (IndexRefreshResponse indexRefreshResponse : indices.values()) { + indexRefreshResponse.writeTo(out); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java new file mode 100644 index 00000000000..e0e0365a598 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardRefreshRequest extends ShardReplicationOperationRequest { + + private int shardId; + private boolean waitForOperations = true; + + public ShardRefreshRequest(IndexRefreshRequest request, int shardId) { + this(request.index(), shardId); + timeout = request.timeout(); + waitForOperations = request.waitForOperations(); + } + + public ShardRefreshRequest(String index, int shardId) { + this.index = index; + this.shardId = shardId; + } + + ShardRefreshRequest() { + } + + public int shardId() { + return this.shardId; + } + + public boolean waitForOperations() { + return waitForOperations; + } + + public ShardRefreshRequest waitForOperations(boolean waitForOperations) { + this.waitForOperations = waitForOperations; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + shardId = in.readInt(); + waitForOperations = in.readBoolean(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeInt(shardId); + out.writeBoolean(waitForOperations); + } +} \ No newline 
at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java new file mode 100644 index 00000000000..ebe348452f5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardRefreshResponse implements ActionResponse, Streamable { + + ShardRefreshResponse() { + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportIndexRefreshAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportIndexRefreshAction.java new file mode 100644 index 00000000000..e3f078e788b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportIndexRefreshAction.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.action.admin.indices.refresh;

import com.google.inject.Inject;
import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.atomic.AtomicReferenceArray;

/**
 * Index-level refresh: fans a refresh request out to every shard group of a single
 * index and folds the per-shard answers into one {@link IndexRefreshResponse}.
 *
 * NOTE(review): generic type parameters on the superclass appear stripped by the
 * diff extraction — restored here from the sibling request/response types; confirm.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportIndexRefreshAction extends TransportIndexReplicationOperationAction<IndexRefreshRequest, IndexRefreshResponse, ShardRefreshRequest, ShardRefreshResponse> {

    private final ClusterService clusterService;

    @Inject public TransportIndexRefreshAction(Settings settings, ClusterService clusterService,
                                               TransportService transportService, ThreadPool threadPool,
                                               TransportShardRefreshAction shardRefreshAction) {
        super(settings, transportService, threadPool, shardRefreshAction);
        this.clusterService = clusterService;
    }

    @Override protected IndexRefreshRequest newRequestInstance() {
        return new IndexRefreshRequest();
    }

    /**
     * Builds the index-level response. A {@code null} slot in {@code shardsResponses}
     * means that shard produced no answer (exceptions are not accumulated) and is
     * counted as a failure.
     */
    @Override protected IndexRefreshResponse newResponseInstance(IndexRefreshRequest request, AtomicReferenceArray shardsResponses) {
        int total = shardsResponses.length();
        int successfulShards = 0;
        for (int i = 0; i < total; i++) {
            if (shardsResponses.get(i) != null) {
                successfulShards++;
            }
        }
        return new IndexRefreshResponse(request.index(), successfulShards, total - successfulShards);
    }

    @Override protected boolean accumulateExceptions() {
        return false;
    }

    @Override protected String transportAction() {
        return "indices/index/refresh";
    }

    // One group per shard id: the refresh is replicated to primaries and backups alike.
    @Override protected GroupShardsIterator shards(IndexRefreshRequest request) {
        return clusterService.state().routingTable().index(request.index()).groupByShardsIt();
    }

    @Override protected ShardRefreshRequest newShardRequestInstance(IndexRefreshRequest request, int shardId) {
        return new ShardRefreshRequest(request, shardId);
    }
}
package org.elasticsearch.action.admin.indices.refresh;

import com.google.inject.Inject;
import org.elasticsearch.action.TransportActions;
import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.atomic.AtomicReferenceArray;

/**
 * Top-level refresh action: fans a {@link RefreshRequest} out to each targeted index
 * (via {@link TransportIndexRefreshAction}) and merges the index answers into a
 * single {@link RefreshResponse}.
 *
 * NOTE(review): superclass type parameters appear stripped by the diff extraction —
 * restored from the request/response types used below; confirm.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportRefreshAction extends TransportIndicesReplicationOperationAction<RefreshRequest, RefreshResponse, IndexRefreshRequest, IndexRefreshResponse> {

    @Inject public TransportRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, TransportIndexRefreshAction indexAction) {
        super(settings, transportService, clusterService, threadPool, indexAction);
    }

    @Override protected RefreshRequest newRequestInstance() {
        return new RefreshRequest();
    }

    @Override protected RefreshResponse newResponseInstance(RefreshRequest request, AtomicReferenceArray indexResponses) {
        RefreshResponse response = new RefreshResponse();
        for (int i = 0; i < indexResponses.length(); i++) {
            Object raw = indexResponses.get(i);
            if (raw == null) {
                // That index produced no answer; exceptions are not accumulated.
                continue;
            }
            IndexRefreshResponse indexResponse = (IndexRefreshResponse) raw;
            response.indices().put(indexResponse.index(), indexResponse);
        }
        return response;
    }

    @Override protected boolean accumulateExceptions() {
        return false;
    }

    @Override protected String transportAction() {
        return TransportActions.Admin.Indices.REFRESH;
    }

    @Override protected IndexRefreshRequest newIndexRequestInstance(RefreshRequest request, String index) {
        return new IndexRefreshRequest(request, index);
    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java new file mode 100644 index 00000000000..20b15566ef0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportShardRefreshAction extends TransportShardReplicationOperationAction { + + @Inject public TransportShardRefreshAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction) { + super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction); + } + + @Override protected ShardRefreshRequest newRequestInstance() { + return new ShardRefreshRequest(); + } + + @Override protected ShardRefreshResponse newResponseInstance() { + return new ShardRefreshResponse(); + } + + @Override protected String transportAction() { + return "indices/index/shard/refresh"; + } + + @Override protected ShardRefreshResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + ShardRefreshRequest request = shardRequest.request; + indexShard(shardRequest).refresh(request.waitForOperations()); + return new ShardRefreshResponse(); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + ShardRefreshRequest request = shardRequest.request; + indexShard(shardRequest).refresh(request.waitForOperations()); + } + + @Override protected ShardsIterator shards(ShardRefreshRequest request) { + return 
clusterService.state().routingTable().index(request.index()).shard(request.shardId()).shardsIt(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java new file mode 100644 index 00000000000..8bdca9ad37e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.action.admin.indices.status;

import com.google.common.collect.Iterators;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.SizeValue;

import java.util.Iterator;

/**
 * Status of one logical shard: aggregates the {@link ShardStatus} of every physical
 * copy (primary and backups) that shares the same shard id. All aggregations use
 * {@code -1} / {@link SizeValue#UNKNOWN} as the "nothing reported" sentinel.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexShardStatus implements Iterable<ShardStatus> {

    /** Aggregated document counts; any field left at {@code -1} is unknown. */
    public static class Docs {
        public static final Docs UNKNOWN = new Docs();

        int numDocs = -1;
        int maxDoc = -1;
        int deletedDocs = -1;

        public int numDocs() {
            return numDocs;
        }

        public int maxDoc() {
            return maxDoc;
        }

        public int deletedDocs() {
            return deletedDocs;
        }
    }

    private final ShardId shardId;

    private final ShardStatus[] shards;

    IndexShardStatus(ShardId shardId, ShardStatus[] shards) {
        this.shardId = shardId;
        this.shards = shards;
    }

    public ShardId shardId() {
        return this.shardId;
    }

    public ShardStatus[] shards() {
        return this.shards;
    }

    /** Combined store size of all copies; {@code SizeValue(-1)} when none reported one. */
    public SizeValue storeSize() {
        long total = -1;
        for (ShardStatus shard : shards) {
            long shardBytes = shard.storeSize().bytes();
            if (shardBytes == SizeValue.UNKNOWN.bytes()) {
                continue; // unknown sizes do not contribute
            }
            total = (total == -1 ? 0 : total) + shardBytes;
        }
        return new SizeValue(total);
    }

    /** Combined flushable-memory estimate of all copies; {@code SizeValue(-1)} when unknown. */
    public SizeValue estimatedFlushableMemorySize() {
        long total = -1;
        for (ShardStatus shard : shards) {
            long shardBytes = shard.estimatedFlushableMemorySize().bytes();
            if (shardBytes == SizeValue.UNKNOWN.bytes()) {
                continue;
            }
            total = (total == -1 ? 0 : total) + shardBytes;
        }
        return new SizeValue(total);
    }

    /** Combined translog operation count of all copies; {@code -1} when unknown. */
    public long translogOperations() {
        long total = -1;
        for (ShardStatus shard : shards) {
            long shardOps = shard.translogOperations();
            if (shardOps == -1) {
                continue;
            }
            total = (total == -1 ? 0 : total) + shardOps;
        }
        return total;
    }

    /**
     * Aggregated document counts. Only primaries are summed — backups hold the same
     * documents and would double count.
     */
    public Docs docs() {
        Docs docs = new Docs();
        for (ShardStatus shard : shards) {
            if (!shard.shardRouting().primary()) {
                continue;
            }
            int numDocs = shard.docs().numDocs();
            if (numDocs != -1) {
                docs.numDocs = (docs.numDocs == -1 ? 0 : docs.numDocs) + numDocs;
            }
            int maxDoc = shard.docs().maxDoc();
            if (maxDoc != -1) {
                docs.maxDoc = (docs.maxDoc == -1 ? 0 : docs.maxDoc) + maxDoc;
            }
            int deletedDocs = shard.docs().deletedDocs();
            if (deletedDocs != -1) {
                docs.deletedDocs = (docs.deletedDocs == -1 ? 0 : docs.deletedDocs) + deletedDocs;
            }
        }
        return docs;
    }

    @Override public Iterator<ShardStatus> iterator() {
        return Iterators.forArray(shards);
    }
}
package org.elasticsearch.action.admin.indices.status;

import com.google.common.collect.Maps;
import org.elasticsearch.util.SizeValue;
import org.elasticsearch.util.settings.Settings;

import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.*;

/**
 * Status of a whole index: groups the flat per-copy {@link ShardStatus} responses by
 * shard id into {@link IndexShardStatus} entries and aggregates over them. As in the
 * shard-level status, {@code -1} / {@link SizeValue#UNKNOWN} means "nothing reported".
 *
 * NOTE(review): generic type arguments on the maps/lists appear stripped by the diff
 * extraction — restored from usage; confirm.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexStatus implements Iterable<IndexShardStatus> {

    /** Aggregated document counts; any field left at {@code -1} is unknown. */
    public static class Docs {
        public static final Docs UNKNOWN = new Docs();

        int numDocs = -1;
        int maxDoc = -1;
        int deletedDocs = -1;

        public int numDocs() {
            return numDocs;
        }

        public int maxDoc() {
            return maxDoc;
        }

        public int deletedDocs() {
            return deletedDocs;
        }
    }

    private final String index;

    private final Map<Integer, IndexShardStatus> indexShards;

    private final Settings settings;

    IndexStatus(String index, Settings settings, ShardStatus[] shards) {
        this.index = index;
        this.settings = settings;

        // Bucket the flat shard responses by shard id...
        Map<Integer, List<ShardStatus>> grouped = Maps.newHashMap();
        for (ShardStatus shard : shards) {
            List<ShardStatus> bucket = grouped.get(shard.shardRouting().id());
            if (bucket == null) {
                bucket = newArrayList();
                grouped.put(shard.shardRouting().id(), bucket);
            }
            bucket.add(shard);
        }
        // ...then wrap each bucket in an IndexShardStatus.
        indexShards = Maps.newHashMap();
        for (Map.Entry<Integer, List<ShardStatus>> entry : grouped.entrySet()) {
            List<ShardStatus> bucket = entry.getValue();
            indexShards.put(entry.getKey(), new IndexShardStatus(bucket.get(0).shardRouting().shardId(), bucket.toArray(new ShardStatus[bucket.size()])));
        }
    }

    public String index() {
        return this.index;
    }

    public Map<Integer, IndexShardStatus> shards() {
        return this.indexShards;
    }

    public Settings settings() {
        return this.settings;
    }

    /** Total store size over all shards; {@code SizeValue(-1)} when none reported one. */
    public SizeValue storeSize() {
        long total = -1;
        for (IndexShardStatus shard : this) {
            long shardBytes = shard.storeSize().bytes();
            if (shardBytes == SizeValue.UNKNOWN.bytes()) {
                continue;
            }
            total = (total == -1 ? 0 : total) + shardBytes;
        }
        return new SizeValue(total);
    }

    /** Total flushable-memory estimate over all shards; {@code SizeValue(-1)} when unknown. */
    public SizeValue estimatedFlushableMemorySize() {
        long total = -1;
        for (IndexShardStatus shard : this) {
            long shardBytes = shard.estimatedFlushableMemorySize().bytes();
            if (shardBytes == SizeValue.UNKNOWN.bytes()) {
                continue;
            }
            total = (total == -1 ? 0 : total) + shardBytes;
        }
        return new SizeValue(total);
    }

    /** Total translog operations over all shards; {@code -1} when unknown. */
    public long translogOperations() {
        long total = -1;
        for (IndexShardStatus shard : this) {
            long shardOps = shard.translogOperations();
            if (shardOps == -1) {
                continue;
            }
            total = (total == -1 ? 0 : total) + shardOps;
        }
        return total;
    }

    /**
     * Aggregated document counts over all shards (each shard already restricted its
     * own aggregation to primaries, so summing here does not double count).
     */
    public Docs docs() {
        Docs docs = new Docs();
        for (IndexShardStatus shard : this) {
            int numDocs = shard.docs().numDocs();
            if (numDocs != -1) {
                docs.numDocs = (docs.numDocs == -1 ? 0 : docs.numDocs) + numDocs;
            }
            int maxDoc = shard.docs().maxDoc();
            if (maxDoc != -1) {
                docs.maxDoc = (docs.maxDoc == -1 ? 0 : docs.maxDoc) + maxDoc;
            }
            int deletedDocs = shard.docs().deletedDocs();
            if (deletedDocs != -1) {
                docs.deletedDocs = (docs.deletedDocs == -1 ? 0 : docs.deletedDocs) + deletedDocs;
            }
        }
        return docs;
    }

    @Override public Iterator<IndexShardStatus> iterator() {
        return indexShards.values().iterator();
    }
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.status; + +import org.elasticsearch.action.support.shards.ShardsOperationRequest; +import org.elasticsearch.action.support.shards.ShardsOperationThreading; +import org.elasticsearch.util.Strings; + +/** + * @author kimchy (Shay Banon) + */ +public class IndicesStatusRequest extends ShardsOperationRequest { + + public IndicesStatusRequest() { + this(Strings.EMPTY_ARRAY); + } + + public IndicesStatusRequest(String... indices) { + super(indices); + } + + @Override public IndicesStatusRequest listenerThreaded(boolean listenerThreaded) { + super.listenerThreaded(listenerThreaded); + return this; + } + + @Override public IndicesStatusRequest operationThreading(ShardsOperationThreading operationThreading) { + super.operationThreading(operationThreading); + return this; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java new file mode 100644 index 00000000000..0e7a7397d6b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
package org.elasticsearch.action.admin.indices.status;

import com.google.common.collect.ImmutableMap;
import org.elasticsearch.action.support.shards.ShardsOperationResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.util.settings.Settings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.*;
import static com.google.common.collect.Maps.*;
import static org.elasticsearch.action.admin.indices.status.ShardStatus.*;
import static org.elasticsearch.util.settings.ImmutableSettings.*;

/**
 * Response for an indices-status request: the flat per-shard responses plus, per
 * index, the index settings captured from the cluster state at response time.
 * The per-index view is built lazily from the flat shard list.
 *
 * NOTE(review): generic type arguments on the maps appear stripped by the diff
 * extraction — restored from usage; confirm.
 *
 * @author kimchy (Shay Banon)
 */
public class IndicesStatusResponse extends ShardsOperationResponse<ShardStatus> {

    private Map<String, Settings> indicesSettings = ImmutableMap.of();

    // Lazily built per-index view; null until indices() is first called.
    private Map<String, IndexStatus> indicesStatus;

    IndicesStatusResponse() {
    }

    IndicesStatusResponse(ShardStatus[] shards, ClusterState clusterState) {
        super(shards);
        indicesSettings = newHashMap();
        for (ShardStatus shard : shards) {
            String index = shard.shardRouting().index();
            if (!indicesSettings.containsKey(index)) {
                indicesSettings.put(index, clusterState.metaData().index(index).settings());
            }
        }
    }

    public IndexStatus index(String index) {
        return indices().get(index);
    }

    /**
     * Per-index status, built on first access by partitioning the flat shard
     * responses by index name and cached thereafter.
     * NOTE(review): the lazy build is not synchronized — appears to assume
     * single-threaded consumption of the response; confirm.
     */
    public Map<String, IndexStatus> indices() {
        if (indicesStatus != null) {
            return indicesStatus;
        }
        Map<String, IndexStatus> built = newHashMap();
        for (String index : indicesSettings.keySet()) {
            List<ShardStatus> indexShards = newArrayList();
            for (ShardStatus shard : shards()) {
                if (shard.shardRouting().index().equals(index)) {
                    indexShards.add(shard);
                }
            }
            built.put(index, new IndexStatus(index, indicesSettings.get(index), indexShards.toArray(new ShardStatus[indexShards.size()])));
        }
        this.indicesStatus = built;
        return built;
    }

    // Wire format: shard count, each shard, settings count, each (index, settings) pair.
    @Override public void writeTo(DataOutput out) throws IOException {
        super.writeTo(out);
        out.writeInt(shards().length);
        for (ShardStatus status : shards()) {
            status.writeTo(out);
        }
        out.writeInt(indicesSettings.size());
        for (Map.Entry<String, Settings> entry : indicesSettings.entrySet()) {
            out.writeUTF(entry.getKey());
            writeSettingsToStream(entry.getValue(), out);
        }
    }

    // Must mirror writeTo exactly.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        super.readFrom(in);
        shards = new ShardStatus[in.readInt()];
        for (int i = 0; i < shards.length; i++) {
            shards[i] = readIndexShardStatus(in);
        }
        indicesSettings = newHashMap();
        int size = in.readInt();
        for (int i = 0; i < size; i++) {
            indicesSettings.put(in.readUTF(), readSettingsFromStream(in));
        }
    }
}
package org.elasticsearch.action.admin.indices.status;

import org.elasticsearch.action.support.shards.ShardOperationResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.util.SizeValue;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import static org.elasticsearch.util.SizeValue.*;

/**
 * Status of a single physical shard copy: state, store size, flushable memory
 * estimate, translog position, and document counts. Fields are package-private and
 * filled in directly by {@link TransportIndicesStatusAction}; {@code -1} /
 * {@link SizeValue#UNKNOWN} marks values that could not be obtained.
 *
 * @author kimchy (Shay Banon)
 */
public class ShardStatus extends ShardOperationResponse {

    /** Document counts for this copy; any field left at {@code -1} is unknown. */
    public static class Docs {
        public static final Docs UNKNOWN = new Docs();

        int numDocs = -1;
        int maxDoc = -1;
        int deletedDocs = -1;

        public int numDocs() {
            return numDocs;
        }

        public int maxDoc() {
            return maxDoc;
        }

        public int deletedDocs() {
            return deletedDocs;
        }
    }

    IndexShardState state;

    SizeValue storeSize = SizeValue.UNKNOWN;

    SizeValue estimatedFlushableMemorySize = SizeValue.UNKNOWN;

    long translogId = -1;

    long translogOperations = -1;

    Docs docs = Docs.UNKNOWN;

    ShardStatus() {
    }

    ShardStatus(ShardRouting shardRouting) {
        super(shardRouting);
    }

    public IndexShardState state() {
        return state;
    }

    public SizeValue storeSize() {
        return storeSize;
    }

    public SizeValue estimatedFlushableMemorySize() {
        return estimatedFlushableMemorySize;
    }

    public long translogId() {
        return translogId;
    }

    public long translogOperations() {
        return translogOperations;
    }

    public Docs docs() {
        return docs;
    }

    /** Deserialization factory used by {@link IndicesStatusResponse#readFrom}. */
    public static ShardStatus readIndexShardStatus(DataInput in) throws ClassNotFoundException, IOException {
        ShardStatus shardStatus = new ShardStatus();
        shardStatus.readFrom(in);
        return shardStatus;
    }

    // Wire format: state id byte, two sizes, two longs, three doc-count ints.
    @Override public void writeTo(DataOutput out) throws IOException {
        super.writeTo(out);
        out.writeByte(state.id());
        storeSize.writeTo(out);
        estimatedFlushableMemorySize.writeTo(out);
        out.writeLong(translogId);
        out.writeLong(translogOperations);
        out.writeInt(docs.numDocs());
        out.writeInt(docs.maxDoc());
        out.writeInt(docs.deletedDocs());
    }

    // Must mirror writeTo exactly.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        super.readFrom(in);
        state = IndexShardState.fromId(in.readByte());
        storeSize = readSizeValue(in);
        estimatedFlushableMemorySize = readSizeValue(in);
        translogId = in.readLong();
        translogOperations = in.readLong();
        docs = new Docs();
        docs.numDocs = in.readInt();
        docs.maxDoc = in.readInt();
        docs.deletedDocs = in.readInt();
    }
}
package org.elasticsearch.action.admin.indices.status;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.action.TransportActions;
import org.elasticsearch.action.support.shards.ShardOperationRequest;
import org.elasticsearch.action.support.shards.TransportShardsOperationActions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.InternalIndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.*;

/**
 * Transport action collecting {@link ShardStatus} from every targeted shard and
 * assembling an {@link IndicesStatusResponse}. Per-shard failures are dropped
 * (exceptions are not accumulated), so the response contains only the shards that
 * answered successfully.
 *
 * NOTE(review): superclass type parameters appear stripped by the diff extraction —
 * restored from the request/response types used below; confirm.
 *
 * @author kimchy (Shay Banon)
 */
public class TransportIndicesStatusAction extends TransportShardsOperationActions<IndicesStatusRequest, IndicesStatusResponse, TransportIndicesStatusAction.IndexShardStatusRequest, ShardStatus> {

    @Inject public TransportIndicesStatusAction(Settings settings, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ThreadPool threadPool) {
        super(settings, clusterService, transportService, indicesService, threadPool);
    }

    @Override protected String transportAction() {
        return TransportActions.Admin.Indices.STATUS;
    }

    @Override protected String transportShardAction() {
        return "indices/status/shard";
    }

    @Override protected IndicesStatusRequest newRequest() {
        return new IndicesStatusRequest();
    }

    @Override protected IndexShardStatusRequest newShardRequest() {
        return new IndexShardStatusRequest();
    }

    @Override protected IndexShardStatusRequest newShardRequest(ShardRouting shard, IndicesStatusRequest request) {
        return new IndexShardStatusRequest(shard.index(), shard.id());
    }

    @Override protected ShardStatus newShardResponse() {
        return new ShardStatus();
    }

    @Override protected boolean accumulateExceptions() {
        return false;
    }

    @Override protected IndicesStatusResponse newResponse(IndicesStatusRequest request, ClusterState clusterState, AtomicReferenceArray shardsResponses) {
        // Keep only real shard responses; failed slots hold null (or non-status objects).
        final List<ShardStatus> shards = newArrayList();
        for (int i = 0; i < shardsResponses.length(); i++) {
            Object resp = shardsResponses.get(i);
            if (resp instanceof ShardStatus) {
                shards.add((ShardStatus) resp);
            }
        }
        return new IndicesStatusResponse(shards.toArray(new ShardStatus[shards.size()]), clusterState);
    }

    /**
     * Executed on the node holding the shard: snapshots state, store size, translog
     * position and Lucene document counts into a {@link ShardStatus}.
     */
    @Override protected ShardStatus shardOperation(IndexShardStatusRequest request) throws ElasticSearchException {
        InternalIndexShard indexShard = (InternalIndexShard) indicesService.indexServiceSafe(request.index()).shard(request.shardId());
        ShardStatus shardStatus = new ShardStatus(indexShard.routingEntry());
        shardStatus.state = indexShard.state();
        try {
            shardStatus.storeSize = indexShard.store().estimateSize();
        } catch (IOException e) {
            // Deliberate best effort: the store size stays UNKNOWN if it can't be read.
        }
        shardStatus.estimatedFlushableMemorySize = indexShard.estimateFlushableMemorySize();
        shardStatus.translogId = indexShard.translog().currentId();
        shardStatus.translogOperations = indexShard.translog().size();
        // The searcher must be released no matter what the reader calls throw.
        Engine.Searcher searcher = indexShard.searcher();
        try {
            ShardStatus.Docs docs = new ShardStatus.Docs();
            docs.numDocs = searcher.reader().numDocs();
            docs.maxDoc = searcher.reader().maxDoc();
            docs.deletedDocs = searcher.reader().numDeletedDocs();
            shardStatus.docs = docs;
        } finally {
            searcher.release();
        }
        return shardStatus;
    }

    /** Per-shard request: just the (index, shardId) address, no extra payload. */
    public static class IndexShardStatusRequest extends ShardOperationRequest {

        IndexShardStatusRequest() {
        }

        IndexShardStatusRequest(String index, int shardId) {
            super(index, shardId);
        }
    }
}
package org.elasticsearch.action.count;

import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.Required;
import org.elasticsearch.util.Strings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Request to count the documents matching a query across one or more indices,
 * optionally restricted to specific mapping types and filtered by a minimum score.
 *
 * @author kimchy (Shay Banon)
 */
public class CountRequest extends BroadcastOperationRequest {

    /** Sentinel meaning "no minimum score filtering". */
    public static final float DEFAULT_MIN_SCORE = -1f;

    private float minScore = DEFAULT_MIN_SCORE;
    // Required: writeTo unconditionally serializes the query source.
    @Required private String querySource;
    private String[] types = Strings.EMPTY_ARRAY;
    @Nullable private String queryParserName;

    CountRequest() {
    }

    public CountRequest(String... indices) {
        super(indices, null);
    }

    // Covariant-style overrides so the fluent setters return this concrete type.
    @Override public CountRequest operationThreading(BroadcastOperationThreading operationThreading) {
        super.operationThreading(operationThreading);
        return this;
    }

    @Override public CountRequest listenerThreaded(boolean threadedListener) {
        super.listenerThreaded(threadedListener);
        return this;
    }

    // Sets the inherited routing hint. NOTE(review): field is declared on the
    // broadcast base class — confirm.
    public CountRequest queryHint(String queryHint) {
        this.queryHint = queryHint;
        return this;
    }

    float minScore() {
        return minScore;
    }

    /** Only count hits scoring at or above this value; default counts everything. */
    public CountRequest minScore(float minScore) {
        this.minScore = minScore;
        return this;
    }

    String querySource() {
        return querySource;
    }

    @Required public CountRequest querySource(QueryBuilder queryBuilder) {
        return querySource(queryBuilder.build());
    }

    public CountRequest querySource(String querySource) {
        this.querySource = querySource;
        return this;
    }

    String queryParserName() {
        return queryParserName;
    }

    public CountRequest queryParserName(String queryParserName) {
        this.queryParserName = queryParserName;
        return this;
    }

    String[] types() {
        return this.types;
    }

    /** Restrict the count to the given mapping types; empty means all types. */
    public CountRequest types(String... types) {
        this.types = types;
        return this;
    }

    // Must mirror writeTo exactly: minScore, querySource, optional parser name, types.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        super.readFrom(in);
        minScore = in.readFloat();
        querySource = in.readUTF();
        if (in.readBoolean()) {
            queryParserName = in.readUTF();
        }
        int numTypes = in.readInt();
        if (numTypes > 0) {
            types = new String[numTypes];
            for (int i = 0; i < numTypes; i++) {
                types[i] = in.readUTF();
            }
        }
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        super.writeTo(out);
        out.writeFloat(minScore);
        out.writeUTF(querySource);
        if (queryParserName == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeUTF(queryParserName);
        }
        out.writeInt(types.length);
        for (String type : types) {
            out.writeUTF(type);
        }
    }
}
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.count; + +import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class CountResponse extends BroadcastOperationResponse { + + private long count; + + CountResponse() { + + } + + public CountResponse(long count, int successfulShards, int failedShards) { + super(successfulShards, failedShards); + this.count = count; + } + + public long count() { + return count; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + count = in.readLong(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeLong(count); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/ShardCountRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/ShardCountRequest.java new file mode 100644 index 00000000000..866017d6cf0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/ShardCountRequest.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.count; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.Strings; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardCountRequest extends BroadcastShardOperationRequest { + + private float minScore; + private String querySource; + private String[] types = Strings.EMPTY_ARRAY; + @Nullable private String queryParserName; + + ShardCountRequest() { + + } + + public ShardCountRequest(String index, int shardId, String querySource, float minScore, + @Nullable String queryParserName, String... 
types) { + super(index, shardId); + this.minScore = minScore; + this.querySource = querySource; + this.queryParserName = queryParserName; + this.types = types; + } + + public float minScore() { + return minScore; + } + + public String querySource() { + return querySource; + } + + public String queryParserName() { + return queryParserName; + } + + public String[] types() { + return this.types; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + minScore = in.readFloat(); + querySource = in.readUTF(); + if (in.readBoolean()) { + queryParserName = in.readUTF(); + } + int typesSize = in.readInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readUTF(); + } + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeFloat(minScore); + out.writeUTF(querySource); + if (queryParserName == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryParserName); + } + out.writeInt(types.length); + for (String type : types) { + out.writeUTF(type); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/ShardCountResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/ShardCountResponse.java new file mode 100644 index 00000000000..c40be03f2f8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/ShardCountResponse.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.count; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardCountResponse extends BroadcastShardOperationResponse { + + private long count; + + ShardCountResponse() { + + } + + public ShardCountResponse(String index, int shardId, long count) { + super(index, shardId); + this.count = count; + } + + public long count() { + return this.count; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + count = in.readLong(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeLong(count); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/TransportCountAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/TransportCountAction.java new file mode 100644 index 00000000000..dfecdc4b0d2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/count/TransportCountAction.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.count; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportCountAction extends TransportBroadcastOperationAction { + + @Inject public TransportCountAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService) { + super(settings, threadPool, clusterService, transportService, indicesService); + } + + @Override protected String transportAction() { + return TransportActions.COUNT; + } + + @Override protected String transportShardAction() { + return "indices/count/shard"; + } + + @Override protected CountRequest newRequest() { + return new CountRequest(); + } + + @Override protected ShardCountRequest 
newShardRequest() { + return new ShardCountRequest(); + } + + @Override protected ShardCountRequest newShardRequest(ShardRouting shard, CountRequest request) { + return new ShardCountRequest(shard.index(), shard.id(), request.querySource(), request.minScore(), request.queryParserName(), request.types()); + } + + @Override protected ShardCountResponse newShardResponse() { + return new ShardCountResponse(); + } + + @Override protected CountResponse newResponse(CountRequest request, AtomicReferenceArray shardsResponses) { + int successfulShards = 0; + int failedShards = 0; + long count = 0; + for (int i = 0; i < shardsResponses.length(); i++) { + ShardCountResponse shardCountResponse = (ShardCountResponse) shardsResponses.get(i); + if (shardCountResponse == null) { + failedShards++; + } else { + count += shardCountResponse.count(); + successfulShards++; + } + } + return new CountResponse(count, successfulShards, failedShards); + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + @Override protected ShardCountResponse shardOperation(ShardCountRequest request) throws ElasticSearchException { + IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId()); + long count = indexShard.count(request.minScore(), request.querySource(), request.queryParserName(), request.types()); + return new ShardCountResponse(request.index(), request.shardId(), count); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java new file mode 100644 index 00000000000..9f4a54aaac8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.delete; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; +import org.elasticsearch.util.Required; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteRequest extends ShardReplicationOperationRequest { + + private String type; + private String id; + + public DeleteRequest(String index) { + this.index = index; + } + + public DeleteRequest(String index, String type, String id) { + this.index = index; + this.type = type; + this.id = id; + } + + DeleteRequest() { + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (type == null) { + validationException = addValidationError("type is missing", validationException); + } + if (id == null) { + validationException = addValidationError("id is missing", validationException); + } + return validationException; + } + + @Override public DeleteRequest listenerThreaded(boolean threadedListener) { + 
super.listenerThreaded(threadedListener); + return this; + } + + @Override public DeleteRequest operationThreaded(boolean threadedOperation) { + super.operationThreaded(threadedOperation); + return this; + } + + String type() { + return type; + } + + @Required public DeleteRequest type(String type) { + this.type = type; + return this; + } + + String id() { + return id; + } + + @Required public DeleteRequest id(String id) { + this.id = id; + return this; + } + + public DeleteRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + type = in.readUTF(); + id = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeUTF(type); + out.writeUTF(id); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java new file mode 100644 index 00000000000..124b168721d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.delete; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteResponse implements ActionResponse, Streamable { + + private String index; + + private String id; + + private String type; + + DeleteResponse() { + + } + + public DeleteResponse(String index, String type, String id) { + this.index = index; + this.id = id; + this.type = type; + } + + public String index() { + return this.index; + } + + public String id() { + return this.id; + } + + public String type() { + return this.type; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + id = in.readUTF(); + type = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeUTF(id); + out.writeUTF(type); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java new file mode 100644 index 00000000000..4abf9a352d8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.delete; + +import com.google.inject.Inject; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndexAlreadyExistsException; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportDeleteAction extends TransportShardReplicationOperationAction { + + private final boolean autoCreateIndex; + + private final TransportCreateIndexAction createIndexAction; + + @Inject public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, + IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + TransportCreateIndexAction createIndexAction) { + super(settings, transportService, clusterService, indicesService, 
threadPool, shardStateAction); + this.createIndexAction = createIndexAction; + this.autoCreateIndex = componentSettings.getAsBoolean("autoCreateIndex", true); + } + + @Override protected void doExecute(final DeleteRequest deleteRequest, final ActionListener listener) { + if (autoCreateIndex) { + if (!clusterService.state().metaData().hasIndex(deleteRequest.index())) { + createIndexAction.execute(new CreateIndexRequest(deleteRequest.index()), new ActionListener() { + @Override public void onResponse(CreateIndexResponse result) { + TransportDeleteAction.super.doExecute(deleteRequest, listener); + } + + @Override public void onFailure(Throwable e) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { + // we have the index, do it + TransportDeleteAction.super.doExecute(deleteRequest, listener); + } else { + listener.onFailure(e); + } + } + }); + } else { + super.doExecute(deleteRequest, listener); + } + } + } + + @Override protected DeleteRequest newRequestInstance() { + return new DeleteRequest(); + } + + @Override protected DeleteResponse newResponseInstance() { + return new DeleteResponse(); + } + + @Override protected String transportAction() { + return TransportActions.DELETE; + } + + @Override protected DeleteResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + DeleteRequest request = shardRequest.request; + indexShard(shardRequest).delete(request.type(), request.id()); + return new DeleteResponse(request.index(), request.type(), request.id()); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + DeleteRequest request = shardRequest.request; + indexShard(shardRequest).delete(request.type(), request.id()); + } + + @Override protected ShardsIterator shards(DeleteRequest request) { + return indicesService.indexServiceSafe(request.index()).operationRouting() + .deleteShards(clusterService.state(), request.type(), request.id()); + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java new file mode 100644 index 00000000000..eb4b3bb0732 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.deletebyquery; + +import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.util.Required; +import org.elasticsearch.util.Strings; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteByQueryRequest extends IndicesReplicationOperationRequest { + + private String querySource; + private String queryParserName; + private String[] types = Strings.EMPTY_ARRAY; + + public DeleteByQueryRequest(String... 
indices) { + this.indices = indices; + } + + DeleteByQueryRequest() { + } + + @Override public DeleteByQueryRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + String querySource() { + return querySource; + } + + @Required public DeleteByQueryRequest querySource(QueryBuilder queryBuilder) { + return querySource(queryBuilder.build()); + } + + @Required public DeleteByQueryRequest querySource(String querySource) { + this.querySource = querySource; + return this; + } + + String queryParserName() { + return queryParserName; + } + + public DeleteByQueryRequest queryParserName(String queryParserName) { + this.queryParserName = queryParserName; + return this; + } + + String[] types() { + return this.types; + } + + public DeleteByQueryRequest types(String... types) { + this.types = types; + return this; + } + + public DeleteByQueryRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + querySource = in.readUTF(); + if (in.readBoolean()) { + queryParserName = in.readUTF(); + } + } + + public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeUTF(querySource); + if (queryParserName == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryParserName); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java new file mode 100644 index 00000000000..4d8d0af9fd3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.deletebyquery; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteByQueryResponse implements ActionResponse, Streamable { + + private Map indexResponses = new HashMap(); + + DeleteByQueryResponse() { + + } + + public Map indices() { + return indexResponses; + } + + public IndexDeleteByQueryResponse index(String index) { + return indexResponses.get(index); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + for (int i = 0; i < size; i++) { + IndexDeleteByQueryResponse response = new IndexDeleteByQueryResponse(); + response.readFrom(in); + indexResponses.put(response.index(), response); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(indexResponses.size()); + for (IndexDeleteByQueryResponse indexResponse : indexResponses.values()) { + indexResponse.writeTo(out); + } + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java new file mode 100644 index 00000000000..8da948bb6ef --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.deletebyquery; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.util.Required; +import org.elasticsearch.util.Strings; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexDeleteByQueryRequest extends IndexReplicationOperationRequest { + + private String querySource; + private String queryParserName; + private String[] types = Strings.EMPTY_ARRAY; + + public IndexDeleteByQueryRequest(String index, String... types) { + this.index = index; + this.types = types; + } + + IndexDeleteByQueryRequest(DeleteByQueryRequest request, String index) { + this.index = index; + this.timeout = request.timeout(); + this.querySource = request.querySource(); + this.queryParserName = request.queryParserName(); + this.types = request.types(); + } + + + IndexDeleteByQueryRequest() { + } + + String querySource() { + return querySource; + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (querySource == null) { + validationException = addValidationError("querySource is missing", validationException); + } + return validationException; + } + + @Required public IndexDeleteByQueryRequest querySource(QueryBuilder queryBuilder) { + return querySource(queryBuilder.build()); + } + + @Required public IndexDeleteByQueryRequest querySource(String querySource) { + this.querySource = querySource; + return this; + } + + String queryParserName() { + return queryParserName; + } + + String[] types() { + return this.types; + } + + public IndexDeleteByQueryRequest queryParserName(String queryParserName) 
{ + this.queryParserName = queryParserName; + return this; + } + + public IndexDeleteByQueryRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + querySource = in.readUTF(); + if (in.readBoolean()) { + queryParserName = in.readUTF(); + } + int typesSize = in.readInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readUTF(); + } + } + } + + public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeUTF(querySource); + if (queryParserName == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryParserName); + } + out.writeInt(types.length); + for (String type : types) { + out.writeUTF(type); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java new file mode 100644 index 00000000000..5faf53e6e3e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.deletebyquery; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexDeleteByQueryResponse implements ActionResponse, Streamable { + + private String index; + + private int successfulShards; + + private int failedShards; + + IndexDeleteByQueryResponse(String index, int successfulShards, int failedShards) { + this.index = index; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + } + + IndexDeleteByQueryResponse() { + + } + + public String index() { + return this.index; + } + + public int totalShards() { + return failedShards + successfulShards; + } + + public int successfulShards() { + return successfulShards; + } + + public int failedShards() { + return failedShards; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + successfulShards = in.readInt(); + failedShards = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeInt(successfulShards); + out.writeInt(failedShards); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java new file mode 100644 
index 00000000000..077e843b489 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.deletebyquery; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.Strings; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest { + + private int shardId; + private String querySource; + private String queryParserName; + private String[] types = Strings.EMPTY_ARRAY; + + public ShardDeleteByQueryRequest(String index, String querySource, @Nullable String queryParserName, String[] types, int shardId) { + this.index = index; + this.querySource = querySource; + this.queryParserName = queryParserName; + this.types = types; + this.shardId = shardId; + } + + 
ShardDeleteByQueryRequest(IndexDeleteByQueryRequest request, int shardId) { + this(request.index(), request.querySource(), request.queryParserName(), request.types(), shardId); + timeout = request.timeout(); + } + + ShardDeleteByQueryRequest() { + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (querySource == null) { + addValidationError("querySource is missing", validationException); + } + return validationException; + } + + public int shardId() { + return this.shardId; + } + + public String querySource() { + return querySource; + } + + public String queryParserName() { + return queryParserName; + } + + public String[] types() { + return this.types; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + querySource = in.readUTF(); + if (in.readBoolean()) { + queryParserName = in.readUTF(); + } + shardId = in.readInt(); + int typesSize = in.readInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readUTF(); + } + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeUTF(querySource); + if (queryParserName == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryParserName); + } + out.writeInt(shardId); + out.writeInt(types.length); + for (String type : types) { + out.writeUTF(type); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java new file mode 100644 index 00000000000..75fde2b534e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java @@ -0,0 +1,39 @@ +/* 
+ * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.deletebyquery; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardDeleteByQueryResponse implements ActionResponse, Streamable { + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java new file mode 100644 index 00000000000..4fc122ecd0c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.deletebyquery; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportDeleteByQueryAction extends TransportIndicesReplicationOperationAction { + + @Inject public TransportDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService, + ThreadPool threadPool, TransportIndexDeleteByQueryAction indexDeleteByQueryAction) { + super(settings, transportService, clusterService, threadPool, indexDeleteByQueryAction); + } + + @Override protected DeleteByQueryRequest newRequestInstance() { + return new DeleteByQueryRequest(); + } + + @Override protected DeleteByQueryResponse newResponseInstance(DeleteByQueryRequest request, AtomicReferenceArray indexResponses) { + DeleteByQueryResponse response = new DeleteByQueryResponse(); + for (int i = 0; i < 
indexResponses.length(); i++) { + IndexDeleteByQueryResponse indexResponse = (IndexDeleteByQueryResponse) indexResponses.get(i); + if (indexResponse != null) { + response.indices().put(indexResponse.index(), indexResponse); + } + } + return response; + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + @Override protected String transportAction() { + return TransportActions.DELETE_BY_QUERY; + } + + @Override protected IndexDeleteByQueryRequest newIndexRequestInstance(DeleteByQueryRequest request, String index) { + return new IndexDeleteByQueryRequest(request, index); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java new file mode 100644 index 00000000000..af5c1ec9672 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.deletebyquery; + +import com.google.inject.Inject; +import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportIndexDeleteByQueryAction extends TransportIndexReplicationOperationAction { + + private final ClusterService clusterService; + + private final IndicesService indicesService; + + @Inject public TransportIndexDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService, IndicesService indicesService, + ThreadPool threadPool, TransportShardDeleteByQueryAction shardDeleteByQueryAction) { + super(settings, transportService, threadPool, shardDeleteByQueryAction); + this.clusterService = clusterService; + this.indicesService = indicesService; + } + + @Override protected IndexDeleteByQueryRequest newRequestInstance() { + return new IndexDeleteByQueryRequest(); + } + + @Override protected IndexDeleteByQueryResponse newResponseInstance(IndexDeleteByQueryRequest request, AtomicReferenceArray shardsResponses) { + int successfulShards = 0; + int failedShards = 0; + for (int i = 0; i < shardsResponses.length(); i++) { + if (shardsResponses.get(i) == null) { + failedShards++; + } else { + successfulShards++; + } + } + return new IndexDeleteByQueryResponse(request.index(), successfulShards, failedShards); + } + + @Override protected boolean accumulateExceptions() { + return false; + } + + @Override protected String transportAction() { + return "indices/index/deleteByQuery"; + } + + @Override protected GroupShardsIterator 
shards(IndexDeleteByQueryRequest request) { + return indicesService.indexServiceSafe(request.index()).operationRouting().deleteByQueryShards(clusterService.state()); + } + + @Override protected ShardDeleteByQueryRequest newShardRequestInstance(IndexDeleteByQueryRequest request, int shardId) { + return new ShardDeleteByQueryRequest(request, shardId); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java new file mode 100644 index 00000000000..337243b9688 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.deletebyquery; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportShardDeleteByQueryAction extends TransportShardReplicationOperationAction { + + @Inject public TransportShardDeleteByQueryAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction) { + super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction); + } + + @Override protected ShardDeleteByQueryRequest newRequestInstance() { + return new ShardDeleteByQueryRequest(); + } + + @Override protected ShardDeleteByQueryResponse newResponseInstance() { + return new ShardDeleteByQueryResponse(); + } + + @Override protected String transportAction() { + return "indices/index/shard/deleteByQuery"; + } + + @Override protected ShardDeleteByQueryResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + ShardDeleteByQueryRequest request = shardRequest.request; + indexShard(shardRequest).deleteByQuery(request.querySource(), request.queryParserName(), request.types()); + return new ShardDeleteByQueryResponse(); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + ShardDeleteByQueryRequest request = shardRequest.request; + 
indexShard(shardRequest).deleteByQuery(request.querySource(), request.queryParserName(), request.types()); + } + + @Override protected ShardsIterator shards(ShardDeleteByQueryRequest request) { + GroupShardsIterator group = indicesService.indexServiceSafe(request.index()).operationRouting().deleteByQueryShards(clusterService.state()); + for (ShardsIterator shards : group) { + if (shards.shardId().id() == request.shardId()) { + return shards; + } + } + throw new ElasticSearchIllegalStateException("No shards iterator found for shard [" + request.shardId() + "]"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/GetRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/GetRequest.java new file mode 100644 index 00000000000..ab06309311e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.get; + +import org.elasticsearch.action.support.single.SingleOperationRequest; +import org.elasticsearch.util.Required; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class GetRequest extends SingleOperationRequest { + + GetRequest() { + } + + public GetRequest(String index) { + super(index, null, null); + } + + public GetRequest(String index, String type, String id) { + super(index, type, id); + } + + @Required public GetRequest type(String type) { + this.type = type; + return this; + } + + @Required public GetRequest id(String id) { + this.id = id; + return this; + } + + @Override public GetRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + @Override public GetRequest threadedOperation(boolean threadedOperation) { + super.threadedOperation(threadedOperation); + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/GetResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/GetResponse.java new file mode 100644 index 00000000000..8c8cda0774f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.get; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class GetResponse implements ActionResponse, Streamable { + + private String index; + + private String type; + + private String id; + + private String source; + + public GetResponse() { + } + + public GetResponse(String index, String type, String id, String source) { + this.index = index; + this.type = type; + this.id = id; + this.source = source; + } + + public boolean empty() { + return source == null; + } + + public String index() { + return this.index; + } + + public String type() { + return type; + } + + public String id() { + return id; + } + + public String source() { + return this.source; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + type = in.readUTF(); + id = in.readUTF(); + if (in.readBoolean()) { + source = in.readUTF(); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeUTF(type); + out.writeUTF(id); + if (source == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(source); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/TransportGetAction.java new file 
mode 100644 index 00000000000..5748b76cac3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.get; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.support.single.TransportSingleOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportGetAction extends TransportSingleOperationAction { + + @Inject public TransportGetAction(Settings settings, ClusterService clusterService, TransportService transportService, + IndicesService indicesService, ThreadPool threadPool) { + super(settings, threadPool, clusterService, transportService, indicesService); + } + + @Override protected String transportAction() { + return 
TransportActions.GET; + } + + @Override protected String transportShardAction() { + return "indices/get/shard"; + } + + @Override protected GetResponse shardOperation(GetRequest request, int shardId) throws ElasticSearchException { + IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(shardId); + return new GetResponse(request.index(), request.type(), request.id(), indexShard.get(request.type(), request.id())); + } + + @Override protected GetRequest newRequest() { + return new GetRequest(); + } + + @Override protected GetResponse newResponse() { + return new GetResponse(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/IndexRequest.java new file mode 100644 index 00000000000..d6421f946d0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.index; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; +import org.elasticsearch.util.Required; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexRequest extends ShardReplicationOperationRequest { + + public static enum OpType { + /** + * Index the source. If there an existing document with the id, it will + * be replaced. + */ + INDEX((byte) 0), + /** + * Creates the resource. Simply adds it to the index, if there is an existing + * document with the id, then it won't be removed. + */ + CREATE((byte) 1); + + private byte id; + + OpType(byte id) { + this.id = id; + } + + public byte id() { + return id; + } + + public static OpType fromId(byte id) { + if (id == 0) { + return INDEX; + } else if (id == 1) { + return CREATE; + } else { + throw new ElasticSearchIllegalArgumentException("No type match for [" + id + "]"); + } + } + } + + private String type; + private String id; + private String source; + private OpType opType = OpType.INDEX; + + public IndexRequest(String index) { + this.index = index; + } + + public IndexRequest(String index, String type, String id, String source) { + this.index = index; + this.type = type; + this.id = id; + this.source = source; + } + + IndexRequest() { + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (type == null) { + validationException = addValidationError("type is missing", validationException); + } + if (source == null) { + validationException = addValidationError("source is missing", validationException); + } + return 
validationException; + } + + @Override public IndexRequest listenerThreaded(boolean threadedListener) { + super.listenerThreaded(threadedListener); + return this; + } + + @Override public IndexRequest operationThreaded(boolean threadedOperation) { + super.operationThreaded(threadedOperation); + return this; + } + + String type() { + return type; + } + + @Required public IndexRequest type(String type) { + this.type = type; + return this; + } + + String id() { + return id; + } + + @Required public IndexRequest id(String id) { + this.id = id; + return this; + } + + String source() { + return source; + } + + public IndexRequest source(String source) { + this.source = source; + return this; + } + + public IndexRequest timeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public IndexRequest opType(OpType opType) { + this.opType = opType; + return this; + } + + public OpType opType() { + return this.opType; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + super.readFrom(in); + type = in.readUTF(); + id = in.readUTF(); + source = in.readUTF(); + opType = OpType.fromId(in.readByte()); + } + + @Override public void writeTo(DataOutput out) throws IOException { + super.writeTo(out); + out.writeUTF(type); + out.writeUTF(id); + out.writeUTF(source); + out.writeByte(opType.id()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/IndexResponse.java new file mode 100644 index 00000000000..5a4228180da --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.index; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexResponse implements ActionResponse, Streamable { + + private String index; + + private String id; + + private String type; + + public IndexResponse() { + + } + + public IndexResponse(String index, String type, String id) { + this.index = index; + this.id = id; + this.type = type; + } + + public String index() { + return this.index; + } + + public String id() { + return this.id; + } + + public String type() { + return this.type; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + id = in.readUTF(); + type = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeUTF(id); + out.writeUTF(type); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java new file mode 100644 index 00000000000..496d9d3a48e --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.index; + +import com.google.inject.Inject; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndexAlreadyExistsException; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.UUID; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + 
*/ +public class TransportIndexAction extends TransportShardReplicationOperationAction { + + private final boolean autoCreateIndex; + + private final boolean allowIdGeneration; + + private final TransportCreateIndexAction createIndexAction; + + @Inject public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, + IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + TransportCreateIndexAction createIndexAction) { + super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction); + this.createIndexAction = createIndexAction; + this.autoCreateIndex = componentSettings.getAsBoolean("autoCreateIndex", true); + this.allowIdGeneration = componentSettings.getAsBoolean("allowIdGeneration", true); + } + + @Override protected void doExecute(final IndexRequest indexRequest, final ActionListener listener) { + if (allowIdGeneration) { + if (indexRequest.id() == null) { + indexRequest.id(UUID.randomUUID().toString()); + // since we generate the id, change it to CREATE + indexRequest.opType(IndexRequest.OpType.CREATE); + } + } + if (autoCreateIndex) { + if (!clusterService.state().metaData().hasIndex(indexRequest.index())) { + createIndexAction.execute(new CreateIndexRequest(indexRequest.index()), new ActionListener() { + @Override public void onResponse(CreateIndexResponse result) { + TransportIndexAction.super.doExecute(indexRequest, listener); + } + + @Override public void onFailure(Throwable e) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { + // we have the index, do it + TransportIndexAction.super.doExecute(indexRequest, listener); + } else { + listener.onFailure(e); + } + } + }); + } else { + super.doExecute(indexRequest, listener); + } + } + } + + @Override protected IndexRequest newRequestInstance() { + return new IndexRequest(); + } + + @Override protected IndexResponse newResponseInstance() { + return new 
IndexResponse(); + } + + @Override protected String transportAction() { + return TransportActions.INDEX; + } + + @Override protected ShardsIterator shards(IndexRequest request) { + return indicesService.indexServiceSafe(request.index()).operationRouting() + .indexShards(clusterService.state(), request.type(), request.id()); + } + + @Override protected IndexResponse shardOperationOnPrimary(ShardOperationRequest shardRequest) { + IndexRequest request = shardRequest.request; + if (request.opType() == IndexRequest.OpType.INDEX) { + indexShard(shardRequest).index(request.type(), request.id(), request.source()); + } else { + indexShard(shardRequest).create(request.type(), request.id(), request.source()); + } + return new IndexResponse(request.index(), request.type(), request.id()); + } + + @Override protected void shardOperationOnBackup(ShardOperationRequest shardRequest) { + IndexRequest request = shardRequest.request; + if (request.opType() == IndexRequest.OpType.INDEX) { + indexShard(shardRequest).index(request.type(), request.id(), request.source()); + } else { + indexShard(shardRequest).create(request.type(), request.id(), request.source()); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java new file mode 100644 index 00000000000..39395b9fd81 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; + +/** + * Controls the operation threading model for search operation that are performed + * locally on the executing node. + * + * @author kimchy (Shay Banon) + */ +public enum SearchOperationThreading { + /** + * No threads are used, all the local shards operations will be performed on the calling + * thread. + */ + NO_THREADS((byte) 0), + /** + * The local shards operations will be performed in serial manner on a single forked thread. + */ + SINGLE_THREAD((byte) 1), + /** + * Each local shard operation will execute on its own thread. 
+ */ + THREAD_PER_SHARD((byte) 2); + + private final byte id; + + SearchOperationThreading(byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + + public static SearchOperationThreading fromId(byte id) { + if (id == 0) { + return NO_THREADS; + } + if (id == 1) { + return SINGLE_THREAD; + } + if (id == 2) { + return THREAD_PER_SHARD; + } + throw new ElasticSearchIllegalArgumentException("No type matching id [" + id + "]"); + } + + public static SearchOperationThreading fromString(String value, SearchOperationThreading defaultValue) { + if (value == null) { + return defaultValue; + } + return SearchOperationThreading.valueOf(value.toUpperCase()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchRequest.java new file mode 100644 index 00000000000..f8998db5ba4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -0,0 +1,303 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.util.Required; +import org.elasticsearch.util.Strings; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.gnu.trove.TObjectFloatHashMap; +import org.elasticsearch.util.gnu.trove.TObjectFloatIterator; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; +import static org.elasticsearch.search.Scroll.*; +import static org.elasticsearch.util.TimeValue.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchRequest implements ActionRequest { + + private static TObjectFloatHashMap EMPTY = new TObjectFloatHashMap(); + + private SearchType searchType = SearchType.QUERY_THEN_FETCH; + + private String[] indices; + + private String queryHint; + + private String source; + + private Scroll scroll; + + private int from = -1; + + private int size = -1; + + private String[] types = Strings.EMPTY_ARRAY; + + private TObjectFloatHashMap queryBoost = EMPTY; + + private TimeValue timeout; + + private boolean listenerThreaded = false; + private SearchOperationThreading operationThreading = SearchOperationThreading.SINGLE_THREAD; + + SearchRequest() { + } + + public SearchRequest(String... 
indices) { + this.indices = indices; + } + + public SearchRequest(String index, SearchSourceBuilder source) { + this(index, source.build()); + } + + public SearchRequest(String index, String source) { + this(new String[]{index}, source); + } + + public SearchRequest(String[] indices, SearchSourceBuilder source) { + this(indices, source.build()); + } + + public SearchRequest(String[] indices, String source) { + this.indices = indices; + this.source = source; + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (source == null) { + validationException = addValidationError("search source is missing", validationException); + } + return validationException; + } + + @Override public boolean listenerThreaded() { + return listenerThreaded; + } + + @Override public SearchRequest listenerThreaded(boolean listenerThreaded) { + this.listenerThreaded = listenerThreaded; + return this; + } + + public SearchOperationThreading operationThreading() { + return this.operationThreading; + } + + public SearchRequest operationThreading(SearchOperationThreading operationThreading) { + this.operationThreading = operationThreading; + return this; + } + + public SearchRequest searchType(SearchType searchType) { + this.searchType = searchType; + return this; + } + + @Required public SearchRequest source(SearchSourceBuilder sourceBuilder) { + return source(sourceBuilder.build()); + } + + @Required public SearchRequest source(String source) { + this.source = source; + return this; + } + + public SearchType searchType() { + return searchType; + } + + public String[] indices() { + return indices; + } + + public SearchRequest queryHint(String queryHint) { + this.queryHint = queryHint; + return this; + } + + public String queryHint() { + return queryHint; + } + + public String source() { + return source; + } + + public Scroll scroll() { + return scroll; + } + + public SearchRequest scroll(Scroll scroll) { + 
this.scroll = scroll; + return this; + } + + public int from() { + return from; + } + + public SearchRequest from(int from) { + this.from = from; + return this; + } + + public String[] types() { + return types; + } + + public SearchRequest types(String... types) { + this.types = types; + return this; + } + + public TimeValue timeout() { + return timeout; + } + + public void timeout(TimeValue timeout) { + this.timeout = timeout; + } + + /** + * Allows to set a dynamic query boost on an index level query. Very handy when, for example, each user has + * his own index, and friends matter more than friends of friends. + */ + public TObjectFloatHashMap queryBoost() { + return queryBoost; + } + + public SearchRequest queryBoost(String index, float queryBoost) { + if (this.queryBoost == EMPTY) { + this.queryBoost = new TObjectFloatHashMap(); + } + this.queryBoost.put(index, queryBoost); + return this; + } + + public int size() { + return size; + } + + public SearchRequest size(int size) { + this.size = size; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + operationThreading = SearchOperationThreading.fromId(in.readByte()); + searchType = SearchType.fromId(in.readByte()); + + indices = new String[in.readInt()]; + for (int i = 0; i < indices.length; i++) { + indices[i] = in.readUTF(); + } + + if (in.readBoolean()) { + queryHint = in.readUTF(); + } + + if (in.readBoolean()) { + scroll = readScroll(in); + } + from = in.readInt(); + size = in.readInt(); + if (in.readBoolean()) { + timeout = readTimeValue(in); + } + source = in.readUTF(); + + int size = in.readInt(); + if (size == 0) { + queryBoost = EMPTY; + } else { + queryBoost = new TObjectFloatHashMap(size); + for (int i = 0; i < size; i++) { + queryBoost.put(in.readUTF(), in.readFloat()); + } + } + + int typesSize = in.readInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readUTF(); + } + 
} + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeByte(operationThreading.id()); + out.writeByte(searchType.id()); + + out.writeInt(indices.length); + for (String index : indices) { + out.writeUTF(index); + } + + if (queryHint == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryHint); + } + + if (scroll == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + scroll.writeTo(out); + } + out.writeInt(from); + out.writeInt(size); + if (timeout == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + timeout.writeTo(out); + } + out.writeUTF(source); + if (queryBoost == null) { + out.writeInt(0); + } else { + out.writeInt(queryBoost.size()); + for (TObjectFloatIterator it = queryBoost.iterator(); it.hasNext();) { + out.writeUTF(it.key()); + out.writeFloat(it.value()); + it.advance(); + } + } + out.writeInt(types.length); + for (String type : types) { + out.writeUTF(type); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchResponse.java new file mode 100644 index 00000000000..6aa24285c74 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.facets.Facets; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.json.ToJson; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.search.internal.InternalSearchResponse.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchResponse implements ActionResponse, ToJson { + + private InternalSearchResponse internalResponse; + + private String scrollId; + + private int totalShards; + + private int successfulShards; + + public SearchResponse() { + } + + public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards) { + this.internalResponse = internalResponse; + this.scrollId = scrollId; + this.totalShards = totalShards; + this.successfulShards = successfulShards; + } + + public SearchHits hits() { + return internalResponse.hits(); + } + + public Facets facets() { + return internalResponse.facets(); + } + + public int totalShards() { + return totalShards; + } + + public int successfulShards() { + return successfulShards; + } + + public int failedShards() { + return totalShards - successfulShards; + } + + public String scrollId() { + return scrollId; + } + + public static SearchResponse readSearchResponse(DataInput in) throws IOException, 
ClassNotFoundException { + SearchResponse response = new SearchResponse(); + response.readFrom(in); + return response; + } + + @Override public void toJson(JsonBuilder builder) throws IOException { + if (scrollId != null) { + builder.field("_scrollId", scrollId); + } + builder.startObject("_shards"); + builder.field("total", totalShards()); + builder.field("successful", successfulShards()); + builder.field("failed", failedShards()); + builder.endObject(); + internalResponse.toJson(builder); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + internalResponse = readInternalSearchResponse(in); + totalShards = in.readInt(); + successfulShards = in.readInt(); + if (in.readBoolean()) { + scrollId = in.readUTF(); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + internalResponse.writeTo(out); + out.writeInt(totalShards); + out.writeInt(successfulShards); + if (scrollId == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(scrollId); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java new file mode 100644 index 00000000000..14ba0190b21 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.search.Scroll; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; +import static org.elasticsearch.search.Scroll.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchScrollRequest implements ActionRequest { + + private String scrollId; + + private Scroll scroll; + + public SearchScrollRequest() { + } + + public SearchScrollRequest(String scrollId) { + this.scrollId = scrollId; + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (scrollId == null) { + validationException = addValidationError("scrollId is missing", validationException); + } + return validationException; + } + + @Override public boolean listenerThreaded() { + // TODO threaded + return false; //To change body of implemented methods use File | Settings | File Templates. + } + + @Override public ActionRequest listenerThreaded(boolean threadedListener) { + // TODO threaded + return null; //To change body of implemented methods use File | Settings | File Templates. 
+ } + + public String scrollId() { + return scrollId; + } + + public Scroll scroll() { + return scroll; + } + + public void scroll(Scroll scroll) { + this.scroll = scroll; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + scrollId = in.readUTF(); + if (in.readBoolean()) { + scroll = readScroll(in); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(scrollId); + if (scroll == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + scroll.writeTo(out); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchType.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchType.java new file mode 100644 index 00000000000..42ef69b8419 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/SearchType.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; + +/** + * @author kimchy (Shay Banon) + */ +public enum SearchType { + DFS_QUERY_THEN_FETCH((byte) 0), + QUERY_THEN_FETCH((byte) 1), + DFS_QUERY_AND_FETCH((byte) 2), + QUERY_AND_FETCH((byte) 3); + + private byte id; + + SearchType(byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + + public static SearchType fromId(byte id) { + if (id == 0) { + return DFS_QUERY_THEN_FETCH; + } else if (id == 1) { + return QUERY_THEN_FETCH; + } else if (id == 2) { + return DFS_QUERY_AND_FETCH; + } else if (id == 3) { + return QUERY_AND_FETCH; + } else { + throw new ElasticSearchIllegalArgumentException("No search type for [" + id + "]"); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java new file mode 100644 index 00000000000..28ee357fc3b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction; +import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction; +import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction; +import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.action.search.SearchType.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchAction extends BaseAction { + + private final TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction; + + private final TransportSearchQueryThenFetchAction queryThenFetchAction; + + private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction; + + private final TransportSearchQueryAndFetchAction queryAndFetchAction; + + @Inject public TransportSearchAction(Settings settings, TransportService transportService, + TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction, + TransportSearchQueryThenFetchAction queryThenFetchAction, + TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction, + TransportSearchQueryAndFetchAction queryAndFetchAction) { + super(settings); + this.dfsQueryThenFetchAction = dfsQueryThenFetchAction; + this.queryThenFetchAction = queryThenFetchAction; + this.dfsQueryAndFetchAction = dfsQueryAndFetchAction; + this.queryAndFetchAction = queryAndFetchAction; + + transportService.registerHandler(TransportActions.SEARCH, new TransportHandler()); + } + + @Override protected void doExecute(SearchRequest 
searchRequest, ActionListener listener) { + if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) { + dfsQueryThenFetchAction.execute(searchRequest, listener); + } else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) { + queryThenFetchAction.execute(searchRequest, listener); + } else if (searchRequest.searchType() == SearchType.DFS_QUERY_AND_FETCH) { + dfsQueryAndFetchAction.execute(searchRequest, listener); + } else if (searchRequest.searchType() == SearchType.QUERY_AND_FETCH) { + queryAndFetchAction.execute(searchRequest, listener); + } + } + + private class TransportHandler extends BaseTransportRequestHandler { + + @Override public SearchRequest newInstance() { + return new SearchRequest(); + } + + @Override public void messageReceived(SearchRequest request, final TransportChannel channel) throws Exception { + // no need for a threaded listener + request.listenerThreaded(false); + // we don't spawn, so if we get a request with no threading, change it to single threaded + if (request.operationThreading() == SearchOperationThreading.NO_THREADS) { + request.operationThreading(SearchOperationThreading.SINGLE_THREAD); + } + execute(request, new ActionListener() { + @Override public void onResponse(SearchResponse result) { + try { + channel.sendResponse(result); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn("Failed to send response for search", e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java new file mode 100644 index 00000000000..1e7fd6f74f5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -0,0 
+1,91 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.search.type.ParsedScrollId; +import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.action.search.type.ParsedScrollId.*; +import static org.elasticsearch.action.search.type.TransportSearchHelper.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchScrollAction extends BaseAction { + + private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction; + + @Inject public TransportSearchScrollAction(Settings settings, TransportService transportService, + TransportSearchScrollQueryThenFetchAction 
queryThenFetchAction) { + super(settings); + this.queryThenFetchAction = queryThenFetchAction; + + transportService.registerHandler(TransportActions.SEARCH_SCROLL, new TransportHandler()); + } + + @Override protected void doExecute(SearchScrollRequest request, ActionListener listener) { + try { + ParsedScrollId scrollId = parseScrollId(request.scrollId()); + if (scrollId.type().equals(QUERY_THEN_FETCH_TYPE)) { + queryThenFetchAction.execute(request, scrollId, listener); + } else { + throw new ElasticSearchIllegalArgumentException("Scroll id type [" + scrollId.type() + "] unrecongnized"); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + private class TransportHandler extends BaseTransportRequestHandler { + + @Override public SearchScrollRequest newInstance() { + return new SearchScrollRequest(); + } + + @Override public void messageReceived(SearchScrollRequest request, final TransportChannel channel) throws Exception { + execute(request, new ActionListener() { + @Override public void onResponse(SearchResponse result) { + try { + channel.sendResponse(result); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn("Failed to send response for search", e1); + } + } + }); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java new file mode 100644 index 00000000000..73e50906de6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search.type; + +import org.elasticsearch.util.Tuple; + +/** + * @author kimchy (Shay Banon) + */ +public class ParsedScrollId { + + public static String QUERY_THEN_FETCH_TYPE = "queryThenFetch"; + + public static String QUERY_AND_FETCH_TYPE = "queryAndFetch"; + + private final String source; + + private final String type; + + private final Tuple[] values; + + public ParsedScrollId(String source, String type, Tuple[] values) { + this.source = source; + this.type = type; + this.values = values; + } + + public String source() { + return source; + } + + public String type() { + return type; + } + + public Tuple[] values() { + return values; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchCache.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchCache.java new file mode 100644 index 00000000000..54a5e028902 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchCache.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search.type; + +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; + +import java.util.Collection; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchCache { + + private final Queue> cacheDfsResults = new ConcurrentLinkedQueue>(); + + private final Queue> cacheQueryResults = new ConcurrentLinkedQueue>(); + + private final Queue> cacheFetchResults = new ConcurrentLinkedQueue>(); + + private final Queue> cacheQueryFetchResults = new ConcurrentLinkedQueue>(); + + public Collection obtainDfsResults() { + Collection dfsSearchResults; + while ((dfsSearchResults = cacheDfsResults.poll()) == null) { + cacheDfsResults.offer(new ConcurrentLinkedQueue()); + } + dfsSearchResults.clear(); + return dfsSearchResults; + } + + public void releaseDfsResults(Collection dfsResults) { + dfsResults.clear(); + cacheDfsResults.offer(dfsResults); + } + + public Map obtainQueryResults() { + Map queryResults; + while ((queryResults = cacheQueryResults.poll()) == 
null) { + cacheQueryResults.offer(new ConcurrentHashMap()); + } + queryResults.clear(); + return queryResults; + } + + public void releaseQueryResults(Map queryResults) { + queryResults.clear(); + cacheQueryResults.offer(queryResults); + } + + public Map obtainFetchResults() { + Map fetchResults; + while ((fetchResults = cacheFetchResults.poll()) == null) { + cacheFetchResults.offer(new ConcurrentHashMap()); + } + fetchResults.clear(); + return fetchResults; + } + + public void releaseFetchResults(Map fetchResults) { + fetchResults.clear(); + cacheFetchResults.offer(fetchResults); + } + + public Map obtainQueryFetchResults() { + Map fetchResults; + while ((fetchResults = cacheQueryFetchResults.poll()) == null) { + cacheQueryFetchResults.offer(new ConcurrentHashMap()); + } + fetchResults.clear(); + return fetchResults; + } + + public void releaseQueryFetchResults(Map fetchResults) { + fetchResults.clear(); + cacheQueryFetchResults.offer(fetchResults); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java new file mode 100644 index 00000000000..8a2b6f0e8e9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search.type; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchOperationThreading; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.action.SearchServiceListener; +import org.elasticsearch.search.action.SearchServiceTransportAction; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.settings.Settings; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.action.search.type.TransportSearchHelper.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAction { + + @Inject public 
TransportSearchDfsQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, + TransportSearchCache transportSearchCache, SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) { + super(settings, threadPool, clusterService, indicesService, transportSearchCache, searchService, searchPhaseController); + } + + @Override protected void doExecute(SearchRequest searchRequest, ActionListener listener) { + new AsyncAction(searchRequest, listener).start(); + } + + private class AsyncAction extends BaseAsyncAction { + + private final Collection dfsResults = transportSearchCache.obtainDfsResults(); + + private final Map queryFetchResults = transportSearchCache.obtainQueryFetchResults(); + + + private AsyncAction(SearchRequest request, ActionListener listener) { + super(request, listener); + } + + @Override protected void sendExecuteFirstPhase(Node node, InternalSearchRequest request, SearchServiceListener listener) { + searchService.sendExecuteDfs(node, request, listener); + } + + @Override protected void processFirstPhaseResult(ShardRouting shard, DfsSearchResult result) { + dfsResults.add(result); + } + + @Override protected void moveToSecondPhase() { + final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + final AtomicInteger counter = new AtomicInteger(dfsResults.size()); + + int localOperations = 0; + for (DfsSearchResult dfsResult : dfsResults) { + Node node = nodes.get(dfsResult.shardTarget().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + localOperations++; + } else { + QuerySearchRequest querySearchRequest = new QuerySearchRequest(dfsResult.id(), dfs); + executeSecondPhase(counter, node, querySearchRequest); + } + } + if (localOperations > 0) { + if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { + threadPool.execute(new Runnable() { + @Override public void run() { + for (DfsSearchResult dfsResult : 
dfsResults) { + Node node = nodes.get(dfsResult.shardTarget().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + QuerySearchRequest querySearchRequest = new QuerySearchRequest(dfsResult.id(), dfs); + executeSecondPhase(counter, node, querySearchRequest); + } + } + transportSearchCache.releaseDfsResults(dfsResults); + } + }); + } else { + boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; + for (DfsSearchResult dfsResult : dfsResults) { + final Node node = nodes.get(dfsResult.shardTarget().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + final QuerySearchRequest querySearchRequest = new QuerySearchRequest(dfsResult.id(), dfs); + if (localAsync) { + threadPool.execute(new Runnable() { + @Override public void run() { + executeSecondPhase(counter, node, querySearchRequest); + } + }); + } else { + executeSecondPhase(counter, node, querySearchRequest); + } + } + } + transportSearchCache.releaseDfsResults(dfsResults); + } + } + } + + private void executeSecondPhase(final AtomicInteger counter, Node node, QuerySearchRequest querySearchRequest) { + searchService.sendExecuteFetch(node, querySearchRequest, new SearchServiceListener() { + @Override public void onResult(QueryFetchSearchResult result) { + queryFetchResults.put(result.shardTarget(), result); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + + @Override public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute query phase", t); + } + successulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + }); + } + + private void finishHim() { + sortedShardList = searchPhaseController.sortDocs(queryFetchResults.values()); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); + String scrollIdX = null; + if (request.scroll() != null) { + scrollIdX = buildScrollId(request.searchType(), 
queryFetchResults.values()); + } + final String scrollId = scrollIdX; + transportSearchCache.releaseQueryFetchResults(queryFetchResults); + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + }); + } else { + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java new file mode 100644 index 00000000000..5b85cbad5b1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java @@ -0,0 +1,255 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search.type; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchOperationThreading; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.action.SearchServiceListener; +import org.elasticsearch.search.action.SearchServiceTransportAction; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.trove.ExtTIntArrayList; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeAction { + + @Inject public TransportSearchDfsQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, + TransportSearchCache transportSearchCache, SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) { + 
super(settings, threadPool, clusterService, indicesService, transportSearchCache, searchService, searchPhaseController); + } + + @Override protected void doExecute(SearchRequest searchRequest, ActionListener listener) { + new AsyncAction(searchRequest, listener).start(); + } + + private class AsyncAction extends BaseAsyncAction { + + private final Collection dfsResults = transportSearchCache.obtainDfsResults(); + + private final Map queryResults = transportSearchCache.obtainQueryResults(); + + private final Map fetchResults = transportSearchCache.obtainFetchResults(); + + + private AsyncAction(SearchRequest request, ActionListener listener) { + super(request, listener); + } + + @Override protected void sendExecuteFirstPhase(Node node, InternalSearchRequest request, SearchServiceListener listener) { + searchService.sendExecuteDfs(node, request, listener); + } + + @Override protected void processFirstPhaseResult(ShardRouting shard, DfsSearchResult result) { + dfsResults.add(result); + } + + @Override protected void moveToSecondPhase() { + final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + final AtomicInteger counter = new AtomicInteger(dfsResults.size()); + + + int localOperations = 0; + for (DfsSearchResult dfsResult : dfsResults) { + Node node = nodes.get(dfsResult.shardTarget().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + localOperations++; + } else { + QuerySearchRequest querySearchRequest = new QuerySearchRequest(dfsResult.id(), dfs); + executeQuery(counter, querySearchRequest, node); + } + } + + if (localOperations > 0) { + if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { + threadPool.execute(new Runnable() { + @Override public void run() { + for (DfsSearchResult dfsResult : dfsResults) { + Node node = nodes.get(dfsResult.shardTarget().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + QuerySearchRequest querySearchRequest = new QuerySearchRequest(dfsResult.id(), dfs); + 
executeQuery(counter, querySearchRequest, node); + } + } + transportSearchCache.releaseDfsResults(dfsResults); + } + }); + } else { + boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; + for (DfsSearchResult dfsResult : dfsResults) { + final Node node = nodes.get(dfsResult.shardTarget().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + final QuerySearchRequest querySearchRequest = new QuerySearchRequest(dfsResult.id(), dfs); + if (localAsync) { + threadPool.execute(new Runnable() { + @Override public void run() { + executeQuery(counter, querySearchRequest, node); + } + }); + } else { + executeQuery(counter, querySearchRequest, node); + } + } + } + transportSearchCache.releaseDfsResults(dfsResults); + } + } + } + + private void executeQuery(final AtomicInteger counter, QuerySearchRequest querySearchRequest, Node node) { + searchService.sendExecuteQuery(node, querySearchRequest, new SearchServiceListener() { + @Override public void onResult(QuerySearchResult result) { + queryResults.put(result.shardTarget(), result); + if (counter.decrementAndGet() == 0) { + executeFetchPhase(); + } + } + + @Override public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute query phase", t); + } + successulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + executeFetchPhase(); + } + } + }); + } + + private void executeFetchPhase() { + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + final Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + if (docIdsToLoad.isEmpty()) { + finishHim(); + } + + final AtomicInteger counter = new AtomicInteger(docIdsToLoad.size()); + int localOperations = 0; + for (Map.Entry entry : docIdsToLoad.entrySet()) { + Node node = nodes.get(entry.getKey().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + localOperations++; + } else { + FetchSearchRequest fetchSearchRequest = new 
FetchSearchRequest(queryResults.get(entry.getKey()).id(), entry.getValue()); + executeFetch(counter, fetchSearchRequest, node); + } + } + + if (localOperations > 0) { + if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { + threadPool.execute(new Runnable() { + @Override public void run() { + for (Map.Entry entry : docIdsToLoad.entrySet()) { + Node node = nodes.get(entry.getKey().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(queryResults.get(entry.getKey()).id(), entry.getValue()); + executeFetch(counter, fetchSearchRequest, node); + } + } + } + }); + } else { + boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; + for (Map.Entry entry : docIdsToLoad.entrySet()) { + final Node node = nodes.get(entry.getKey().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(queryResults.get(entry.getKey()).id(), entry.getValue()); + if (localAsync) { + threadPool.execute(new Runnable() { + @Override public void run() { + executeFetch(counter, fetchSearchRequest, node); + } + }); + } else { + executeFetch(counter, fetchSearchRequest, node); + } + } + } + } + } + } + + private void executeFetch(final AtomicInteger counter, FetchSearchRequest fetchSearchRequest, Node node) { + searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener() { + @Override public void onResult(FetchSearchResult result) { + fetchResults.put(result.shardTarget(), result); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + + @Override public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute fetch phase", t); + } + successulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + }); + } + + private void finishHim() { + final InternalSearchResponse internalResponse = 
searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + String scrollIdX = null; + if (request.scroll() != null) { + scrollIdX = TransportSearchHelper.buildScrollId(request.searchType(), fetchResults.values()); + } + final String scrollId = scrollIdX; + transportSearchCache.releaseQueryResults(queryResults); + transportSearchCache.releaseFetchResults(fetchResults); + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + }); + } else { + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java new file mode 100644 index 00000000000..a2be9d6a311 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search.type; + +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.search.fetch.FetchSearchResultProvider; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.util.Tuple; + +import java.util.regex.Pattern; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportSearchHelper { + + + private final static Pattern scrollIdPattern; + + static { + scrollIdPattern = Pattern.compile(";"); + } + + public static InternalSearchRequest internalSearchRequest(ShardRouting shardRouting, SearchRequest request) { + InternalSearchRequest internalRequest = new InternalSearchRequest(shardRouting, request.source()); + internalRequest.from(request.from()).size(request.size()); + internalRequest.scroll(request.scroll()); + if (request.queryBoost() != null) { + if (request.queryBoost().containsKey(shardRouting.index())) { + internalRequest.queryBoost(request.queryBoost().get(shardRouting.index())); + } + } + internalRequest.timeout(request.timeout()); + internalRequest.types(request.types()); + return internalRequest; + } + + public static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) { + InternalScrollSearchRequest internalRequest = new InternalScrollSearchRequest(id); + internalRequest.scroll(request.scroll()); + return internalRequest; + } + + public static String buildScrollId(SearchType searchType, Iterable fetchResults) { + if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) { + return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, fetchResults); + } else if 
(searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) { + return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, fetchResults); + } else { + throw new ElasticSearchIllegalStateException(); + } + } + + public static String buildScrollId(String type, Iterable fetchResults) { + StringBuilder sb = new StringBuilder().append(type).append(';'); + for (FetchSearchResultProvider fetchResult : fetchResults) { + sb.append(fetchResult.id()).append(':').append(fetchResult.shardTarget().nodeId()).append(';'); + } + return sb.toString(); + } + + public static ParsedScrollId parseScrollId(String scrollId) { + String[] elements = scrollIdPattern.split(scrollId); + @SuppressWarnings({"unchecked"}) Tuple[] values = new Tuple[elements.length - 1]; + for (int i = 1; i < elements.length; i++) { + String element = elements[i]; + int index = element.indexOf(':'); + values[i - 1] = new Tuple(element.substring(index + 1), Long.parseLong(element.substring(0, index))); + } + return new ParsedScrollId(scrollId, elements[0], values); + } + + private TransportSearchHelper() { + + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java new file mode 100644 index 00000000000..c445556afbc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search.type; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.action.SearchServiceListener; +import org.elasticsearch.search.action.SearchServiceTransportAction; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; + +import static org.elasticsearch.action.search.type.TransportSearchHelper.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchQueryAndFetchAction extends TransportSearchTypeAction { + + @Inject public TransportSearchQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, + TransportSearchCache transportSearchCache, SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) { + super(settings, threadPool, clusterService, 
indicesService, transportSearchCache, searchService, searchPhaseController); + } + + @Override protected void doExecute(SearchRequest searchRequest, ActionListener listener) { + new AsyncAction(searchRequest, listener).start(); + } + + private class AsyncAction extends BaseAsyncAction { + + private final Map queryFetchResults = transportSearchCache.obtainQueryFetchResults(); + + + private AsyncAction(SearchRequest request, ActionListener listener) { + super(request, listener); + } + + @Override protected void sendExecuteFirstPhase(Node node, InternalSearchRequest request, SearchServiceListener listener) { + searchService.sendExecuteFetch(node, request, listener); + } + + @Override protected void processFirstPhaseResult(ShardRouting shard, QueryFetchSearchResult result) { + queryFetchResults.put(result.shardTarget(), result); + } + + @Override protected void moveToSecondPhase() { + sortedShardList = searchPhaseController.sortDocs(queryFetchResults.values()); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); + String scrollIdX = null; + if (request.scroll() != null) { + scrollIdX = buildScrollId(request.searchType(), queryFetchResults.values()); + } + final String scrollId = scrollIdX; + transportSearchCache.releaseQueryFetchResults(queryFetchResults); + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + }); + } else { + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java new 
file mode 100644 index 00000000000..c1907915797 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search.type; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchOperationThreading; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.action.SearchServiceListener; +import org.elasticsearch.search.action.SearchServiceTransportAction; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.trove.ExtTIntArrayList; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction { + + @Inject public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, + TransportSearchCache transportSearchCache, SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) { + super(settings, threadPool, clusterService, indicesService, transportSearchCache, searchService, searchPhaseController); + } + + @Override protected void doExecute(SearchRequest searchRequest, 
ActionListener listener) { + new AsyncAction(searchRequest, listener).start(); + } + + private class AsyncAction extends BaseAsyncAction { + + private final Map queryResults = transportSearchCache.obtainQueryResults(); + + private final Map fetchResults = transportSearchCache.obtainFetchResults(); + + + private AsyncAction(SearchRequest request, ActionListener listener) { + super(request, listener); + } + + @Override protected void sendExecuteFirstPhase(Node node, InternalSearchRequest request, SearchServiceListener listener) { + searchService.sendExecuteQuery(node, request, listener); + } + + @Override protected void processFirstPhaseResult(ShardRouting shard, QuerySearchResult result) { + queryResults.put(result.shardTarget(), result); + } + + @Override protected void moveToSecondPhase() { + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + final Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + if (docIdsToLoad.isEmpty()) { + finishHim(); + } + + final AtomicInteger counter = new AtomicInteger(docIdsToLoad.size()); + + int localOperations = 0; + for (Map.Entry entry : docIdsToLoad.entrySet()) { + Node node = nodes.get(entry.getKey().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + localOperations++; + } else { + FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(queryResults.get(entry.getKey()).id(), entry.getValue()); + executeFetch(counter, fetchSearchRequest, node); + } + } + + if (localOperations > 0) { + if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { + threadPool.execute(new Runnable() { + @Override public void run() { + for (Map.Entry entry : docIdsToLoad.entrySet()) { + Node node = nodes.get(entry.getKey().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(queryResults.get(entry.getKey()).id(), entry.getValue()); + executeFetch(counter, fetchSearchRequest, node); + } + } + } + }); 
+ } else { + boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; + for (Map.Entry entry : docIdsToLoad.entrySet()) { + final Node node = nodes.get(entry.getKey().nodeId()); + if (node.id().equals(nodes.localNodeId())) { + final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(queryResults.get(entry.getKey()).id(), entry.getValue()); + if (localAsync) { + threadPool.execute(new Runnable() { + @Override public void run() { + executeFetch(counter, fetchSearchRequest, node); + } + }); + } else { + executeFetch(counter, fetchSearchRequest, node); + } + } + } + } + } + } + + private void executeFetch(final AtomicInteger counter, FetchSearchRequest fetchSearchRequest, Node node) { + searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener() { + @Override public void onResult(FetchSearchResult result) { + fetchResults.put(result.shardTarget(), result); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + + @Override public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute fetch phase", t); + } + successulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + }); + } + + private void finishHim() { + InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + String scrollId = null; + if (request.scroll() != null) { + scrollId = TransportSearchHelper.buildScrollId(request.searchType(), fetchResults.values()); + } + transportSearchCache.releaseQueryResults(queryResults); + transportSearchCache.releaseFetchResults(fetchResults); + listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get())); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java new file mode 100644 index 00000000000..ff7ee1db228 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search.type; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.action.SearchServiceListener; +import org.elasticsearch.search.action.SearchServiceTransportAction; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.controller.ShardDoc; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.util.Tuple; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.trove.ExtTIntArrayList; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent { + + private final ClusterService clusterService; + + private final SearchServiceTransportAction searchService; + + private final SearchPhaseController searchPhaseController; + + private final TransportSearchCache transportSearchCache; + + @Inject public TransportSearchScrollQueryThenFetchAction(Settings settings, ClusterService clusterService, + TransportSearchCache transportSearchCache, + SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) { + super(settings); + this.clusterService = clusterService; + this.transportSearchCache = 
transportSearchCache; + this.searchService = searchService; + this.searchPhaseController = searchPhaseController; + } + + public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener listener) { + new AsyncAction(request, scrollId, listener).start(); + } + + private class AsyncAction { + + private final SearchScrollRequest request; + + private final ActionListener listener; + + private final ParsedScrollId scrollId; + + private final Nodes nodes; + + private final Map queryResults = transportSearchCache.obtainQueryResults(); + + private final Map fetchResults = transportSearchCache.obtainFetchResults(); + + private volatile ShardDoc[] sortedShardList; + + private final AtomicInteger successfulOps; + + private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener listener) { + this.request = request; + this.listener = listener; + this.scrollId = scrollId; + this.nodes = clusterService.state().nodes(); + this.successfulOps = new AtomicInteger(scrollId.values().length); + } + + public void start() { + final AtomicInteger counter = new AtomicInteger(scrollId.values().length); + for (Tuple target : scrollId.values()) { + Node node = nodes.get(target.v1()); + if (node == null) { + if (logger.isDebugEnabled()) { + logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.source() + "]"); + } + successfulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + executeFetchPhase(); + } + } else { + searchService.sendExecuteQuery(node, TransportSearchHelper.internalScrollSearchRequest(target.v2(), request), new SearchServiceListener() { + @Override public void onResult(QuerySearchResult result) { + queryResults.put(result.shardTarget(), result); + if (counter.decrementAndGet() == 0) { + executeFetchPhase(); + } + } + + @Override public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute query phase", t); + } + 
successfulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + executeFetchPhase(); + } + } + }); + } + } + } + + private void executeFetchPhase() { + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + if (docIdsToLoad.isEmpty()) { + finishHim(); + } + + final AtomicInteger counter = new AtomicInteger(docIdsToLoad.size()); + + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(queryResults.get(shardTarget).id(), docIds); + Node node = nodes.get(shardTarget.nodeId()); + searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener() { + @Override public void onResult(FetchSearchResult result) { + fetchResults.put(result.shardTarget(), result); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + + @Override public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute fetch phase", t); + } + successfulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + }); + } + } + + private void finishHim() { + InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + String scrollId = null; + if (request.scroll() != null) { + scrollId = TransportSearchHelper.buildScrollId(this.scrollId.type(), fetchResults.values()); + } + transportSearchCache.releaseQueryResults(queryResults); + transportSearchCache.releaseFetchResults(fetchResults); + listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.values().length, successfulOps.get())); + } + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java new file mode 100644 index 00000000000..2b3d4761876 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java @@ -0,0 +1,213 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search.type; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchOperationThreading; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.action.SearchServiceListener; +import org.elasticsearch.search.action.SearchServiceTransportAction; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.controller.ShardDoc; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.settings.Settings; + +import java.util.Iterator; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.action.Actions.*; +import static org.elasticsearch.action.search.type.TransportSearchHelper.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportSearchTypeAction extends BaseAction { + + protected final ThreadPool threadPool; + + protected final ClusterService clusterService; + + protected final IndicesService indicesService; + + protected final SearchServiceTransportAction searchService; + + protected final SearchPhaseController searchPhaseController; + + protected final TransportSearchCache transportSearchCache; + + public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, + TransportSearchCache 
transportSearchCache, SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) { + super(settings); + this.threadPool = threadPool; + this.clusterService = clusterService; + this.transportSearchCache = transportSearchCache; + this.indicesService = indicesService; + this.searchService = searchService; + this.searchPhaseController = searchPhaseController; + } + + protected abstract class BaseAsyncAction { + + protected final ActionListener listener; + + protected final GroupShardsIterator shardsIts; + + protected final SearchRequest request; + + protected final Nodes nodes; + + protected final int expectedSuccessfulOps; + + protected final int expectedTotalOps; + + protected final AtomicInteger successulOps = new AtomicInteger(); + + protected final AtomicInteger totalOps = new AtomicInteger(); + + protected volatile ShardDoc[] sortedShardList; + + protected BaseAsyncAction(SearchRequest request, ActionListener listener) { + this.request = request; + this.listener = listener; + + ClusterState clusterState = clusterService.state(); + + nodes = clusterState.nodes(); + + shardsIts = indicesService.searchShards(clusterState, processIndices(clusterState, request.indices()), request.queryHint()); + expectedSuccessfulOps = shardsIts.size(); + expectedTotalOps = shardsIts.totalSize(); + } + + public void start() { + // count the local operations, and perform the non local ones + int localOperations = 0; + for (final ShardsIterator shardIt : shardsIts) { + final ShardRouting shard = shardIt.next(); + if (shard.active()) { + if (shard.currentNodeId().equals(nodes.localNodeId())) { + localOperations++; + } else { + // do the remote operation here, the localAsync flag is not relevant + performFirstPhase(shardIt.reset()); + } + } else { + // as if we have a "problem", so we iterate to the next one and maintain counts + onFirstPhaseResult(shard, shardIt, null); + } + } + // we have local operations, perform them now + if (localOperations > 0) { + if 
(request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { + threadPool.execute(new Runnable() { + @Override public void run() { + for (final ShardsIterator shardIt : shardsIts) { + final ShardRouting shard = shardIt.reset().next(); + if (shard.active()) { + if (shard.currentNodeId().equals(nodes.localNodeId())) { + performFirstPhase(shardIt.reset()); + } + } + } + } + }); + } else { + boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; + for (final ShardsIterator shardIt : shardsIts) { + final ShardRouting shard = shardIt.reset().next(); + if (shard.active()) { + if (shard.currentNodeId().equals(nodes.localNodeId())) { + if (localAsync) { + threadPool.execute(new Runnable() { + @Override public void run() { + performFirstPhase(shardIt.reset()); + } + }); + } else { + performFirstPhase(shardIt.reset()); + } + } + } + } + } + } + } + + private void performFirstPhase(final Iterator shardIt) { + if (!shardIt.hasNext()) { + return; + } + final ShardRouting shard = shardIt.next(); + if (!shard.active()) { + // as if we have a "problem", so we iterate to the next one and maintain counts + onFirstPhaseResult(shard, shardIt, null); + } else { + Node node = nodes.get(shard.currentNodeId()); + sendExecuteFirstPhase(node, internalSearchRequest(shard, request), new SearchServiceListener() { + @Override public void onResult(FirstResult result) { + onFirstPhaseResult(shard, result); + } + + @Override public void onFailure(Throwable t) { + onFirstPhaseResult(shard, shardIt, t); + } + }); + } + } + + private void onFirstPhaseResult(ShardRouting shard, FirstResult result) { + processFirstPhaseResult(shard, result); + if (successulOps.incrementAndGet() == expectedSuccessfulOps || + totalOps.incrementAndGet() == expectedTotalOps) { + moveToSecondPhase(); + } + } + + private void onFirstPhaseResult(ShardRouting shard, final Iterator shardIt, Throwable t) { + if (logger.isDebugEnabled()) { + if (t != null) { + 
logger.debug(shard.shortSummary() + ": Failed to search [" + request + "]", t); + } + } + if (totalOps.incrementAndGet() == expectedTotalOps) { + moveToSecondPhase(); + } else { + performFirstPhase(shardIt); + } + } + + protected abstract void sendExecuteFirstPhase(Node node, InternalSearchRequest request, SearchServiceListener listener); + + protected abstract void processFirstPhaseResult(ShardRouting shard, FirstResult result); + + protected abstract void moveToSecondPhase(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/BaseAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/BaseAction.java new file mode 100644 index 00000000000..25895dfa84d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/BaseAction.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.*; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.action.support.PlainActionFuture.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class BaseAction extends AbstractComponent implements Action { + + protected BaseAction(Settings settings) { + super(settings); + } + + @Override public ActionFuture submit(Request request) throws ElasticSearchException { + return submit(request, null); + } + + @Override public ActionFuture submit(Request request, @Nullable ActionListener listener) { + PlainActionFuture future = newFuture(listener); + if (listener == null) { + // since we don't have a listener, and we release a possible lock with the future + // there is no need to execute it under a listener thread + request.listenerThreaded(false); + } + execute(request, future); + return future; + } + + @Override public void execute(Request request, ActionListener listener) { + ActionRequestValidationException validationException = request.validate(); + if (validationException != null) { + listener.onFailure(validationException); + return; + } + doExecute(request, listener); + } + + protected abstract void doExecute(Request request, ActionListener responseActionListener); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java new file mode 100644 index 00000000000..1ff580ab7f4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchInterruptedException; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.util.Nullable; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * @author kimchy (Shay Banon) + */ +public class PlainActionFuture implements ActionFuture, ActionListener { + + public static PlainActionFuture newFuture() { + return newFuture(null); + } + + public static PlainActionFuture newFuture(@Nullable ActionListener listener) { + return new PlainActionFuture(listener); + } + + private final ActionListener listener; + + private final CountDownLatch latch; + + private volatile boolean done; + private volatile boolean canceled; + private volatile T result; + private volatile Throwable exp; + + public PlainActionFuture(ActionListener listener) { + this.listener = listener; + latch = new CountDownLatch(1); + } + + @Override public boolean cancel(boolean mayInterruptIfRunning) { + if (done) + 
return true; + + canceled = true; + latch.countDown(); + return true; + } + + @Override public boolean isCancelled() { + return canceled; + } + + @Override public boolean isDone() { + return done; + } + + @Override public T get() throws InterruptedException, ExecutionException { + latch.await(); + + if (!done || canceled) { + throw new InterruptedException("future was interrupted"); + } + + if (exp != null) { + throw new ExecutionException(exp.getMessage(), exp); + } + + return this.result; + } + + @Override public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + latch.await(timeout, unit); + + if (!done || canceled) { + throw new TimeoutException("response did not arrive"); + } + + if (exp != null) { + throw new ExecutionException(exp.getMessage(), exp); + } + + return this.result; + } + + @Override public T actionGet() throws ElasticSearchException { + try { + return get(); + } catch (InterruptedException e) { + throw new ElasticSearchInterruptedException(e.getMessage()); + } catch (ExecutionException e) { + if (e.getCause() instanceof ElasticSearchException) { + throw (ElasticSearchException) e.getCause(); + } else { + throw new TransportException("Failed execution", e); + } + } + } + + @Override public T actionGet(long timeoutMillis) throws ElasticSearchException, TimeoutException { + return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); + } + + @Override public T actionGet(long timeout, TimeUnit unit) throws ElasticSearchException, TimeoutException { + try { + return get(timeout, unit); + } catch (InterruptedException e) { + throw new ElasticSearchInterruptedException(e.getMessage()); + } catch (ExecutionException e) { + if (e.getCause() instanceof ElasticSearchException) { + throw (ElasticSearchException) e.getCause(); + } else { + throw new ElasticSearchException("Failed execution", e); + } + } + } + + @Override public void onResponse(T result) { + this.done = true; + this.result = result; + + if 
(canceled) + return; + + if (listener != null) { + listener.onResponse(result); + } + latch.countDown(); + } + + @Override public void onFailure(Throwable e) { + this.done = true; + this.exp = e; + + if (canceled) + return; + + if (listener != null) { + listener.onFailure(exp); + } + latch.countDown(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java new file mode 100644 index 00000000000..4c18c13cf5c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.broadcast; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.util.Nullable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class BroadcastOperationRequest implements ActionRequest { + + private String[] indices; + + @Nullable protected String queryHint; + + private boolean listenerThreaded = false; + private BroadcastOperationThreading operationThreading = BroadcastOperationThreading.SINGLE_THREAD; + + protected BroadcastOperationRequest() { + + } + + protected BroadcastOperationRequest(String[] indices, @Nullable String queryHint) { + this.indices = indices; + } + + public String[] indices() { + return indices; + } + + public String queryHint() { + return queryHint; + } + + @Override public ActionRequestValidationException validate() { + return null; + } + + @Override public boolean listenerThreaded() { + return this.listenerThreaded; + } + + @Override public BroadcastOperationRequest listenerThreaded(boolean listenerThreaded) { + this.listenerThreaded = listenerThreaded; + return this; + } + + public BroadcastOperationThreading operationThreading() { + return operationThreading; + } + + public BroadcastOperationRequest operationThreading(BroadcastOperationThreading operationThreading) { + this.operationThreading = operationThreading; + return this; + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(indices.length); + for (String index : indices) { + out.writeUTF(index); + } + if (queryHint == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryHint); + } + out.writeByte(operationThreading.id()); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + indices = new String[in.readInt()]; + for (int i = 0; i 
< indices.length; i++) { + indices[i] = in.readUTF(); + } + if (in.readBoolean()) { + queryHint = in.readUTF(); + } + operationThreading = BroadcastOperationThreading.fromId(in.readByte()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java new file mode 100644 index 00000000000..17628413dfc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.action.support.broadcast;

import org.elasticsearch.action.ActionResponse;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Base class for responses of broadcast operations, keeping count of how many
 * shard level operations succeeded and how many failed.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class BroadcastOperationResponse implements ActionResponse {

    private int successfulShards;

    private int failedShards;

    protected BroadcastOperationResponse() {
    }

    protected BroadcastOperationResponse(int successfulShards, int failedShards) {
        this.successfulShards = successfulShards;
        this.failedShards = failedShards;
    }

    /**
     * The number of shards the operation succeeded on.
     */
    public int successfulShards() {
        return this.successfulShards;
    }

    /**
     * The number of shards the operation failed on.
     */
    public int failedShards() {
        return this.failedShards;
    }

    /**
     * The total number of shards the operation was attempted on,
     * i.e. successful plus failed shards.
     */
    public int totalShards() {
        return this.successfulShards + this.failedShards;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        successfulShards = in.readInt();
        failedShards = in.readInt();
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeInt(successfulShards);
        out.writeInt(failedShards);
    }
}
package org.elasticsearch.action.support.broadcast;

import org.elasticsearch.ElasticSearchIllegalArgumentException;

import java.util.Locale;

/**
 * Controls the operation threading model for broadcast operation that are performed
 * locally on the executing node.
 *
 * @author kimchy (Shay Banon)
 */
public enum BroadcastOperationThreading {
    /**
     * No threads are used, all the local shards operations will be performed on the calling
     * thread.
     */
    NO_THREADS((byte) 0),
    /**
     * The local shards operations will be performed in serial manner on a single forked thread.
     */
    SINGLE_THREAD((byte) 1),
    /**
     * Each local shard operation will execute on its own thread.
     */
    THREAD_PER_SHARD((byte) 2);

    // Stable wire id, written/read by request serialization.
    private final byte id;

    BroadcastOperationThreading(byte id) {
        this.id = id;
    }

    public byte id() {
        return this.id;
    }

    /**
     * Resolves the threading model from its wire id.
     *
     * @throws ElasticSearchIllegalArgumentException if the id does not match any model
     */
    public static BroadcastOperationThreading fromId(byte id) {
        if (id == 0) {
            return NO_THREADS;
        }
        if (id == 1) {
            return SINGLE_THREAD;
        }
        if (id == 2) {
            return THREAD_PER_SHARD;
        }
        throw new ElasticSearchIllegalArgumentException("No type matching id [" + id + "]");
    }

    /**
     * Parses the threading model from a (case insensitive) string value, returning
     * the given default when the value is <tt>null</tt>.
     */
    public static BroadcastOperationThreading fromString(String value, BroadcastOperationThreading defaultValue) {
        if (value == null) {
            return defaultValue;
        }
        // BUG FIX: use a fixed locale so parsing does not depend on the JVM default
        // locale (e.g. Turkish dotless-i casing would break "single_thread").
        return BroadcastOperationThreading.valueOf(value.toUpperCase(Locale.ROOT));
    }
}
package org.elasticsearch.action.support.broadcast;

import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Base class for the per shard request that a broadcast operation sends to each
 * shard it executes against, identifying the target index and shard.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class BroadcastShardOperationRequest implements Streamable {

    private String index;

    private int shardId;

    protected BroadcastShardOperationRequest() {
    }

    protected BroadcastShardOperationRequest(String index, int shardId) {
        this.index = index;
        this.shardId = shardId;
    }

    /**
     * The index the shard operation targets.
     */
    public String index() {
        return this.index;
    }

    /**
     * The id of the shard the operation targets.
     */
    public int shardId() {
        return this.shardId;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        this.index = in.readUTF();
        this.shardId = in.readInt();
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(this.index);
        out.writeInt(this.shardId);
    }
}
package org.elasticsearch.action.support.broadcast;

import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Base class for the per shard response returned by each shard a broadcast
 * operation executed against, identifying the responding index and shard.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class BroadcastShardOperationResponse implements Streamable {

    // Intentionally package visible (set directly by serialization / same package code).
    String index;

    int shardId;

    protected BroadcastShardOperationResponse() {
    }

    protected BroadcastShardOperationResponse(String index, int shardId) {
        this.index = index;
        this.shardId = shardId;
    }

    /**
     * The index the responding shard belongs to.
     */
    public String index() {
        return this.index;
    }

    /**
     * The id of the responding shard.
     */
    public int shardId() {
        return this.shardId;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        this.index = in.readUTF();
        this.shardId = in.readInt();
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(this.index);
        out.writeInt(this.shardId);
    }
}
package org.elasticsearch.action.support.broadcast;

import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.BaseAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.Node;
import org.elasticsearch.cluster.node.Nodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import org.elasticsearch.util.settings.Settings;

import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static org.elasticsearch.action.Actions.*;

/**
 * Base class for transport actions that broadcast an operation to one active shard copy
 * in each shard group of the target indices, execute shard operations locally or remotely
 * according to the request's {@link BroadcastOperationThreading}, and reduce all shard
 * responses into a single {@link BroadcastOperationResponse}.
 *
 * <p>NOTE(review): the generic type parameters were restored here (the reviewed text had
 * all angle bracket sections stripped); they follow the abstract method signatures, which
 * require e.g. {@code Iterator<ShardRouting>} for the shard iteration to compile.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class TransportBroadcastOperationAction<Request extends BroadcastOperationRequest, Response extends BroadcastOperationResponse, ShardRequest extends BroadcastShardOperationRequest, ShardResponse extends BroadcastShardOperationResponse>
        extends BaseAction<Request, Response> {

    protected final ClusterService clusterService;

    protected final TransportService transportService;

    protected final IndicesService indicesService;

    protected final ThreadPool threadPool;

    protected TransportBroadcastOperationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService) {
        super(settings);
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.threadPool = threadPool;
        this.indicesService = indicesService;

        // Handler for the "whole" operation (forwarded from other nodes) and for
        // the per shard operation executed on this node.
        transportService.registerHandler(transportAction(), new TransportHandler());
        transportService.registerHandler(transportShardAction(), new ShardTransportHandler());
    }

    @Override protected void doExecute(Request request, ActionListener<Response> listener) {
        new AsyncBroadcastAction(request, listener).start();
    }

    /** The transport action name for the overall broadcast operation. */
    protected abstract String transportAction();

    /** The transport action name for the per shard operation. */
    protected abstract String transportShardAction();

    protected abstract Request newRequest();

    /**
     * Reduces the per shard responses (and possibly failures / nulls for ignored
     * failures) into the final response.
     */
    protected abstract Response newResponse(Request request, AtomicReferenceArray shardsResponses);

    protected abstract ShardRequest newShardRequest();

    protected abstract ShardRequest newShardRequest(ShardRouting shard, Request request);

    protected abstract ShardResponse newShardResponse();

    /** Executes the operation against a single (local) shard. */
    protected abstract ShardResponse shardOperation(ShardRequest request) throws ElasticSearchException;

    /**
     * Whether shard failures should be stored (as {@link ShardOperationFailedException})
     * in the responses array; when <tt>false</tt> the failed slot is left <tt>null</tt>.
     */
    protected abstract boolean accumulateExceptions();

    private class AsyncBroadcastAction {

        private final Request request;

        private final ActionListener<Response> listener;

        private final Nodes nodes;

        private final GroupShardsIterator shardsIts;

        // One "operation" is expected per shard group; completion is detected by counting.
        private final int expectedOps;

        private final AtomicInteger counterOps = new AtomicInteger();

        // Next free slot in shardsResponses.
        private final AtomicInteger indexCounter = new AtomicInteger();

        private final AtomicReferenceArray shardsResponses;

        private AsyncBroadcastAction(Request request, ActionListener<Response> listener) {
            this.request = request;
            this.listener = listener;

            ClusterState clusterState = clusterService.state();
            nodes = clusterState.nodes();
            shardsIts = indicesService.searchShards(clusterState, processIndices(clusterState, request.indices()), request.queryHint());
            expectedOps = shardsIts.size();

            shardsResponses = new AtomicReferenceArray(expectedOps);
        }

        public void start() {
            // count the local operations, and perform the non local ones
            int localOperations = 0;
            for (final ShardsIterator shardIt : shardsIts) {
                final ShardRouting shard = shardIt.next();
                if (shard.active()) {
                    if (shard.currentNodeId().equals(nodes.localNodeId())) {
                        localOperations++;
                    } else {
                        // do the remote operation here, the localAsync flag is not relevant
                        performOperation(shardIt.reset(), true);
                    }
                } else {
                    // as if we have a "problem", so we iterate to the next one and maintain counts
                    onOperation(shard, shardIt, null, false);
                }
            }
            // we have local operations, perform them now
            if (localOperations > 0) {
                if (request.operationThreading() == BroadcastOperationThreading.SINGLE_THREAD) {
                    // fork one thread and run all local shard operations serially on it
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            for (final ShardsIterator shardIt : shardsIts) {
                                final ShardRouting shard = shardIt.reset().next();
                                if (shard.active()) {
                                    if (shard.currentNodeId().equals(nodes.localNodeId())) {
                                        performOperation(shardIt.reset(), false);
                                    }
                                }
                            }
                        }
                    });
                } else {
                    // NO_THREADS runs on the caller thread, THREAD_PER_SHARD forks per shard
                    boolean localAsync = request.operationThreading() == BroadcastOperationThreading.THREAD_PER_SHARD;
                    for (final ShardsIterator shardIt : shardsIts) {
                        final ShardRouting shard = shardIt.reset().next();
                        if (shard.active()) {
                            if (shard.currentNodeId().equals(nodes.localNodeId())) {
                                performOperation(shardIt.reset(), localAsync);
                            }
                        }
                    }
                }
            }
        }

        private void performOperation(final Iterator<ShardRouting> shardIt, boolean localAsync) {
            final ShardRouting shard = shardIt.next();
            if (!shard.active()) {
                // as if we have a "problem", so we iterate to the next one and maintain counts
                onOperation(shard, shardIt, null, false);
            } else {
                final ShardRequest shardRequest = newShardRequest(shard, request);
                if (shard.currentNodeId().equals(nodes.localNodeId())) {
                    if (localAsync) {
                        threadPool.execute(new Runnable() {
                            @Override public void run() {
                                try {
                                    onOperation(shard, shardOperation(shardRequest), true);
                                } catch (Exception e) {
                                    onOperation(shard, shardIt, e, true);
                                }
                            }
                        });
                    } else {
                        try {
                            onOperation(shard, shardOperation(shardRequest), false);
                        } catch (Exception e) {
                            onOperation(shard, shardIt, e, false);
                        }
                    }
                } else {
                    Node node = nodes.get(shard.currentNodeId());
                    transportService.sendRequest(node, transportShardAction(), shardRequest, new BaseTransportResponseHandler<ShardResponse>() {
                        @Override public ShardResponse newInstance() {
                            return newShardResponse();
                        }

                        @Override public void handleResponse(ShardResponse response) {
                            onOperation(shard, response, false);
                        }

                        @Override public void handleException(RemoteTransportException exp) {
                            // on failure, try the next shard copy in this group (if any)
                            onOperation(shard, shardIt, exp, false);
                        }

                        @Override public boolean spawn() {
                            // we never spawn here, we will span if needed in onOperation
                            return false;
                        }
                    });
                }
            }
        }

        private void onOperation(ShardRouting shard, ShardResponse response, boolean alreadyThreaded) {
            shardsResponses.set(indexCounter.getAndIncrement(), response);
            if (expectedOps == counterOps.incrementAndGet()) {
                finishHim(alreadyThreaded);
            }
        }

        private void onOperation(ShardRouting shard, final Iterator<ShardRouting> shardIt, Exception e, boolean alreadyThreaded) {
            if (logger.isDebugEnabled()) {
                if (e != null) {
                    logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", e);
                }
            }
            if (!shardIt.hasNext()) {
                // no more shards in this partition
                int index = indexCounter.getAndIncrement();
                if (accumulateExceptions()) {
                    shardsResponses.set(index, new ShardOperationFailedException(shard.shardId(), e));
                }
                if (expectedOps == counterOps.incrementAndGet()) {
                    finishHim(alreadyThreaded);
                }
                return;
            }
            // we are not threaded here if we got here from the transport
            // or we possibly threaded if we got from a local threaded one,
            // in which case, the next shard in the partition will not be local one
            // so there is no meaning to this flag
            performOperation(shardIt, true);
        }

        private void finishHim(boolean alreadyThreaded) {
            // if we need to execute the listener on a thread, and we are not threaded already
            // then do it
            if (request.listenerThreaded() && !alreadyThreaded) {
                threadPool.execute(new Runnable() {
                    @Override public void run() {
                        listener.onResponse(newResponse(request, shardsResponses));
                    }
                });
            } else {
                listener.onResponse(newResponse(request, shardsResponses));
            }
        }
    }

    /** Handles the overall broadcast operation forwarded from another node. */
    private class TransportHandler extends BaseTransportRequestHandler<Request> {

        @Override public Request newInstance() {
            return newRequest();
        }

        @Override public void messageReceived(Request request, final TransportChannel channel) throws Exception {
            // we just send back a response, no need to fork a listener
            request.listenerThreaded(false);
            // we don't spawn, so if we get a request with no threading, change it to single threaded
            if (request.operationThreading() == BroadcastOperationThreading.NO_THREADS) {
                request.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
            }
            execute(request, new ActionListener<Response>() {
                @Override public void onResponse(Response response) {
                    try {
                        channel.sendResponse(response);
                    } catch (Exception e) {
                        onFailure(e);
                    }
                }

                @Override public void onFailure(Throwable e) {
                    try {
                        channel.sendResponse(e);
                    } catch (Exception e1) {
                        logger.warn("Failed to send response", e1);
                    }
                }
            });
        }

        @Override public boolean spawn() {
            return false;
        }
    }

    /** Handles a single shard operation sent from the coordinating node. */
    private class ShardTransportHandler extends BaseTransportRequestHandler<ShardRequest> {

        @Override public ShardRequest newInstance() {
            return newShardRequest();
        }

        @Override public void messageReceived(ShardRequest request, TransportChannel channel) throws Exception {
            channel.sendResponse(shardOperation(request));
        }
    }
}
+ */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.action.ActionRequest; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class MasterNodeOperationRequest implements ActionRequest { + + @Override public boolean listenerThreaded() { + // always threaded + return true; + } + + @Override public MasterNodeOperationRequest listenerThreaded(boolean listenerThreaded) { + // really, does not mean anything in this case + return this; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java new file mode 100644 index 00000000000..5d067981775 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.settings.Settings; + +/** + * A base class for operations that needs to be performed on the master node. + * + * @author kimchy (Shay Banon) + */ +public abstract class TransportMasterNodeOperationAction extends BaseAction { + + protected final TransportService transportService; + + protected final ClusterService clusterService; + + protected final ThreadPool threadPool; + + protected TransportMasterNodeOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + super(settings); + this.transportService = transportService; + this.clusterService = clusterService; + this.threadPool = threadPool; + + transportService.registerHandler(transportAction(), new TransportHandler()); + } + + protected abstract String transportAction(); + + protected abstract Request newRequest(); + + protected abstract Response newResponse(); + + protected abstract Response masterOperation(Request request) throws ElasticSearchException; + + @Override protected void doExecute(final Request request, final ActionListener listener) { + Nodes nodes = clusterService.state().nodes(); + if (nodes.localNodeMaster()) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + Response response = masterOperation(request); + listener.onResponse(response); + } catch (Exception e) { + listener.onFailure(e); + } + } + }); + } else { + transportService.sendRequest(nodes.masterNode(), transportAction(), request, new BaseTransportResponseHandler() { + 
@Override public Response newInstance() { + return newResponse(); + } + + @Override public void handleResponse(Response response) { + listener.onResponse(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + }); + } + } + + private class TransportHandler extends BaseTransportRequestHandler { + + @Override public Request newInstance() { + return newRequest(); + } + + @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + if (clusterService.state().nodes().localNodeMaster()) { + Response response = masterOperation(request); + channel.sendResponse(response); + } else { + transportService.sendRequest(clusterService.state().nodes().masterNode(), transportAction(), request, new BaseTransportResponseHandler() { + @Override public Response newInstance() { + return newResponse(); + } + + @Override public void handleResponse(Response response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + logger.error("Failed to send response", e); + } + } + + @Override public void handleException(RemoteTransportException exp) { + try { + channel.sendResponse(exp); + } catch (Exception e) { + logger.error("Failed to send response", e); + } + } + }); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java new file mode 100644 index 00000000000..48696a8a16c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.nodes; + +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class NodeOperationRequest implements Streamable { + + private String nodeId; + + protected NodeOperationRequest() { + + } + + protected NodeOperationRequest(String nodeId) { + this.nodeId = nodeId; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + nodeId = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(nodeId); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java new file mode 100644 index 00000000000..6c21cb6ceb0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.nodes; + +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class NodeOperationResponse implements Streamable { + + private Node node; + + protected NodeOperationResponse() { + } + + protected NodeOperationResponse(Node node) { + this.node = node; + } + + public Node node() { + return node; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + node = Node.readNode(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + node.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java new file mode 100644 index 00000000000..b90be902502 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.nodes; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.util.Strings; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class NodesOperationRequest implements ActionRequest { + + public static String[] ALL_NODES = Strings.EMPTY_ARRAY; + + private String[] nodesIds; + + private boolean listenerThreaded = false; + + protected NodesOperationRequest() { + + } + + protected NodesOperationRequest(String... 
nodesIds) { + this.nodesIds = nodesIds; + } + + @Override public NodesOperationRequest listenerThreaded(boolean listenerThreaded) { + this.listenerThreaded = listenerThreaded; + return this; + } + + @Override public boolean listenerThreaded() { + return this.listenerThreaded; + } + + public String[] nodesIds() { + return nodesIds; + } + + @Override public ActionRequestValidationException validate() { + return null; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + nodesIds = new String[in.readInt()]; + for (int i = 0; i < nodesIds.length; i++) { + nodesIds[i] = in.readUTF(); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + if (nodesIds == null) { + out.writeInt(0); + } else { + out.writeInt(nodesIds.length); + for (String nodeId : nodesIds) { + out.writeUTF(nodeId); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java new file mode 100644 index 00000000000..5e8d02c5530 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.nodes; + +import com.google.common.collect.Maps; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.ClusterName; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class NodesOperationResponse implements ActionResponse, Iterable { + + private ClusterName clusterName; + + protected NodeResponse[] nodes; + + private Map nodesMap; + + protected NodesOperationResponse() { + } + + protected NodesOperationResponse(ClusterName clusterName, NodeResponse[] nodes) { + this.clusterName = clusterName; + this.nodes = nodes; + } + + public ClusterName clusterName() { + return this.clusterName; + } + + public NodeResponse[] nodes() { + return nodes; + } + + @Override public Iterator iterator() { + return nodesMap().values().iterator(); + } + + public Map nodesMap() { + if (nodesMap == null) { + nodesMap = Maps.newHashMap(); + for (NodeResponse nodeResponse : nodes) { + nodesMap.put(nodeResponse.node().id(), nodeResponse); + } + } + return nodesMap; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + clusterName = ClusterName.readClusterName(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + clusterName.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java new file mode 100644 index 00000000000..88d703e17d4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java @@ -0,0 +1,242 @@ +/* + * Licensed to 
Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.nodes; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.NoSuchNodeException; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportNodesOperationAction extends BaseAction { + + protected final ClusterName clusterName; + + protected final ThreadPool threadPool; + + protected final ClusterService clusterService; + + protected final TransportService transportService; + + @Inject public TransportNodesOperationAction(Settings settings, ClusterName clusterName, 
ThreadPool threadPool, + ClusterService clusterService, TransportService transportService) { + super(settings); + this.clusterName = clusterName; + this.threadPool = threadPool; + this.clusterService = clusterService; + this.transportService = transportService; + + transportService.registerHandler(transportAction(), new TransportHandler()); + transportService.registerHandler(transportNodeAction(), new NodeTransportHandler()); + } + + @Override protected void doExecute(Request request, ActionListener listener) { + new AsyncAction(request, listener).start(); + } + + protected abstract String transportAction(); + + protected abstract String transportNodeAction(); + + protected abstract Request newRequest(); + + protected abstract Response newResponse(Request request, AtomicReferenceArray nodesResponses); + + protected abstract NodeRequest newNodeRequest(); + + protected abstract NodeRequest newNodeRequest(String nodeId, Request request); + + protected abstract NodeResponse newNodeResponse(); + + protected abstract NodeResponse nodeOperation(NodeRequest request) throws ElasticSearchException; + + protected abstract boolean accumulateExceptions(); + + + private class AsyncAction { + + private final Request request; + + private final String[] nodesIds; + + private final ActionListener listener; + + private final ClusterState clusterState; + + private final AtomicReferenceArray responses; + + private final AtomicInteger index = new AtomicInteger(); + + private final AtomicInteger counter = new AtomicInteger(); + + private AsyncAction(Request request, ActionListener listener) { + this.request = request; + this.listener = listener; + clusterState = clusterService.state(); + String[] nodesIds = request.nodesIds(); + if (nodesIds == null || nodesIds.length == 0) { + int index = 0; + nodesIds = new String[clusterState.nodes().size()]; + for (Node node : clusterState.nodes()) { + nodesIds[index++] = node.id(); + } + } + this.nodesIds = nodesIds; + this.responses = new 
AtomicReferenceArray(nodesIds.length); + } + + private void start() { + for (final String nodeId : nodesIds) { + final Node node = clusterState.nodes().nodes().get(nodeId); + if (nodeId.equals("_local") || nodeId.equals(clusterState.nodes().localNodeId())) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + onOperation(nodeOperation(newNodeRequest(clusterState.nodes().localNodeId(), request))); + } catch (Exception e) { + onFailure(clusterState.nodes().localNodeId(), e); + } + } + }); + } else if (nodeId.equals("_master")) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + onOperation(nodeOperation(newNodeRequest(clusterState.nodes().masterNodeId(), request))); + } catch (Exception e) { + onFailure(clusterState.nodes().masterNodeId(), e); + } + } + }); + } else { + if (node == null) { + onFailure(nodeId, new NoSuchNodeException(nodeId)); + } else { + NodeRequest nodeRequest = newNodeRequest(nodeId, request); + transportService.sendRequest(node, transportNodeAction(), nodeRequest, new BaseTransportResponseHandler() { + @Override public NodeResponse newInstance() { + return newNodeResponse(); + } + + @Override public void handleResponse(NodeResponse response) { + onOperation(response); + } + + @Override public void handleException(RemoteTransportException exp) { + onFailure(node.id(), exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + } + } + + private void onOperation(NodeResponse nodeResponse) { + // need two counters to avoid race conditions + responses.set(index.getAndIncrement(), nodeResponse); + if (counter.incrementAndGet() == responses.length()) { + finishHim(); + } + } + + private void onFailure(String nodeId, Throwable t) { + int idx = index.getAndIncrement(); + if (accumulateExceptions()) { + responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); + } + if (counter.incrementAndGet() == responses.length()) { + finishHim(); + } + } + 
+ private void finishHim() { + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(newResponse(request, responses)); + } + }); + } else { + listener.onResponse(newResponse(request, responses)); + } + } + } + + private class TransportHandler extends BaseTransportRequestHandler { + + @Override public Request newInstance() { + return newRequest(); + } + + @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + request.listenerThreaded(false); + execute(request, new ActionListener() { + @Override public void onResponse(Response response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn("Failed to send response", e); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } + } + + private class NodeTransportHandler extends BaseTransportRequestHandler { + + @Override public NodeRequest newInstance() { + return newNodeRequest(); + } + + @Override public void messageReceived(NodeRequest request, TransportChannel channel) throws Exception { + channel.sendResponse(nodeOperation(request)); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java new file mode 100644 index 00000000000..3b618412bc3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexReplicationOperationRequest implements ActionRequest { + + protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT; + + protected String index; + + private boolean threadedListener = false; + + public TimeValue timeout() { + return timeout; + } + + public String index() { + return this.index; + } + + @Override public boolean listenerThreaded() { + return this.threadedListener; + } + + @Override public IndexReplicationOperationRequest listenerThreaded(boolean threadedListener) { + this.threadedListener = threadedListener; + return this; + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = addValidationError("index name missing", validationException); + } + return validationException; + } + + @Override 
public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + timeout = TimeValue.readTimeValue(in); + index = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + timeout.writeTo(out); + out.writeUTF(index); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java new file mode 100644 index 00000000000..ffe79ce4fc6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndicesReplicationOperationRequest implements ActionRequest { + + protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT; + + protected String[] indices; + + private boolean threadedListener = false; + + public TimeValue timeout() { + return timeout; + } + + public String[] indices() { + return this.indices; + } + + @Override public ActionRequestValidationException validate() { + return null; + } + + @Override public boolean listenerThreaded() { + return this.threadedListener; + } + + @Override public IndicesReplicationOperationRequest listenerThreaded(boolean threadedListener) { + this.threadedListener = threadedListener; + return this; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + timeout = TimeValue.readTimeValue(in); + indices = new String[in.readInt()]; + for (int i = 0; i < indices.length; i++) { + indices[i] = in.readUTF(); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + timeout.writeTo(out); + out.writeInt(indices.length); + for (String index : indices) { + out.writeUTF(index); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java new file mode 100644 index 00000000000..f3d7534d00c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java @@ -0,0 +1,98 @@ +/* + * Licensed to 
Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.util.TimeValue; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.Actions.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class ShardReplicationOperationRequest implements ActionRequest { + + public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); + + protected TimeValue timeout = DEFAULT_TIMEOUT; + + protected String index; + + private boolean threadedListener = false; + private boolean threadedOperation = false; + + public TimeValue timeout() { + return timeout; + } + + public String index() { + return this.index; + } + + @Override public boolean listenerThreaded() { + return threadedListener; + } + + @Override public ShardReplicationOperationRequest listenerThreaded(boolean threadedListener) { + this.threadedListener = threadedListener; + return this; + } + + + /** + * Controls if the operation will be executed on a 
separate thread when executed locally. + */ + public boolean operationThreaded() { + return threadedOperation; + } + + /** + * Controls if the operation will be executed on a separate thread when executed locally. + */ + public ShardReplicationOperationRequest operationThreaded(boolean threadedOperation) { + this.threadedOperation = threadedOperation; + return this; + } + + @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + timeout = TimeValue.readTimeValue(in); + index = in.readUTF(); + // no need to serialize threaded* parameters, since they only matter locally + } + + @Override public void writeTo(DataOutput out) throws IOException { + timeout.writeTo(out); + out.writeUTF(index); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java new file mode 100644 index 00000000000..7efb50007ee --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportIndexReplicationOperationAction + extends BaseAction { + + protected final ThreadPool threadPool; + + protected final TransportShardReplicationOperationAction shardAction; + + @Inject public TransportIndexReplicationOperationAction(Settings settings, TransportService transportService, ThreadPool threadPool, + TransportShardReplicationOperationAction shardAction) { + super(settings); + this.threadPool = threadPool; + this.shardAction = shardAction; + + transportService.registerHandler(transportAction(), new TransportHandler()); + } + + @Override protected void doExecute(final Request request, final ActionListener listener) { + GroupShardsIterator groups; + try { + groups = 
shards(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } + final AtomicInteger indexCounter = new AtomicInteger(); + final AtomicInteger completionCounter = new AtomicInteger(groups.size()); + final AtomicReferenceArray shardsResponses = new AtomicReferenceArray(groups.size()); + + for (final ShardsIterator shards : groups) { + ShardRequest shardRequest = newShardRequestInstance(request, shards.shardId().id()); + // TODO for now, we fork operations on shards of the index + shardRequest.operationThreaded(true); + // no need for threaded listener, we will fork when its done based on the index request + shardRequest.listenerThreaded(false); + shardAction.execute(shardRequest, new ActionListener() { + @Override public void onResponse(ShardResponse result) { + shardsResponses.set(indexCounter.getAndIncrement(), result); + if (completionCounter.decrementAndGet() == 0) { + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(newResponseInstance(request, shardsResponses)); + } + }); + } else { + listener.onResponse(newResponseInstance(request, shardsResponses)); + } + } + } + + @Override public void onFailure(Throwable e) { + int index = indexCounter.getAndIncrement(); + if (accumulateExceptions()) { + shardsResponses.set(index, e); + } + if (completionCounter.decrementAndGet() == 0) { + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(newResponseInstance(request, shardsResponses)); + } + }); + } else { + listener.onResponse(newResponseInstance(request, shardsResponses)); + } + } + } + }); + } + } + + protected abstract Request newRequestInstance(); + + protected abstract Response newResponseInstance(Request request, AtomicReferenceArray shardsResponses); + + protected abstract String transportAction(); + + protected abstract GroupShardsIterator shards(Request request) throws ElasticSearchException; + 
+ protected abstract ShardRequest newShardRequestInstance(Request request, int shardId); + + protected abstract boolean accumulateExceptions(); + + private class TransportHandler extends BaseTransportRequestHandler { + + @Override public Request newInstance() { + return newRequestInstance(); + } + + @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + // no need to use threaded listener, since we just send a response + request.listenerThreaded(false); + execute(request, new ActionListener() { + @Override public void onResponse(Response result) { + try { + channel.sendResponse(result); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn("Failed to send error response for action [" + transportAction() + "] and request [" + request + "]", e1); + } + } + }); + } + + @Override public boolean spawn() { + // no need to spawn, since in the doExecute we always execute with threaded operation set to true + return false; + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java new file mode 100644 index 00000000000..2da1544f2f7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.Actions; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportIndicesReplicationOperationAction + extends BaseAction { + + protected final ThreadPool threadPool; + + protected final ClusterService clusterService; + + protected final TransportIndexReplicationOperationAction indexAction; + + @Inject public TransportIndicesReplicationOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + TransportIndexReplicationOperationAction indexAction) { + super(settings); + this.threadPool = threadPool; + this.clusterService = clusterService; + this.indexAction = indexAction; + + 
transportService.registerHandler(transportAction(), new TransportHandler()); + } + + @Override protected void doExecute(final Request request, final ActionListener listener) { + String[] indices = Actions.processIndices(clusterService.state(), request.indices()); + final AtomicInteger indexCounter = new AtomicInteger(); + final AtomicInteger completionCounter = new AtomicInteger(indices.length); + final AtomicReferenceArray indexResponses = new AtomicReferenceArray(indices.length); + + for (final String index : indices) { + IndexRequest indexRequest = newIndexRequestInstance(request, index); + // no threading needed, all is done on the index replication one + indexRequest.listenerThreaded(false); + indexAction.execute(indexRequest, new ActionListener() { + @Override public void onResponse(IndexResponse result) { + indexResponses.set(indexCounter.getAndIncrement(), result); + if (completionCounter.decrementAndGet() == 0) { + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(newResponseInstance(request, indexResponses)); + } + }); + } else { + listener.onResponse(newResponseInstance(request, indexResponses)); + } + } + } + + @Override public void onFailure(Throwable e) { + e.printStackTrace(); + int index = indexCounter.getAndIncrement(); + if (accumulateExceptions()) { + indexResponses.set(index, e); + } + if (completionCounter.decrementAndGet() == 0) { + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(newResponseInstance(request, indexResponses)); + } + }); + } else { + listener.onResponse(newResponseInstance(request, indexResponses)); + } + } + } + }); + } + } + + protected abstract Request newRequestInstance(); + + protected abstract Response newResponseInstance(Request request, AtomicReferenceArray indexResponses); + + protected abstract String transportAction(); + + protected abstract IndexRequest 
newIndexRequestInstance(Request request, String index); + + protected abstract boolean accumulateExceptions(); + + private class TransportHandler extends BaseTransportRequestHandler { + + @Override public Request newInstance() { + return newRequestInstance(); + } + + @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + // no need for a threaded listener, since we just send a response + request.listenerThreaded(false); + execute(request, new ActionListener() { + @Override public void onResponse(Response result) { + try { + channel.sendResponse(result); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn("Failed to send error response for action [" + transportAction() + "] and request [" + request + "]", e1); + } + } + }); + } + + @Override public boolean spawn() { + // no need to spawn, since we always execute in the index one with threadedOperation set to true + return false; + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java new file mode 100644 index 00000000000..e7955d88b2f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -0,0 +1,485 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.PrimaryNotStartedActionException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.TimeoutClusterStateListener; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.index.IndexShardMissingException; +import org.elasticsearch.index.shard.IllegalIndexShardStateException; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardNotStartedException; +import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.io.VoidStreamable; +import org.elasticsearch.util.settings.Settings; + +import java.io.DataInput; +import java.io.DataOutput; 
+import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportShardReplicationOperationAction extends BaseAction { + + protected final TransportService transportService; + + protected final ClusterService clusterService; + + protected final IndicesService indicesService; + + protected final ThreadPool threadPool; + + protected final ShardStateAction shardStateAction; + + protected TransportShardReplicationOperationAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, + ThreadPool threadPool, ShardStateAction shardStateAction) { + super(settings); + this.transportService = transportService; + this.clusterService = clusterService; + this.indicesService = indicesService; + this.threadPool = threadPool; + this.shardStateAction = shardStateAction; + + transportService.registerHandler(transportAction(), new OperationTransportHandler()); + transportService.registerHandler(transportBackupAction(), new BackupOperationTransportHandler()); + } + + @Override protected void doExecute(Request request, ActionListener listener) { + new AsyncShardOperationAction(request, listener).start(); + } + + protected abstract Request newRequestInstance(); + + protected abstract Response newResponseInstance(); + + protected abstract String transportAction(); + + protected abstract Response shardOperationOnPrimary(ShardOperationRequest shardRequest); + + protected abstract void shardOperationOnBackup(ShardOperationRequest shardRequest); + + protected abstract ShardsIterator shards(Request request) throws ElasticSearchException; + + /** + * Should the operations be performed on the backups as well. Defaults to false meaning operations + * will be executed on the backup. 
+ */ + protected boolean ignoreBackups() { + return false; + } + + private String transportBackupAction() { + return transportAction() + "/backup"; + } + + protected IndexShard indexShard(ShardOperationRequest shardRequest) { + return indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId); + } + + private class OperationTransportHandler extends BaseTransportRequestHandler { + + @Override public Request newInstance() { + return newRequestInstance(); + } + + @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + // no need to have a threaded listener since we just send back a response + request.listenerThreaded(false); + // if we have a local operation, execute it on a thread since we don't spawn + request.operationThreaded(true); + execute(request, new ActionListener() { + @Override public void onResponse(Response result) { + try { + channel.sendResponse(result); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn("Failed to send response for " + transportAction(), e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } + } + + private class BackupOperationTransportHandler extends BaseTransportRequestHandler { + + @Override public ShardOperationRequest newInstance() { + return new ShardOperationRequest(); + } + + @Override public void messageReceived(ShardOperationRequest request, TransportChannel channel) throws Exception { + shardOperationOnBackup(request); + channel.sendResponse(VoidStreamable.INSTANCE); + } + } + + protected class ShardOperationRequest implements Streamable { + + public int shardId; + + public Request request; + + public ShardOperationRequest() { + } + + public ShardOperationRequest(int shardId, Request request) { + this.shardId = shardId; + this.request = request; + } + + @Override public void 
readFrom(DataInput in) throws IOException, ClassNotFoundException { + shardId = in.readInt(); + request = newRequestInstance(); + request.readFrom(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(shardId); + request.writeTo(out); + } + } + + private class AsyncShardOperationAction { + + private final ActionListener listener; + + private final Request request; + + private Nodes nodes; + + private ShardsIterator shards; + + private final AtomicBoolean primaryOperationStarted = new AtomicBoolean(); + + private AsyncShardOperationAction(Request request, ActionListener listener) { + this.request = request; + this.listener = listener; + } + + public void start() { + start(false); + } + + /** + * Returns true if the action starting to be performed on the primary (or is done). + */ + public boolean start(final boolean fromClusterEvent) throws ElasticSearchException { + ClusterState clusterState = clusterService.state(); + nodes = clusterState.nodes(); + try { + shards = shards(request); + } catch (Exception e) { + listener.onFailure(new ShardOperationFailedException(shards.shardId(), e)); + return true; + } + + boolean foundPrimary = false; + for (final ShardRouting shard : shards) { + if (shard.primary()) { + if (!shard.active()) { + retryPrimary(fromClusterEvent, shard); + return false; + } + + if (!primaryOperationStarted.compareAndSet(false, true)) { + return false; + } + + foundPrimary = true; + if (shard.currentNodeId().equals(nodes.localNodeId())) { + if (request.operationThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + performOnPrimary(shard.id(), fromClusterEvent, true, shard); + } + }); + } else { + performOnPrimary(shard.id(), fromClusterEvent, false, shard); + } + } else { + Node node = nodes.get(shard.currentNodeId()); + transportService.sendRequest(node, transportAction(), request, new BaseTransportResponseHandler() { + + @Override public Response newInstance() { + return 
newResponseInstance(); + } + + @Override public void handleResponse(Response response) { + listener.onResponse(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return request.listenerThreaded(); + } + }); + } + break; + } + } + // we should never get here, but here we go + if (!foundPrimary) { + final PrimaryNotStartedActionException failure = new PrimaryNotStartedActionException(shards.shardId(), "Primary not found"); + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onFailure(failure); + } + }); + } else { + listener.onFailure(failure); + } + } + return true; + } + + private void retryPrimary(boolean fromClusterEvent, final ShardRouting shard) { + if (!fromClusterEvent) { + // make it threaded operation so we fork on the discovery listener thread + request.operationThreaded(true); + clusterService.add(request.timeout(), new TimeoutClusterStateListener() { + @Override public void clusterChanged(ClusterChangedEvent event) { + if (start(true)) { + // if we managed to start and perform the operation on the primary, we can remove this listener + clusterService.remove(this); + } + } + + @Override public void onTimeout(TimeValue timeValue) { + final PrimaryNotStartedActionException failure = new PrimaryNotStartedActionException(shard.shardId(), "Timeout waiting for [" + timeValue + "]"); + if (request.listenerThreaded()) { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onFailure(failure); + } + }); + } else { + listener.onFailure(failure); + } + } + }); + } + } + + private void performOnPrimary(int primaryShardId, boolean fromDiscoveryListener, boolean alreadyThreaded, final ShardRouting shard) { + try { + Response response = shardOperationOnPrimary(new ShardOperationRequest(primaryShardId, request)); + performBackups(response, alreadyThreaded); + } catch 
(IndexShardNotStartedException e) { + // still in recovery, retry (we know that its not UNASSIGNED OR INITIALIZING since we are checking it in the calling method) + retryPrimary(fromDiscoveryListener, shard); + } catch (Exception e) { + listener.onFailure(new ShardOperationFailedException(shards.shardId(), e)); + } + } + + private void performBackups(final Response response, boolean alreadyThreaded) { + if (ignoreBackups() || shards.size() == 1 /* no backups */) { + if (alreadyThreaded || !request.listenerThreaded()) { + listener.onResponse(response); + } else { + threadPool.execute(new Runnable() { + @Override public void run() { + listener.onResponse(response); + } + }); + } + return; + } + + // initialize the counter + int backupCounter = 0; + for (final ShardRouting shard : shards.reset()) { + if (shard.primary()) { + continue; + } + backupCounter++; + // if we are relocating the backup, we want to perform the index operation on both the relocating + // shard and the target shard. This means that we won't loose index operations between end of recovery + // and reassignment of the shard by the master node + if (shard.relocating()) { + backupCounter++; + } + } + + AtomicInteger counter = new AtomicInteger(backupCounter); + for (final ShardRouting shard : shards.reset()) { + if (shard.primary()) { + continue; + } + // we index on a backup that is initializing as well since we might not have got the event + // yet that it was started. 
// We will get an exception IllegalShardState exception if its not started
// and that's fine, we will ignore it
if (shard.unassigned()) {
    // Unassigned shard: nothing to execute, but this shard still owns one slot in
    // the expected-operations counter, so account for it before moving on.
    if (counter.decrementAndGet() == 0) {
        // Last pending operation: notify the listener. Fork to the thread pool only
        // when the caller asked for a threaded listener AND we are not already
        // running on a pooled thread.
        if (alreadyThreaded || !request.listenerThreaded()) {
            listener.onResponse(response);
        } else {
            threadPool.execute(new Runnable() {
                @Override public void run() {
                    listener.onResponse(response);
                }
            });
        }
        break;
    }
    continue;
}
// Perform on the node currently holding the backup copy; if the shard is relocating,
// the target node must receive the operation as well so neither copy misses it.
performOnBackup(response, counter, shard, shard.currentNodeId());
if (shard.relocating()) {
    performOnBackup(response, counter, shard, shard.relocatingNodeId());
}
}
}

/**
 * Performs the operation on a single backup (replica) copy located on {@code nodeId}:
 * remotely via the transport layer when the node is not the local one, otherwise
 * locally (optionally forked onto the thread pool when the request asks for a
 * threaded operation). In every completion path {@code counter} is decremented, and
 * when it reaches zero the listener is invoked with {@code response}, honoring
 * {@code request.listenerThreaded()} where the current thread is not already pooled.
 */
private void performOnBackup(final Response response, final AtomicInteger counter, final ShardRouting shard, String nodeId) {
    final ShardOperationRequest shardRequest = new ShardOperationRequest(shards.shardId().id(), request);
    if (!nodeId.equals(nodes.localNodeId())) {
        // Remote backup: send over the transport; count down on success AND failure
        // so the listener cannot be left hanging.
        Node node = nodes.get(nodeId);
        transportService.sendRequest(node, transportBackupAction(), shardRequest, new VoidTransportResponseHandler() {
            @Override public void handleResponse(VoidStreamable vResponse) {
                finishIfPossible();
            }

            @Override public void handleException(RemoteTransportException exp) {
                // Recovery-related exceptions are expected while the backup is still
                // initializing and are ignored; anything else marks the shard failed
                // so it can be reallocated.
                if (!ignoreBackupException(exp.unwrapCause())) {
                    logger.warn("Failed to perform " + transportAction() + " on backup " + shards.shardId(), exp);
                    shardStateAction.shardFailed(shard);
                }
                finishIfPossible();
            }

            // Decrement the pending-ops counter; fire the listener when this was the
            // last outstanding backup operation.
            private void finishIfPossible() {
                if (counter.decrementAndGet() == 0) {
                    if (request.listenerThreaded()) {
                        threadPool.execute(new Runnable() {
                            @Override public void run() {
                                listener.onResponse(response);
                            }
                        });
                    } else {
                        listener.onResponse(response);
                    }
                }
            }

            @Override public boolean spawn() {
                // don't spawn, we will call the listener on a thread pool if needed
                return false;
            }
        });
    } else {
        // Local backup copy.
        if (request.operationThreaded()) {
            // Forked execution: we are already on a pool thread when the listener
            // fires, so no additional fork is needed even if listenerThreaded() is set.
            threadPool.execute(new Runnable() {
                @Override public void run() {
                    try {
                        shardOperationOnBackup(shardRequest);
                    } catch (Exception e) {
                        if (!ignoreBackupException(e)) {
                            logger.warn("Failed to perform " + transportAction() + " on backup " + shards.shardId(), e);
                            shardStateAction.shardFailed(shard);
                        }
                    }
                    if (counter.decrementAndGet() == 0) {
                        listener.onResponse(response);
                    }
                }
            });
        } else {
            // Execute on the calling thread; honor listenerThreaded() when notifying.
            try {
                shardOperationOnBackup(shardRequest);
            } catch (Exception e) {
                if (!ignoreBackupException(e)) {
                    logger.warn("Failed to perform " + transportAction() + " on backup " + shards.shardId(), e);
                    shardStateAction.shardFailed(shard);
                }
            }
            if (counter.decrementAndGet() == 0) {
                if (request.listenerThreaded()) {
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            listener.onResponse(response);
                        }
                    });
                } else {
                    listener.onResponse(response);
                }
            }
        }
    }
}

/**
 * Should an exception be ignored when the operation is performed on the backup. The exception
 * is ignored if it is:
 *
 * <ul>
 * <li><b>IllegalIndexShardStateException</b>: The shard has not yet moved to started mode (it is still recovering).
 * <li><b>IndexMissingException</b>/<b>IndexShardMissingException</b>: The shard has not yet started to initialize on the target node.
 * </ul>
 */
private boolean ignoreBackupException(Throwable e) {
    if (e instanceof IllegalIndexShardStateException) {
        return true;
    }
    if (e instanceof IndexMissingException) {
        return true;
    }
    if (e instanceof IndexShardMissingException) {
        return true;
    }
    return false;
}
}
}
+ */ + +package org.elasticsearch.action.support.shards; + +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardOperationRequest implements Streamable { + + private String index; + + private int shardId; + + protected ShardOperationRequest() { + } + + protected ShardOperationRequest(String index, int shardId) { + this.index = index; + this.shardId = shardId; + } + + public String index() { + return this.index; + } + + public int shardId() { + return this.shardId; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = in.readUTF(); + shardId = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(index); + out.writeInt(shardId); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardOperationResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardOperationResponse.java new file mode 100644 index 00000000000..25e182b8755 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardOperationResponse.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.shards; + +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.cluster.routing.ImmutableShardRouting.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardOperationResponse implements Streamable { + + private ShardRouting shardRouting; + + protected ShardOperationResponse() { + + } + + protected ShardOperationResponse(ShardRouting shardRouting) { + this.shardRouting = shardRouting; + } + + public ShardRouting shardRouting() { + return shardRouting; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + shardRouting = readShardRoutingEntry(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + shardRouting.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardsOperationRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardsOperationRequest.java new file mode 100644 index 00000000000..8c124ba8186 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardsOperationRequest.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.elasticsearch.action.support.shards;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.util.Strings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Base request for operations that are broadcast to the shards of one or more indices.
 * Holds the target index names plus two threading knobs: whether the listener is
 * invoked on a forked thread, and how local shard operations are threaded
 * (see {@link ShardsOperationThreading}; defaults to {@link ShardsOperationThreading#SINGLE_THREAD}).
 *
 * @author kimchy (Shay Banon)
 */
public class ShardsOperationRequest implements ActionRequest {

    private String[] indices;

    private boolean listenerThreaded = false;
    private ShardsOperationThreading operationThreading = ShardsOperationThreading.SINGLE_THREAD;

    protected ShardsOperationRequest() {
        // no-arg constructor used only by deserialization (readFrom fills the fields)
    }

    protected ShardsOperationRequest(String... indices) {
        // normalize a null var-args call to an empty array so callers never see null
        if (indices == null) {
            this.indices = Strings.EMPTY_ARRAY;
        } else {
            this.indices = indices;
        }
    }

    /**
     * The indices this request is broadcast to (never null).
     */
    public String[] indices() {
        return this.indices;
    }

    @Override public ActionRequestValidationException validate() {
        // nothing to validate: a missing indices array is normalized in the constructor
        return null;
    }

    @Override public boolean listenerThreaded() {
        return this.listenerThreaded;
    }

    @Override public ShardsOperationRequest listenerThreaded(boolean listenerThreaded) {
        this.listenerThreaded = listenerThreaded;
        return this;
    }

    /**
     * The threading model for shard operations executed on the local node.
     */
    public ShardsOperationThreading operationThreading() {
        return operationThreading;
    }

    public ShardsOperationRequest operationThreading(ShardsOperationThreading operationThreading) {
        this.operationThreading = operationThreading;
        return this;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        // listenerThreaded is intentionally not transported; see writeTo
        int size = in.readInt();
        indices = new String[size];
        for (int i = 0; i < size; i++) {
            indices[i] = in.readUTF();
        }
        operationThreading = ShardsOperationThreading.fromId(in.readByte());
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeInt(indices.length);
        for (int i = 0; i < indices.length; i++) {
            out.writeUTF(indices[i]);
        }
        out.writeByte(operationThreading.id());
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.shards; + +import org.elasticsearch.action.ActionResponse; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class ShardsOperationResponse implements ActionResponse { + + protected ShardResponse[] shards; + + protected ShardsOperationResponse() { + } + + protected ShardsOperationResponse(ShardResponse[] shards) { + this.shards = shards; + } + + public ShardResponse[] shards() { + return this.shards; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardsOperationThreading.java b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardsOperationThreading.java new file mode 100644 index 00000000000..226cc5d70e3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/action/support/shards/ShardsOperationThreading.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.elasticsearch.action.support.shards;

import org.elasticsearch.ElasticSearchIllegalArgumentException;

import java.util.Locale;

/**
 * Controls the operation threading model for shards operation that are performed
 * locally on the executing node.
 *
 * @author kimchy (Shay Banon)
 */
public enum ShardsOperationThreading {
    /**
     * No threads are used, all the local shards operations will be performed on the calling
     * thread.
     */
    NO_THREADS((byte) 0),
    /**
     * The local shards operations will be performed in serial manner on a single forked thread.
     */
    SINGLE_THREAD((byte) 1),
    /**
     * Each local shard operation will execute on its own thread.
     */
    THREAD_PER_SHARD((byte) 2);

    /** Stable wire id of this constant; do not reuse ordinals for serialization. */
    private final byte id;

    ShardsOperationThreading(byte id) {
        this.id = id;
    }

    /**
     * The wire id of this threading mode, written by request serialization.
     */
    public byte id() {
        return this.id;
    }

    /**
     * Resolves a threading mode from its wire {@link #id()}.
     *
     * @throws ElasticSearchIllegalArgumentException if the id is unknown
     */
    public static ShardsOperationThreading fromId(byte id) {
        switch (id) {
            case 0:
                return NO_THREADS;
            case 1:
                return SINGLE_THREAD;
            case 2:
                return THREAD_PER_SHARD;
            default:
                throw new ElasticSearchIllegalArgumentException("No type matching id [" + id + "]");
        }
    }

    /**
     * Resolves a threading mode from its (case-insensitive) name, returning
     * {@code defaultValue} when {@code value} is null.
     */
    public static ShardsOperationThreading fromString(String value, ShardsOperationThreading defaultValue) {
        if (value == null) {
            return defaultValue;
        }
        // Locale.ROOT: constant lookup must not depend on the JVM default locale
        // (e.g. a Turkish locale upper-cases 'i' to dotted 'İ', breaking valueOf)
        return ShardsOperationThreading.valueOf(value.toUpperCase(Locale.ROOT));
    }
}
package org.elasticsearch.action.support.shards;

import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardNotActiveException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.BaseAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.Node;
import org.elasticsearch.cluster.node.Nodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import org.elasticsearch.util.settings.Settings;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static org.elasticsearch.action.Actions.*;

/**
 * Base class for transport actions that broadcast an operation to every shard of the
 * requested indices, collect the per-shard responses (or failures), and reduce them
 * into a single {@code Response} via {@link #newResponse}. Registers one transport
 * handler for the whole action and one for the per-shard operation.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class TransportShardsOperationActions<Request extends ShardsOperationRequest, Response extends ShardsOperationResponse,
        ShardRequest extends ShardOperationRequest, ShardResponse extends ShardOperationResponse> extends BaseAction<Request, Response> {

    protected final ClusterService clusterService;

    protected final TransportService transportService;

    protected final IndicesService indicesService;

    protected final ThreadPool threadPool;

    protected TransportShardsOperationActions(Settings settings, ClusterService clusterService, TransportService transportService,
                                              IndicesService indicesService, ThreadPool threadPool) {
        super(settings);
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.indicesService = indicesService;
        this.threadPool = threadPool;

        // one handler for the full broadcast request, one for a single-shard request
        transportService.registerHandler(transportAction(), new TransportHandler());
        transportService.registerHandler(transportShardAction(), new ShardTransportHandler());
    }

    @Override protected void doExecute(Request request, ActionListener<Response> listener) {
        new AsyncBroadcastAction(request, listener).start();
    }

    /** The transport action name for the whole broadcast operation. */
    protected abstract String transportAction();

    /** The transport action name for the per-shard operation. */
    protected abstract String transportShardAction();

    protected abstract Request newRequest();

    /** Reduces the accumulated per-shard responses/failures into the final response. */
    protected abstract Response newResponse(Request request, ClusterState clusterState, AtomicReferenceArray shardsResponses);

    protected abstract ShardRequest newShardRequest();

    protected abstract ShardRequest newShardRequest(ShardRouting shard, Request request);

    protected abstract ShardResponse newShardResponse();

    /** Executes the operation against a single (local) shard. */
    protected abstract ShardResponse shardOperation(ShardRequest request) throws ElasticSearchException;

    /** Whether per-shard failures are stored in the responses array (as ShardOperationFailedException). */
    protected abstract boolean accumulateExceptions();

    private class AsyncBroadcastAction {

        private final Request request;

        private final ActionListener<Response> listener;

        private final ClusterState clusterState;

        private final Nodes nodes;

        private final List<ShardRouting> shards;

        private final AtomicInteger opsCounter = new AtomicInteger();

        private final AtomicInteger indexCounter = new AtomicInteger();

        private final AtomicReferenceArray shardsResponses;

        private AsyncBroadcastAction(Request request, ActionListener<Response> listener) {
            this.request = request;
            this.listener = listener;

            // snapshot the cluster state once so routing decisions stay consistent
            clusterState = clusterService.state();
            nodes = clusterState.nodes();
            shards = clusterState.routingTable().allShards(processIndices(clusterState, request.indices()));

            shardsResponses = new AtomicReferenceArray(shards.size());
        }

        public void start() {
            // NOTE(review): if shards is empty, opsCounter can never reach
            // shardsResponses.length() and the listener is never notified — confirm
            // callers cannot resolve the request to zero shards.
            // count the local operations, and perform the non local ones
            int localOperations = 0;
            for (final ShardRouting shard : shards) {
                if (shard.active()) {
                    if (shard.currentNodeId().equals(nodes.localNodeId())) {
                        localOperations++;
                    } else {
                        // do the remote operation here, the localAsync flag is not relevant
                        performOperation(shard, true);
                    }
                } else {
                    // as if we have a "problem", so we iterate to the next one and maintain counts
                    onFailure(shard, new ShardNotActiveException(shard.shardId()), false);
                }
            }
            // we have local operations, perform them now
            if (localOperations > 0) {
                if (request.operationThreading() == ShardsOperationThreading.SINGLE_THREAD) {
                    // run all local shard operations serially on one forked thread
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            for (final ShardRouting shard : shards) {
                                if (shard.active()) {
                                    if (shard.currentNodeId().equals(nodes.localNodeId())) {
                                        performOperation(shard, false);
                                    }
                                }
                            }
                        }
                    });
                } else {
                    // NO_THREADS: run on the calling thread; THREAD_PER_SHARD: fork each one
                    boolean localAsync = request.operationThreading() == ShardsOperationThreading.THREAD_PER_SHARD;
                    for (final ShardRouting shard : shards) {
                        if (shard.active()) {
                            if (shard.currentNodeId().equals(nodes.localNodeId())) {
                                performOperation(shard, localAsync);
                            }
                        }
                    }
                }
            }
        }

        /**
         * Executes the operation for one shard: locally (optionally on a forked thread
         * when {@code localAsync}) or remotely via the shard transport action.
         */
        private void performOperation(final ShardRouting shard, boolean localAsync) {
            final ShardRequest shardRequest = newShardRequest(shard, request);
            if (shard.currentNodeId().equals(nodes.localNodeId())) {
                if (localAsync) {
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            try {
                                onOperation(shard, shardOperation(shardRequest), true);
                            } catch (Exception e) {
                                onFailure(shard, e, true);
                            }
                        }
                    });
                } else {
                    try {
                        onOperation(shard, shardOperation(shardRequest), false);
                    } catch (Exception e) {
                        onFailure(shard, e, false);
                    }
                }
            } else {
                Node node = nodes.get(shard.currentNodeId());
                transportService.sendRequest(node, transportShardAction(), shardRequest, new BaseTransportResponseHandler<ShardResponse>() {
                    @Override public ShardResponse newInstance() {
                        return newShardResponse();
                    }

                    @Override public void handleResponse(ShardResponse response) {
                        onOperation(shard, response, false);
                    }

                    @Override public void handleException(RemoteTransportException exp) {
                        onFailure(shard, exp, false);
                    }

                    @Override public boolean spawn() {
                        // we never spawn here, we will spawn if needed in onOperation
                        return false;
                    }
                });
            }
        }

        private void onOperation(ShardRouting shardRouting, ShardResponse shardResponse, boolean alreadyThreaded) {
            // need two counters to avoid race conditions: indexCounter hands out a
            // unique slot, opsCounter decides when all shards have reported back
            shardsResponses.set(indexCounter.getAndIncrement(), shardResponse);
            if (opsCounter.incrementAndGet() == shardsResponses.length()) {
                finishHim(alreadyThreaded);
            }
        }

        private void onFailure(ShardRouting shardRouting, Throwable t, boolean alreadyThreaded) {
            // the slot is always consumed so opsCounter/length bookkeeping stays exact,
            // even when the failure itself is not recorded
            int idx = indexCounter.getAndIncrement();
            if (accumulateExceptions()) {
                ShardOperationFailedException failedException;
                if (t instanceof ShardOperationFailedException) {
                    failedException = (ShardOperationFailedException) t;
                } else {
                    failedException = new ShardOperationFailedException(shardRouting.shardId(), t);
                }
                shardsResponses.set(idx, failedException);
            }
            if (opsCounter.incrementAndGet() == shardsResponses.length()) {
                finishHim(alreadyThreaded);
            }
        }

        private void finishHim(boolean alreadyThreaded) {
            // if we need to execute the listener on a thread, and we are not threaded already
            // then do it
            if (request.listenerThreaded() && !alreadyThreaded) {
                threadPool.execute(new Runnable() {
                    @Override public void run() {
                        listener.onResponse(newResponse(request, clusterState, shardsResponses));
                    }
                });
            } else {
                listener.onResponse(newResponse(request, clusterState, shardsResponses));
            }
        }
    }

    /**
     * Transport handler for the whole broadcast request (invoked on non-coordinating use).
     */
    private class TransportHandler extends BaseTransportRequestHandler<Request> {

        @Override public Request newInstance() {
            return newRequest();
        }

        @Override public void messageReceived(Request request, final TransportChannel channel) throws Exception {
            // we just send back a response, no need to fork a listener
            request.listenerThreaded(false);
            // we don't spawn, so if we get a request with no threading, change it to single threaded
            if (request.operationThreading() == ShardsOperationThreading.NO_THREADS) {
                request.operationThreading(ShardsOperationThreading.SINGLE_THREAD);
            }
            execute(request, new ActionListener<Response>() {
                @Override public void onResponse(Response response) {
                    try {
                        channel.sendResponse(response);
                    } catch (Exception e) {
                        onFailure(e);
                    }
                }

                @Override public void onFailure(Throwable e) {
                    try {
                        channel.sendResponse(e);
                    } catch (Exception e1) {
                        logger.warn("Failed to send response", e1);
                    }
                }
            });
        }

        @Override public boolean spawn() {
            return false;
        }
    }

    /**
     * Transport handler for a single-shard operation sent by a remote coordinator.
     */
    private class ShardTransportHandler extends BaseTransportRequestHandler<ShardRequest> {

        @Override public ShardRequest newInstance() {
            return newShardRequest();
        }

        @Override public void messageReceived(ShardRequest request, TransportChannel channel) throws Exception {
            channel.sendResponse(shardOperation(request));
        }
    }
}
package org.elasticsearch.action.support.single;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.Actions;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Base request for operations addressed at a single document, identified by
 * index / type / id. Also carries two local-only threading flags (not serialized):
 * whether the listener is invoked on a forked thread, and whether a locally
 * executed operation is forked onto the thread pool.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class SingleOperationRequest implements ActionRequest {

    protected String index;
    protected String type;
    protected String id;

    // threading flags are local to the coordinating node and never serialized
    private boolean listenerThreaded = false;
    private boolean operationThreaded = false;

    protected SingleOperationRequest() {
        // no-arg constructor used only by deserialization (readFrom fills the fields)
    }

    public SingleOperationRequest(String index, String type, String id) {
        this.index = index;
        this.type = type;
        this.id = id;
    }

    @Override public ActionRequestValidationException validate() {
        // accumulate all missing-field errors rather than failing on the first one
        ActionRequestValidationException errors = null;
        if (index == null) {
            errors = Actions.addValidationError("index is missing", errors);
        }
        if (type == null) {
            errors = Actions.addValidationError("type is missing", errors);
        }
        if (id == null) {
            errors = Actions.addValidationError("id is missing", errors);
        }
        return errors;
    }

    public String index() {
        return this.index;
    }

    public String type() {
        return this.type;
    }

    public String id() {
        return this.id;
    }

    @Override public boolean listenerThreaded() {
        return this.listenerThreaded;
    }

    @Override public SingleOperationRequest listenerThreaded(boolean threadedListener) {
        this.listenerThreaded = threadedListener;
        return this;
    }

    /**
     * Controls if the operation will be executed on a separate thread when executed locally.
     */
    public boolean threadedOperation() {
        return this.operationThreaded;
    }

    /**
     * Controls if the operation will be executed on a separate thread when executed locally.
     */
    public SingleOperationRequest threadedOperation(boolean threadedOperation) {
        this.operationThreaded = threadedOperation;
        return this;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        this.index = in.readUTF();
        this.type = in.readUTF();
        this.id = in.readUTF();
        // no need to pass threading over the network, they are always false when coming throw a thread pool
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        // keep in sync with readFrom: index, type, id
        out.writeUTF(index);
        out.writeUTF(type);
        out.writeUTF(id);
    }

}
+ */ + +package org.elasticsearch.action.support.single; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.support.BaseAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.settings.Settings; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Iterator; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class TransportSingleOperationAction extends BaseAction { + + protected final ClusterService clusterService; + + protected final TransportService transportService; + + protected final IndicesService indicesService; + + protected final ThreadPool threadPool; + + protected TransportSingleOperationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService) { + super(settings); + this.clusterService = clusterService; + this.transportService = transportService; + this.threadPool = threadPool; + this.indicesService = indicesService; + + transportService.registerHandler(transportAction(), new TransportHandler()); + transportService.registerHandler(transportShardAction(), new ShardTransportHandler()); + } + + @Override protected void doExecute(Request request, ActionListener listener) { + new AsyncSingleAction(request, listener).start(); + } + + protected abstract String 
transportAction();

    // Transport action name used for the shard-level (remote) operation.
    protected abstract String transportShardAction();

    // Executes the actual operation against the given local shard; implemented by subclasses.
    protected abstract Response shardOperation(Request request, int shardId) throws ElasticSearchException;

    // Factory for an empty request, used when deserializing from the transport layer.
    protected abstract Request newRequest();

    // Factory for an empty response, used when deserializing from the transport layer.
    protected abstract Response newResponse();

    /**
     * Drives a single-document operation: first tries an active copy of the target shard on
     * the local node (cheaper), then falls back to remote copies one by one, failing over on
     * each error until a copy succeeds or all copies are exhausted.
     */
    private class AsyncSingleAction {

        private final ActionListener listener;

        private final ShardsIterator shards;

        // Current position over the shard copies; reset once after the local-only pass.
        private Iterator shardsIt;

        private final Request request;

        // Cluster nodes snapshot taken at construction time.
        private final Nodes nodes;

        private AsyncSingleAction(Request request, ActionListener listener) {
            this.request = request;
            this.listener = listener;

            ClusterState clusterState = clusterService.state();

            nodes = clusterState.nodes();

            // Resolve which shard (and its copies) this type/id routes to.
            this.shards = indicesService.indexServiceSafe(request.index).operationRouting()
                    .getShards(clusterState, request.type(), request.id());
            this.shardsIt = shards.iterator();
        }

        public void start() {
            performFirst();
        }

        // One shard copy failed: log at debug and fail over to the next copy.
        public void onFailure(ShardRouting shardRouting, Exception e) {
            if (logger.isDebugEnabled()) {
                logger.debug(shardRouting.shortSummary() + ": Failed to get [" + request.type() + "#" + request.id() + "]", e);
            }
            perform(e);
        }

        /**
         * First get should try and use a shard that exists on a local node for better performance
         */
        private void performFirst() {
            while (shardsIt.hasNext()) {
                final ShardRouting shard = shardsIt.next();
                if (!shard.active()) {
                    continue;
                }
                if (shard.currentNodeId().equals(nodes.localNodeId())) {
                    if (request.threadedOperation()) {
                        // Caller asked for the operation itself to run off the calling thread.
                        threadPool.execute(new Runnable() {
                            @Override public void run() {
                                try {
                                    Response response = shardOperation(request, shard.id());
                                    listener.onResponse(response);
                                } catch (Exception e) {
                                    onFailure(shard, e);
                                }
                            }
                        });
                        return;
                    } else {
                        try {
                            final Response response = shardOperation(request, shard.id());
                            if (request.listenerThreaded()) {
                                // Operation ran inline, but the listener callback is dispatched to the pool.
                                threadPool.execute(new Runnable() {
                                    @Override public void run() {
                                        listener.onResponse(response);
                                    }
                                });
                            } else {
                                listener.onResponse(response);
                            }
                            return;
                        } catch (Exception e) {
                            onFailure(shard, e);
                        }
                    }
                }
            }
            if (!shardsIt.hasNext()) {
                // no local node get, go remote
                shardsIt = shards.reset().iterator();
                perform(null);
            }
        }

        // Remote pass: try each remaining (non-local) active copy; lastException is the most
        // recent failure and is attached to the final error if every copy fails.
        private void perform(final Exception lastException) {
            while (shardsIt.hasNext()) {
                final ShardRouting shard = shardsIt.next();
                if (!shard.active()) {
                    continue;
                }
                // no need to check for local nodes, we tried them already in performFirst()
                if (!shard.currentNodeId().equals(nodes.localNodeId())) {
                    Node node = nodes.get(shard.currentNodeId());
                    transportService.sendRequest(node, transportShardAction(), new ShardSingleOperationRequest(request, shard.id()), new BaseTransportResponseHandler() {
                        @Override public Response newInstance() {
                            return newResponse();
                        }

                        @Override public void handleResponse(final Response response) {
                            if (request.listenerThreaded()) {
                                threadPool.execute(new Runnable() {
                                    @Override public void run() {
                                        listener.onResponse(response);
                                    }
                                });
                            } else {
                                listener.onResponse(response);
                            }
                        }

                        @Override public void handleException(RemoteTransportException exp) {
                            // Remote copy failed; fail over to the next copy.
                            onFailure(shard, exp);
                        }

                        @Override public boolean spawn() {
                            // no need to spawn, we will execute the listener on a different thread if needed in handleResponse
                            return false;
                        }
                    });
                    return;
                }
            }
            if (!shardsIt.hasNext()) {
                // All copies failed or none were active; report with the last underlying cause.
                final NoShardAvailableActionException failure = new NoShardAvailableActionException(shards.shardId(), "No shard available for [" + request.type() + "#" + request.id() + "]", lastException);
                if (request.listenerThreaded()) {
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            listener.onFailure(failure);
                        }
                    });
                } else {
                    listener.onFailure(failure);
                }
            }
        }
    }

    /**
     * Handler for the node-level action: re-runs the full action logic on this node as if it
     * had been submitted locally and streams the result (or failure) back over the channel.
     */
    private class TransportHandler extends BaseTransportRequestHandler {

        @Override public Request newInstance() {
            return newRequest();
        }

        @Override public void messageReceived(Request request, final TransportChannel channel) throws Exception {
            // no need to have a threaded listener since we just send back a response
            request.listenerThreaded(false);
            // if we have a local operation, execute it on a thread since we don't spawn
            request.threadedOperation(true);
            execute(request, new ActionListener() {
                @Override public void onResponse(Response result) {
                    try {
                        channel.sendResponse(result);
                    } catch (Exception e) {
                        onFailure(e);
                    }
                }

                @Override public void onFailure(Throwable e) {
                    try {
                        channel.sendResponse(e);
                    } catch (Exception e1) {
                        logger.warn("Failed to send response for get", e1);
                    }
                }
            });
        }

        @Override public boolean spawn() {
            return false;
        }
    }

    /**
     * Handler for the shard-level action: executes the operation directly against the
     * addressed shard and returns the response.
     */
    private class ShardTransportHandler extends BaseTransportRequestHandler {

        @Override public ShardSingleOperationRequest newInstance() {
            return new ShardSingleOperationRequest();
        }

        @Override public void messageReceived(ShardSingleOperationRequest request, TransportChannel channel) throws Exception {
            Response response = shardOperation(request.request(), request.shardId());
            channel.sendResponse(response);
        }
    }

    /**
     * Wire wrapper pairing the original request with the concrete shard id it should hit.
     * Streamed as: the wrapped request first, then the shard id (see readFrom/writeTo).
     */
    protected class ShardSingleOperationRequest implements Streamable {

        private Request request;

        private int shardId;

        // No-arg constructor for deserialization.
        ShardSingleOperationRequest() {
        }

        public ShardSingleOperationRequest(Request request, int shardId) {
            this.request = request;
            this.shardId = shardId;
        }

        public Request request() {
            return request;
        }

        public int shardId() {
            return shardId;
        }

        @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
            request = newRequest();
            request.readFrom(in);
            shardId = in.readInt();
        }

        @Override public void writeTo(DataOutput out) throws IOException {
            request.writeTo(out);
            out.writeInt(shardId);
        }
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
b/modules/elasticsearch/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java new file mode 100644 index 00000000000..bff8995a868 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -0,0 +1,191 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import com.google.inject.CreationException;
import com.google.inject.spi.Message;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.env.Environment;
import org.elasticsearch.jmx.JmxService;
import org.elasticsearch.server.Server;
import org.elasticsearch.server.ServerBuilder;
import org.elasticsearch.server.internal.InternalSettingsPerparer;
import org.elasticsearch.util.Classes;
import org.elasticsearch.util.Tuple;
import org.elasticsearch.util.jline.ANSI;
import org.elasticsearch.util.logging.Loggers;
import org.elasticsearch.util.logging.log4j.LogConfigurator;
import org.elasticsearch.util.settings.Settings;
import org.slf4j.Logger;

import java.io.File;
import java.util.Set;

import static com.google.common.collect.Sets.*;
import static jline.ANSIBuffer.ANSICodes.*;
import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*;
import static org.elasticsearch.util.settings.ImmutableSettings.*;

/**
 * Process-level entry point for an elasticsearch server. Prepares settings and logging,
 * builds the {@link Server}, and exposes the init/start/stop/destroy lifecycle hooks
 * expected by JSVC (Apache Commons Daemon) as well as a plain {@link #main(String[])}.
 *
 * @author kimchy (Shay Banon)
 */
public class Bootstrap {

    // The server instance built by setup(); read by the lifecycle hooks and by main's error path.
    private Server server;

    // Prepares settings and logging, forces a JMX connector when bootstrapping, builds the
    // server, and (optionally) registers a JVM shutdown hook that closes it.
    private void setup(boolean addShutdownHook) throws Exception {
        Tuple tuple = InternalSettingsPerparer.prepareSettings(EMPTY_SETTINGS, true);

        try {
            // Configure log4j only if it is actually on the classpath; it is optional.
            Classes.getDefaultClassLoader().loadClass("org.apache.log4j.Logger");
            LogConfigurator.configure(tuple.v1());
        } catch (ClassNotFoundException e) {
            // no log4j
        } catch (NoClassDefFoundError e) {
            // no log4j
        } catch (Exception e) {
            System.err.println("Failed to configure logging...");
            e.printStackTrace();
        }

        if (tuple.v1().get(JmxService.SettingsConstants.CREATE_CONNECTOR) == null) {
            // automatically create the connector if we are bootstrapping
            Settings updated = settingsBuilder().putAll(tuple.v1()).putBoolean(JmxService.SettingsConstants.CREATE_CONNECTOR, true).build();
            tuple = new Tuple(updated, tuple.v2());
        }

        ServerBuilder serverBuilder = ServerBuilder.serverBuilder().settings(tuple.v1()).loadConfigSettings(false);
        server = serverBuilder.build();
        if (addShutdownHook) {
            Runtime.getRuntime().addShutdownHook(new Thread() {
                @Override public void run() {
                    server.close();
                }
            });
        }
    }

    /**
     * hook for JSVC
     */
    public void init(String[] args) throws Exception {
        setup(true);
    }

    /**
     * hook for JSVC
     */
    public void start() {
        server.start();
    }

    /**
     * hook for JSVC
     */
    public void stop() {
        server.stop();
    }


    /**
     * hook for JSVC
     */
    public void destroy() {
        server.close();
    }


    // Standalone entry point. Honors the es-pidfile and es-foreground system properties
    // (set by the launch scripts); exits with code 3 on any initialization/startup failure.
    public static void main(String[] args) {
        Bootstrap bootstrap = new Bootstrap();
        String pidFile = System.getProperty("es-pidfile");

        boolean foreground = System.getProperty("es-foreground") != null;

        // Tracks which phase failed so the error report distinguishes init from startup.
        String stage = "Initialization";
        try {
            if (!foreground) {
                // Daemon mode: no console output once detached.
                Loggers.disableConsoleLogging();
                System.out.close();
            }
            bootstrap.setup(true);

            if (pidFile != null) {
                // Remove the pid file (written by the launch script) when the JVM exits.
                new File(pidFile).deleteOnExit();
            }

            stage = "Startup";
            bootstrap.start();

            if (!foreground) {
                System.err.close();
            }
        } catch (Throwable e) {
            // Prefer a server-scoped logger (includes the node name) when the server was built.
            Logger logger = Loggers.getLogger(Bootstrap.class);
            if (bootstrap.server != null) {
                logger = Loggers.getLogger(Bootstrap.class, bootstrap.server.settings().get("name"));
            }
            StringBuilder errorMessage = new StringBuilder("{").append(Version.full()).append("}: ");
            try {
                // Color the failed stage red when the terminal supports ANSI.
                if (ANSI.isEnabled()) {
                    errorMessage.append(attrib(ANSI.Code.FG_RED)).append(stage).append(" Failed ...").append(attrib(ANSI.Code.OFF)).append("\n");
                } else {
                    errorMessage.append(stage).append(" Failed ...\n");
                }
            } catch (Throwable t) {
                errorMessage.append(stage).append(" Failed ...\n");
            }
            if (e instanceof CreationException) {
                // Guice reports one Message per binding error; deduplicate identical messages.
                CreationException createException = (CreationException) e;
                Set seenMessages = newHashSet();
                int counter = 1;
                for (Message message : createException.getErrorMessages()) {
                    String detailedMessage;
                    if (message.getCause() == null) {
                        detailedMessage = message.getMessage();
                    } else {
                        detailedMessage = ExceptionsHelper.detailedMessage(message.getCause(), true, 0);
                    }
                    if (detailedMessage == null) {
                        detailedMessage = message.getMessage();
                    }
                    if (seenMessages.contains(detailedMessage)) {
                        continue;
                    }
                    seenMessages.add(detailedMessage);
                    errorMessage.append("").append(counter++).append(") ").append(detailedMessage);
                }
            } else {
                errorMessage.append("- ").append(ExceptionsHelper.detailedMessage(e, true, 0));
            }
            if (foreground) {
                logger.error(errorMessage.toString());
            } else {
                System.err.println(errorMessage);
                System.err.flush();
            }
            Loggers.disableConsoleLogging();
            if (logger.isDebugEnabled()) {
                logger.debug("Exception", e);
            }
            System.exit(3);
        }
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/AdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/AdminClient.java new file mode 100644 index 00000000000..d4283918e2c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/AdminClient.java @@ -0,0 +1,30 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

/**
 * Entry point for administrative actions, split into cluster-level and index-level
 * sub-clients. Obtained from {@code Client#admin()}.
 *
 * @author kimchy (Shay Banon)
 */
public interface AdminClient {

    /** Returns the client for cluster-level administrative operations. */
    ClusterAdminClient cluster();

    /** Returns the client for index-level administrative operations. */
    IndicesAdminClient indices();
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/Client.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/Client.java new file mode 100644 index 00000000000..8cf42b67bef --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/Client.java @@ -0,0 +1,88 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.count.CountRequest;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;

/**
 * One-stop interface for document-level operations against the cluster (index, delete,
 * delete-by-query, get, count, search, search scroll). Every operation comes in three
 * flavors: a future-returning call, a future-returning call that also notifies a listener,
 * and an {@code exec*} variant that only notifies the listener.
 *
 * @author kimchy (Shay Banon)
 */
public interface Client {

    /** Closes the client. */
    void close();

    /** Returns the administrative (cluster/indices) client. */
    AdminClient admin();

    /** Indexes a document; result available through the returned future. */
    ActionFuture index(IndexRequest request);

    /** Indexes a document; the listener is also notified with the result. */
    ActionFuture index(IndexRequest request, ActionListener listener);

    /** Indexes a document, delivering the result only to the listener. */
    void execIndex(IndexRequest request, ActionListener listener);

    /** Deletes a document by id. */
    ActionFuture delete(DeleteRequest request);

    ActionFuture delete(DeleteRequest request, ActionListener listener);

    void execDelete(DeleteRequest request, ActionListener listener);

    /** Deletes all documents matching a query. */
    ActionFuture deleteByQuery(DeleteByQueryRequest request);

    ActionFuture deleteByQuery(DeleteByQueryRequest request, ActionListener listener);

    void execDeleteByQuery(DeleteByQueryRequest request, ActionListener listener);

    /** Gets a document by id. */
    ActionFuture get(GetRequest request);

    ActionFuture get(GetRequest request, ActionListener listener);

    void execGet(GetRequest request, ActionListener listener);

    /** Counts documents matching a query. */
    ActionFuture count(CountRequest request);

    ActionFuture count(CountRequest request, ActionListener listener);

    void execCount(CountRequest request, ActionListener listener);

    /** Executes a search. */
    ActionFuture search(SearchRequest request);

    ActionFuture search(SearchRequest request, ActionListener listener);

    void execSearch(SearchRequest request, ActionListener listener);

    /** Continues a scrolling search using a previously obtained scroll id. */
    ActionFuture searchScroll(SearchScrollRequest request);

    ActionFuture searchScroll(SearchScrollRequest request, ActionListener listener);

    void execSearchScroll(SearchScrollRequest request, ActionListener listener);
}
\ No newline at end of file
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/ClusterAdminClient.java new file mode 100644 index 00000000000..f0949fe4531 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -0,0 +1,69 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingRequest;
import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingResponse;
import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingRequest;
import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingResponse;
import org.elasticsearch.action.admin.cluster.ping.single.SinglePingRequest;
import org.elasticsearch.action.admin.cluster.ping.single.SinglePingResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;

/**
 * Cluster-level administrative operations: cluster state, the three ping variants
 * (single, broadcast, replication), and node info. Each operation follows the same three
 * flavors as {@code Client}: future, future + listener, and listener-only {@code exec*}.
 *
 * @author kimchy (Shay Banon)
 */
public interface ClusterAdminClient {

    /** Retrieves the current cluster state. */
    ActionFuture state(ClusterStateRequest request);

    ActionFuture state(ClusterStateRequest request, ActionListener listener);

    void execState(ClusterStateRequest request, ActionListener listener);

    /** Pings a single shard copy. */
    ActionFuture ping(SinglePingRequest request);

    ActionFuture ping(SinglePingRequest request, ActionListener listener);

    void execPing(SinglePingRequest request, ActionListener listener);

    /** Pings all shard copies of the addressed indices (broadcast). */
    ActionFuture ping(BroadcastPingRequest request);

    ActionFuture ping(BroadcastPingRequest request, ActionListener listener);

    void execPing(BroadcastPingRequest request, ActionListener listener);

    /** Pings through the replication path of the addressed indices. */
    ActionFuture ping(ReplicationPingRequest request);

    ActionFuture ping(ReplicationPingRequest request, ActionListener listener);

    void execPing(ReplicationPingRequest request, ActionListener listener);

    /** Retrieves information about cluster nodes. */
    ActionFuture nodesInfo(NodesInfoRequest request);

    ActionFuture nodesInfo(NodesInfoRequest request, ActionListener listener);

    void execNodesInfo(NodesInfoRequest request, ActionListener listener);
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/IndicesAdminClient.java new file mode 100644 index 00000000000..39da5cfc479 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -0,0 +1,85 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingResponse;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest;
import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;

/**
 * Index-level administrative operations: status, create/delete index, refresh, flush,
 * create mapping, and gateway snapshot. Each operation follows the same three flavors as
 * {@code Client}: future, future + listener, and listener-only {@code exec*}.
 *
 * @author kimchy (Shay Banon)
 */
public interface IndicesAdminClient {

    /** Retrieves the status of one or more indices. */
    ActionFuture status(IndicesStatusRequest request);

    ActionFuture status(IndicesStatusRequest request, ActionListener listener);

    void execStatus(IndicesStatusRequest request, ActionListener listener);

    /** Creates an index. */
    ActionFuture create(CreateIndexRequest request);

    ActionFuture create(CreateIndexRequest request, ActionListener listener);

    void execCreate(CreateIndexRequest request, ActionListener listener);

    /** Deletes an index. */
    ActionFuture delete(DeleteIndexRequest request);

    ActionFuture delete(DeleteIndexRequest request, ActionListener listener);

    void execDelete(DeleteIndexRequest request, ActionListener listener);

    /** Refreshes one or more indices, making recent operations visible to search. */
    ActionFuture refresh(RefreshRequest request);

    ActionFuture refresh(RefreshRequest request, ActionListener listener);

    void execRefresh(RefreshRequest request, ActionListener listener);

    /** Flushes one or more indices. */
    ActionFuture flush(FlushRequest request);

    ActionFuture flush(FlushRequest request, ActionListener listener);

    void execFlush(FlushRequest request, ActionListener listener);

    /** Creates a mapping on one or more indices. */
    ActionFuture createMapping(CreateMappingRequest request);

    ActionFuture createMapping(CreateMappingRequest request, ActionListener listener);

    void execCreateMapping(CreateMappingRequest request, ActionListener listener);

    /** Triggers a gateway snapshot for one or more indices. */
    ActionFuture gatewaySnapshot(GatewaySnapshotRequest request);

    ActionFuture gatewaySnapshot(GatewaySnapshotRequest request, ActionListener listener);

    void execGatewaySnapshot(GatewaySnapshotRequest request, ActionListener listener);
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/Requests.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/Requests.java new file mode 100644 index 00000000000..a0200838c88 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/Requests.java @@ -0,0 +1,126 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingRequest; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingRequest; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest; +import org.elasticsearch.action.count.CountRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchScrollRequest; + +/** + * @author kimchy (Shay Banon) + */ +public class Requests { + + public static IndexRequest indexRequest(String index) { + return new IndexRequest(index); + } + + public static DeleteRequest deleteRequest(String index) { + return new DeleteRequest(index); + } + + public static DeleteByQueryRequest deleteByQueryRequest(String... indices) { + return new DeleteByQueryRequest(indices); + } + + public static GetRequest getRequest(String index) { + return new GetRequest(index); + } + + public static CountRequest countRequest(String... 
indices) { + return new CountRequest(indices); + } + + public static SearchRequest searchRequest(String... index) { + return new SearchRequest(index); + } + + public static SearchScrollRequest searchScrollRequest(String scrollId) { + return new SearchScrollRequest(scrollId); + } + + public static IndicesStatusRequest indicesStatus(String... indices) { + return new IndicesStatusRequest(indices); + } + + public static CreateIndexRequest createIndexRequest(String index) { + return new CreateIndexRequest(index); + } + + public static DeleteIndexRequest deleteIndexRequest(String index) { + return new DeleteIndexRequest(index); + } + + public static CreateMappingRequest createMappingRequest(String... indices) { + return new CreateMappingRequest(indices); + } + + public static RefreshRequest refreshRequest(String... indices) { + return new RefreshRequest(indices); + } + + public static FlushRequest flushRequest(String... indices) { + return new FlushRequest(indices); + } + + public static GatewaySnapshotRequest gatewaySnapshotRequest(String... indices) { + return new GatewaySnapshotRequest(indices); + } + + public static SinglePingRequest pingSingleRequest(String index) { + return new SinglePingRequest(index); + } + + public static BroadcastPingRequest pingBroadcastRequest(String... indices) { + return new BroadcastPingRequest(indices); + } + + public static ReplicationPingRequest pingReplicationRequest(String... indices) { + return new ReplicationPingRequest(indices); + } + + public static NodesInfoRequest nodesInfo() { + return new NodesInfoRequest(); + } + + public static NodesInfoRequest nodesInfo(String... 
nodesIds) { + return new NodesInfoRequest(nodesIds); + } + + public static ClusterStateRequest clusterState() { + return new ClusterStateRequest(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerAdminClient.java new file mode 100644 index 00000000000..2646d2f7837 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerAdminClient.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.client.server;

import com.google.inject.Inject;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.settings.Settings;

/**
 * Server-side (in-process) implementation of {@link AdminClient}: delegates to the
 * Guice-injected cluster and indices admin sub-clients.
 *
 * @author kimchy (Shay Banon)
 */
public class ServerAdminClient extends AbstractComponent implements AdminClient {

    private final ServerIndicesAdminClient indicesAdminClient;

    private final ServerClusterAdminClient clusterAdminClient;

    @Inject public ServerAdminClient(Settings settings, ServerClusterAdminClient clusterAdminClient, ServerIndicesAdminClient indicesAdminClient) {
        super(settings);
        this.indicesAdminClient = indicesAdminClient;
        this.clusterAdminClient = clusterAdminClient;
    }

    @Override public IndicesAdminClient indices() {
        return indicesAdminClient;
    }

    @Override public ClusterAdminClient cluster() {
        return this.clusterAdminClient;
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClient.java new file mode 100644 index 00000000000..12cd21cc0c4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClient.java @@ -0,0 +1,173 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.server; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.count.CountRequest; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.action.count.TransportCountAction; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.delete.TransportDeleteAction; +import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.TransportGetAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.search.*; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ServerClient extends AbstractComponent implements Client { + + private final ServerAdminClient admin; + + private final TransportIndexAction indexAction; + + private final TransportDeleteAction deleteAction; + + 
private final TransportDeleteByQueryAction deleteByQueryAction; + + private final TransportGetAction getAction; + + private final TransportCountAction countAction; + + private final TransportSearchAction searchAction; + + private final TransportSearchScrollAction searchScrollAction; + + @Inject public ServerClient(Settings settings, ServerAdminClient admin, + TransportIndexAction indexAction, TransportDeleteAction deleteAction, + TransportDeleteByQueryAction deleteByQueryAction, TransportGetAction getAction, TransportCountAction countAction, + TransportSearchAction searchAction, TransportSearchScrollAction searchScrollAction) { + super(settings); + this.admin = admin; + this.indexAction = indexAction; + this.deleteAction = deleteAction; + this.deleteByQueryAction = deleteByQueryAction; + this.getAction = getAction; + this.countAction = countAction; + this.searchAction = searchAction; + this.searchScrollAction = searchScrollAction; + } + + @Override public void close() { + // nothing really to do + } + + @Override public AdminClient admin() { + return this.admin; + } + + @Override public ActionFuture index(IndexRequest request) { + return indexAction.submit(request); + } + + @Override public ActionFuture index(IndexRequest request, ActionListener listener) { + return indexAction.submit(request, listener); + } + + @Override public void execIndex(IndexRequest request, ActionListener listener) { + indexAction.execute(request, listener); + } + + @Override public ActionFuture delete(DeleteRequest request) { + return deleteAction.submit(request); + } + + @Override public ActionFuture delete(DeleteRequest request, ActionListener listener) { + return deleteAction.submit(request, listener); + } + + @Override public void execDelete(DeleteRequest request, ActionListener listener) { + deleteAction.execute(request, listener); + } + + @Override public ActionFuture deleteByQuery(DeleteByQueryRequest request) { + return deleteByQueryAction.submit(request); + } + + @Override public 
ActionFuture deleteByQuery(DeleteByQueryRequest request, ActionListener listener) { + return deleteByQueryAction.submit(request, listener); + } + + @Override public void execDeleteByQuery(DeleteByQueryRequest request, ActionListener listener) { + deleteByQueryAction.execute(request, listener); + } + + @Override public ActionFuture get(GetRequest request) { + return getAction.submit(request); + } + + @Override public ActionFuture get(GetRequest request, ActionListener listener) { + return getAction.submit(request, listener); + } + + @Override public void execGet(GetRequest request, ActionListener listener) { + getAction.execute(request, listener); + } + + @Override public ActionFuture count(CountRequest request) { + return countAction.submit(request); + } + + @Override public ActionFuture count(CountRequest request, ActionListener listener) { + return countAction.submit(request, listener); + } + + @Override public void execCount(CountRequest request, ActionListener listener) { + countAction.execute(request, listener); + } + + @Override public ActionFuture search(SearchRequest request) { + return searchAction.submit(request); + } + + @Override public ActionFuture search(SearchRequest request, ActionListener listener) { + return searchAction.submit(request, listener); + } + + @Override public void execSearch(SearchRequest request, ActionListener listener) { + searchAction.execute(request, listener); + } + + @Override public ActionFuture searchScroll(SearchScrollRequest request) { + return searchScrollAction.submit(request); + } + + @Override public ActionFuture searchScroll(SearchScrollRequest request, ActionListener listener) { + return searchScrollAction.submit(request, listener); + } + + @Override public void execSearchScroll(SearchScrollRequest request, ActionListener listener) { + searchScrollAction.execute(request, listener); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClientModule.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClientModule.java new file mode 100644 index 00000000000..3992b8a9266 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClientModule.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.server; + +import com.google.inject.AbstractModule; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.IndicesAdminClient; + +/** + * Guice module that binds the public client interfaces ({@link Client}, + * {@link AdminClient}, {@link ClusterAdminClient}, {@link IndicesAdminClient}) + * to their server-local (in-JVM) implementations, all as eager singletons. + * + * @author kimchy (Shay Banon) + */ +public class ServerClientModule extends AbstractModule { + + @Override protected void configure() { + bind(ClusterAdminClient.class).to(ServerClusterAdminClient.class).asEagerSingleton(); + bind(IndicesAdminClient.class).to(ServerIndicesAdminClient.class).asEagerSingleton(); + bind(AdminClient.class).to(ServerAdminClient.class).asEagerSingleton(); + bind(Client.class).to(ServerClient.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClusterAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClusterAdminClient.java new file mode 100644 index 00000000000..3d73d44600f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerClusterAdminClient.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.server; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfo; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingRequest; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingResponse; +import org.elasticsearch.action.admin.cluster.ping.broadcast.TransportBroadcastPingAction; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingRequest; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingResponse; +import org.elasticsearch.action.admin.cluster.ping.replication.TransportReplicationPingAction; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingRequest; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingResponse; +import org.elasticsearch.action.admin.cluster.ping.single.TransportSinglePingAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ServerClusterAdminClient extends AbstractComponent implements ClusterAdminClient { + + private final TransportClusterStateAction clusterStateAction; + + private final TransportSinglePingAction singlePingAction; + + private final TransportBroadcastPingAction broadcastPingAction; + + private final TransportReplicationPingAction replicationPingAction; + + 
private final TransportNodesInfo nodesInfo; + + @Inject public ServerClusterAdminClient(Settings settings, + TransportClusterStateAction clusterStateAction, + TransportSinglePingAction singlePingAction, TransportBroadcastPingAction broadcastPingAction, TransportReplicationPingAction replicationPingAction, + TransportNodesInfo nodesInfo) { + super(settings); + this.clusterStateAction = clusterStateAction; + this.nodesInfo = nodesInfo; + this.singlePingAction = singlePingAction; + this.broadcastPingAction = broadcastPingAction; + this.replicationPingAction = replicationPingAction; + } + + @Override public ActionFuture state(ClusterStateRequest request) { + return clusterStateAction.submit(request); + } + + @Override public ActionFuture state(ClusterStateRequest request, ActionListener listener) { + return clusterStateAction.submit(request, listener); + } + + @Override public void execState(ClusterStateRequest request, ActionListener listener) { + clusterStateAction.execute(request, listener); + } + + @Override public ActionFuture ping(SinglePingRequest request) { + return singlePingAction.submit(request); + } + + @Override public ActionFuture ping(SinglePingRequest request, ActionListener listener) { + return singlePingAction.submit(request, listener); + } + + @Override public void execPing(SinglePingRequest request, ActionListener listener) { + singlePingAction.execute(request, listener); + } + + @Override public ActionFuture ping(BroadcastPingRequest request) { + return broadcastPingAction.submit(request); + } + + @Override public ActionFuture ping(BroadcastPingRequest request, ActionListener listener) { + return broadcastPingAction.submit(request, listener); + } + + @Override public void execPing(BroadcastPingRequest request, ActionListener listener) { + broadcastPingAction.execute(request, listener); + } + + @Override public ActionFuture ping(ReplicationPingRequest request) { + return replicationPingAction.submit(request); + } + + @Override public ActionFuture 
ping(ReplicationPingRequest request, ActionListener listener) { + return replicationPingAction.submit(request, listener); + } + + @Override public void execPing(ReplicationPingRequest request, ActionListener listener) { + replicationPingAction.execute(request, listener); + } + + @Override public ActionFuture nodesInfo(NodesInfoRequest request) { + return nodesInfo.submit(request); + } + + @Override public ActionFuture nodesInfo(NodesInfoRequest request, ActionListener listener) { + return nodesInfo.submit(request, listener); + } + + @Override public void execNodesInfo(NodesInfoRequest request, ActionListener listener) { + nodesInfo.execute(request, listener); + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerIndicesAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerIndicesAdminClient.java new file mode 100644 index 00000000000..9ca22f359aa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/server/ServerIndicesAdminClient.java @@ -0,0 +1,166 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.server; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse; +import org.elasticsearch.action.admin.indices.gateway.snapshot.TransportGatewaySnapshotAction; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingResponse; +import org.elasticsearch.action.admin.indices.mapping.create.TransportCreateMappingAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction; +import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest; +import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse; +import org.elasticsearch.action.admin.indices.status.TransportIndicesStatusAction; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.util.component.AbstractComponent; +import 
org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ServerIndicesAdminClient extends AbstractComponent implements IndicesAdminClient { + + private final TransportIndicesStatusAction indicesStatusAction; + + private final TransportCreateIndexAction createIndexAction; + + private final TransportDeleteIndexAction deleteIndexAction; + + private final TransportRefreshAction refreshAction; + + private final TransportFlushAction flushAction; + + private final TransportCreateMappingAction createMappingAction; + + private final TransportGatewaySnapshotAction gatewaySnapshotAction; + + @Inject public ServerIndicesAdminClient(Settings settings, TransportIndicesStatusAction indicesStatusAction, + TransportCreateIndexAction createIndexAction, TransportDeleteIndexAction deleteIndexAction, + TransportRefreshAction refreshAction, TransportFlushAction flushAction, + TransportCreateMappingAction createMappingAction, TransportGatewaySnapshotAction gatewaySnapshotAction) { + super(settings); + this.indicesStatusAction = indicesStatusAction; + this.createIndexAction = createIndexAction; + this.deleteIndexAction = deleteIndexAction; + this.refreshAction = refreshAction; + this.flushAction = flushAction; + this.createMappingAction = createMappingAction; + this.gatewaySnapshotAction = gatewaySnapshotAction; + } + + @Override public ActionFuture status(IndicesStatusRequest request) { + return indicesStatusAction.submit(request); + } + + @Override public ActionFuture status(IndicesStatusRequest request, ActionListener listener) { + return indicesStatusAction.submit(request, listener); + } + + @Override public void execStatus(IndicesStatusRequest request, ActionListener listener) { + indicesStatusAction.execute(request, listener); + } + + @Override public ActionFuture create(CreateIndexRequest request) { + return createIndexAction.submit(request); + } + + @Override public ActionFuture create(CreateIndexRequest request, ActionListener listener) 
{ + return createIndexAction.submit(request, listener); + } + + @Override public void execCreate(CreateIndexRequest request, ActionListener listener) { + createIndexAction.execute(request, listener); + } + + @Override public ActionFuture delete(DeleteIndexRequest request) { + return deleteIndexAction.submit(request); + } + + @Override public ActionFuture delete(DeleteIndexRequest request, ActionListener listener) { + return deleteIndexAction.submit(request, listener); + } + + @Override public void execDelete(DeleteIndexRequest request, ActionListener listener) { + deleteIndexAction.execute(request, listener); + } + + @Override public ActionFuture refresh(RefreshRequest request) { + return refreshAction.submit(request); + } + + @Override public ActionFuture refresh(RefreshRequest request, ActionListener listener) { + return refreshAction.submit(request, listener); + } + + @Override public void execRefresh(RefreshRequest request, ActionListener listener) { + refreshAction.execute(request, listener); + } + + @Override public ActionFuture flush(FlushRequest request) { + return flushAction.submit(request); + } + + @Override public ActionFuture flush(FlushRequest request, ActionListener listener) { + return flushAction.submit(request, listener); + } + + @Override public void execFlush(FlushRequest request, ActionListener listener) { + flushAction.execute(request, listener); + } + + @Override public ActionFuture createMapping(CreateMappingRequest request) { + return createMappingAction.submit(request); + } + + @Override public ActionFuture createMapping(CreateMappingRequest request, ActionListener listener) { + // Delegate to the transport action like every sibling method. Previously this + // returned createMapping(request, listener) — a recursive self-call that never + // reached the action and would blow the stack (StackOverflowError) on first use. + return createMappingAction.submit(request, listener); + } + + @Override public void execCreateMapping(CreateMappingRequest request, ActionListener listener) { + createMappingAction.execute(request, listener); + } + + @Override public ActionFuture gatewaySnapshot(GatewaySnapshotRequest request) { + return gatewaySnapshotAction.submit(request); + } + + @Override public 
ActionFuture gatewaySnapshot(GatewaySnapshotRequest request, ActionListener listener) { + return gatewaySnapshotAction.submit(request, listener); + } + + @Override public void execGatewaySnapshot(GatewaySnapshotRequest request, ActionListener listener) { + gatewaySnapshotAction.execute(request, listener); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java new file mode 100644 index 00000000000..668840bb056 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport; + +import com.google.inject.AbstractModule; +import org.elasticsearch.client.transport.support.InternalTransportAdminClient; +import org.elasticsearch.client.transport.support.InternalTransportClient; +import org.elasticsearch.client.transport.support.InternalTransportClusterAdminClient; +import org.elasticsearch.client.transport.support.InternalTransportIndicesAdminClient; + +/** + * Guice module for the transport client: eagerly binds the internal transport + * client/admin implementations and the {@link TransportClientNodesService} that + * tracks the remote nodes the client talks to. + * + * @author kimchy (Shay Banon) + */ +public class ClientTransportModule extends AbstractModule { + + @Override protected void configure() { + bind(InternalTransportClient.class).asEagerSingleton(); + bind(InternalTransportAdminClient.class).asEagerSingleton(); + bind(InternalTransportIndicesAdminClient.class).asEagerSingleton(); + bind(InternalTransportClusterAdminClient.class).asEagerSingleton(); + bind(TransportClientNodesService.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java new file mode 100644 index 00000000000..2ddfe4b1f9a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport; + +import org.elasticsearch.ElasticSearchException; + +/** + * Thrown when an operation cannot be executed because no node is available + * to the transport client. + * + * @author kimchy (Shay Banon) + */ +public class NoNodeAvailableException extends ElasticSearchException { + + public NoNodeAvailableException() { + super("No node available"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClient.java new file mode 100644 index 00000000000..1fddf91cb57 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -0,0 +1,263 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport; + +import com.google.common.collect.ImmutableList; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Module; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.count.CountRequest; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.action.ClientTransportActionModule; +import org.elasticsearch.client.transport.support.InternalTransportClient; +import org.elasticsearch.cluster.ClusterNameModule; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.EnvironmentModule; +import org.elasticsearch.server.internal.InternalSettingsPerparer; +import org.elasticsearch.threadpool.ThreadPoolModule; +import org.elasticsearch.transport.TransportModule; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.Tuple; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.settings.SettingsModule; +import 
org.elasticsearch.util.transport.TransportAddress; + +import java.util.ArrayList; + +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportClient implements Client { + + private final Injector injector; + + private final Settings settings; + + private final Environment environment; + + + private final TransportClientNodesService nodesService; + + private final InternalTransportClient internalClient; + + + public TransportClient() throws ElasticSearchException { + this(ImmutableSettings.Builder.EMPTY_SETTINGS, true); + } + + public TransportClient(Settings settings) { + this(settings, true); + } + + public TransportClient(Settings pSettings, boolean loadConfigSettings) throws ElasticSearchException { + Tuple tuple = InternalSettingsPerparer.prepareSettings(pSettings, loadConfigSettings); + this.settings = settingsBuilder().putAll(tuple.v1()) + .putBoolean("network.server", false) + .putBoolean("discovery.client", true) + .build(); + this.environment = tuple.v2(); + + ArrayList modules = new ArrayList(); + modules.add(new EnvironmentModule(environment)); + modules.add(new SettingsModule(settings)); + modules.add(new ClusterNameModule(settings)); + modules.add(new ThreadPoolModule(settings)); + modules.add(new TransportModule(settings)); + modules.add(new ClientTransportActionModule()); + modules.add(new ClientTransportModule()); + + // disabled, still having problems with jgroups acting just as client + if (settings.getAsBoolean("discovery.enabled", true) && false) { + modules.add(new TransportClientClusterModule(settings)); + } + + injector = Guice.createInjector(modules); + + injector.getInstance(TransportService.class).start(); + try { + injector.getInstance(TransportClientClusterService.class).start(); + } catch (Exception e) { + // ignore + } + + nodesService = injector.getInstance(TransportClientNodesService.class); + internalClient = 
injector.getInstance(InternalTransportClient.class); + } + + /** + * Returns the current registered transport addresses to use (added using + * {@link #addTransportAddress(org.elasticsearch.util.transport.TransportAddress)}. + */ + public ImmutableList transportAddresses() { + return nodesService.transportAddresses(); + } + + /** + * Returns the current connected transport nodes that this client will use. + * + *

The nodes include all the nodes that are currently alive based on the transport + * addresses provided. + */ + public ImmutableList connectedNodes() { + return nodesService.connectedNodes(); + } + + /** + * Adds a transport address that will be used to connect to. + * + *

The Node this transport address represents will be used if its possible to connect to it. + * If it is unavailable, it will be automatically connected to once it is up. + * + *

In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}. + */ + public TransportClient addTransportAddress(TransportAddress transportAddress) { + nodesService.addTransportAddress(transportAddress); + return this; + } + + /** + * Removes a transport address from the list of transport addresses that are used to connect to. + */ + public TransportClient removeTransportAddress(TransportAddress transportAddress) { + nodesService.removeTransportAddress(transportAddress); + return this; + } + + /** + * Closes the client. + */ + @Override public void close() { + try { + injector.getInstance(TransportClientClusterService.class).close(); + } catch (Exception e) { + // ignore + } + injector.getInstance(TransportClientNodesService.class).close(); + injector.getInstance(TransportService.class).close(); + } + + @Override public AdminClient admin() { + return internalClient.admin(); + } + + @Override public ActionFuture index(IndexRequest request) { + return internalClient.index(request); + } + + @Override public ActionFuture index(IndexRequest request, ActionListener listener) { + return internalClient.index(request, listener); + } + + @Override public void execIndex(IndexRequest request, ActionListener listener) { + internalClient.execIndex(request, listener); + } + + @Override public ActionFuture delete(DeleteRequest request) { + return internalClient.delete(request); + } + + @Override public ActionFuture delete(DeleteRequest request, ActionListener listener) { + return internalClient.delete(request, listener); + } + + @Override public void execDelete(DeleteRequest request, ActionListener listener) { + internalClient.execDelete(request, listener); + } + + @Override public ActionFuture deleteByQuery(DeleteByQueryRequest request) { + return internalClient.deleteByQuery(request); + } + + @Override public ActionFuture deleteByQuery(DeleteByQueryRequest request, ActionListener listener) { + return internalClient.deleteByQuery(request, 
listener); + } + + @Override public void execDeleteByQuery(DeleteByQueryRequest request, ActionListener listener) { + internalClient.execDeleteByQuery(request, listener); + } + + @Override public ActionFuture get(GetRequest request) { + return internalClient.get(request); + } + + @Override public ActionFuture get(GetRequest request, ActionListener listener) { + return internalClient.get(request, listener); + } + + @Override public void execGet(GetRequest request, ActionListener listener) { + internalClient.execGet(request, listener); + } + + @Override public ActionFuture count(CountRequest request) { + return internalClient.count(request); + } + + @Override public ActionFuture count(CountRequest request, ActionListener listener) { + return internalClient.count(request, listener); + } + + @Override public void execCount(CountRequest request, ActionListener listener) { + internalClient.execCount(request, listener); + } + + @Override public ActionFuture search(SearchRequest request) { + return internalClient.search(request); + } + + @Override public ActionFuture search(SearchRequest request, ActionListener listener) { + return internalClient.search(request, listener); + } + + @Override public void execSearch(SearchRequest request, ActionListener listener) { + internalClient.execSearch(request, listener); + } + + @Override public ActionFuture searchScroll(SearchScrollRequest request) { + return internalClient.searchScroll(request); + } + + @Override public ActionFuture searchScroll(SearchScrollRequest request, ActionListener listener) { + return internalClient.searchScroll(request, listener); + } + + @Override public void execSearchScroll(SearchScrollRequest request, ActionListener listener) { + internalClient.execSearchScroll(request, listener); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientClusterModule.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientClusterModule.java new file mode 100644 index 00000000000..6fd16fef307 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientClusterModule.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport; + +import com.google.inject.AbstractModule; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.DefaultClusterService; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.settings.NoClassSettingsException; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportClientClusterModule extends AbstractModule { + + private final Settings settings; + + public TransportClientClusterModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + try { + new DiscoveryModule(settings).configure(binder()); + bind(ClusterService.class).to(DefaultClusterService.class).asEagerSingleton(); + bind(TransportClientClusterService.class).asEagerSingleton(); + } catch (NoClassSettingsException e) { + // that's fine, no actual implementation for discovery + } catch (Exception e) { + Loggers.getLogger(getClass(), settings).warn("Failed to load discovery", e); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientClusterService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientClusterService.java new file mode 100644 index 00000000000..891dc2bdf2b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientClusterService.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport; + +import com.google.inject.Inject; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.discovery.DiscoveryService; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportClientClusterService extends AbstractComponent { + + private final ClusterService clusterService; + + private final TransportClientNodesService nodesService; + + private final DiscoveryService discoveryService; + + @Inject public TransportClientClusterService(Settings settings, ClusterService clusterService, TransportClientNodesService nodesService, + DiscoveryService discoveryService) { + super(settings); + this.clusterService = clusterService; + this.nodesService = nodesService; + this.discoveryService = discoveryService; + + clusterService.add(nodesService); + } + + public void start() { + clusterService.add(nodesService); + clusterService.start(); + discoveryService.start(); + } + + public void close() { + clusterService.remove(nodesService); + clusterService.close(); + discoveryService.close(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java new file mode 100644 index 00000000000..9be4f8a977a --- /dev/null +++ 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.transport;

import com.google.common.collect.ImmutableList;
import com.google.inject.Inject;
import org.elasticsearch.action.TransportActions;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.node.Node;
import org.elasticsearch.cluster.node.Nodes;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.settings.Settings;
import org.elasticsearch.util.transport.TransportAddress;

import java.util.HashSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.util.TimeValue.*;

/**
 * Maintains the list of nodes the transport client talks to. Nodes come from two
 * sources: explicitly configured transport addresses (periodically sampled by sending
 * a local nodes-info request to each address) and, when discovery is enabled, nodes
 * observed through cluster state updates (see {@link #clusterChanged}).
 *
 * @author kimchy (Shay Banon)
 */
public class TransportClientNodesService extends AbstractComponent implements ClusterStateListener {

    private final TimeValue nodesSamplerInterval;

    private final ClusterName clusterName;

    private final TransportService transportService;

    private final ThreadPool threadPool;

    // configured seed addresses; writes are guarded by transportMutex, reads are lock-free
    private volatile ImmutableList<TransportAddress> transportAddresses = ImmutableList.of();

    private final Object transportMutex = new Object();

    // current set of usable nodes (sampled from seed addresses, merged with discovered ones)
    private volatile ImmutableList<Node> nodes = ImmutableList.of();

    // last nodes seen via a cluster state update; null until discovery delivers one
    private volatile Nodes discoveredNodes;

    private final AtomicInteger tempNodeIdGenerator = new AtomicInteger();

    private final ScheduledNodesSampler nodesSampler = new ScheduledNodesSampler();

    private final ScheduledFuture<?> nodesSamplerFuture;

    private final AtomicInteger randomNodeGenerator = new AtomicInteger();

    @Inject public TransportClientNodesService(Settings settings, ClusterName clusterName,
                                               TransportService transportService, ThreadPool threadPool) {
        super(settings);
        this.clusterName = clusterName;
        this.transportService = transportService;
        this.threadPool = threadPool;

        this.nodesSamplerInterval = componentSettings.getAsTime("nodesSamplerInterval", timeValueSeconds(1));

        this.nodesSamplerFuture = threadPool.scheduleWithFixedDelay(nodesSampler, nodesSamplerInterval);
    }

    /**
     * The configured transport addresses this client samples nodes from.
     */
    public ImmutableList<TransportAddress> transportAddresses() {
        return this.transportAddresses;
    }

    /**
     * The nodes currently considered usable by the client.
     */
    public ImmutableList<Node> connectedNodes() {
        return this.nodes;
    }

    public TransportClientNodesService addTransportAddress(TransportAddress transportAddress) {
        synchronized (transportMutex) {
            ImmutableList.Builder<TransportAddress> builder = ImmutableList.builder();
            transportAddresses = builder.addAll(transportAddresses).add(transportAddress).build();
        }
        // sample synchronously so the new address is usable immediately
        nodesSampler.run();
        return this;
    }

    public TransportClientNodesService removeTransportAddress(TransportAddress transportAddress) {
        synchronized (transportMutex) {
            ImmutableList.Builder<TransportAddress> builder = ImmutableList.builder();
            for (TransportAddress otherTransportAddress : transportAddresses) {
                if (!otherTransportAddress.equals(transportAddress)) {
                    builder.add(otherTransportAddress);
                }
            }
            transportAddresses = builder.build();
        }
        nodesSampler.run();
        return this;
    }

    /**
     * Picks a node round-robin style.
     *
     * @throws NoNodeAvailableException when no node is currently connected
     */
    public Node randomNode() {
        ImmutableList<Node> nodes = this.nodes;
        if (nodes.isEmpty()) {
            throw new NoNodeAvailableException();
        }
        // Mask the sign bit instead of Math.abs: Math.abs(Integer.MIN_VALUE) is still
        // negative, so the original abs-based code would eventually throw
        // ArrayIndexOutOfBoundsException once the counter wrapped around.
        int index = (randomNodeGenerator.incrementAndGet() & Integer.MAX_VALUE) % nodes.size();
        return nodes.get(index);
    }

    public void close() {
        nodesSamplerFuture.cancel(true);
    }

    @Override public void clusterChanged(ClusterChangedEvent event) {
        transportService.nodesAdded(event.nodesDelta().addedNodes());
        this.discoveredNodes = event.state().nodes();
        HashSet<Node> newNodes = new HashSet<Node>(nodes);
        newNodes.addAll(discoveredNodes.nodes().values());
        nodes = new ImmutableList.Builder<Node>().addAll(newNodes).build();
        transportService.nodesRemoved(event.nodesDelta().removedNodes());
    }

    /**
     * Periodically contacts each configured transport address with a local nodes-info
     * request and rebuilds the node list from the responses (merged with any nodes
     * known through discovery). Synchronized so a scheduled run and an explicit run
     * triggered by add/removeTransportAddress cannot interleave.
     */
    private class ScheduledNodesSampler implements Runnable {

        @Override public synchronized void run() {
            ImmutableList<TransportAddress> transportAddresses = TransportClientNodesService.this.transportAddresses;
            final CountDownLatch latch = new CountDownLatch(transportAddresses.size());
            final CopyOnWriteArrayList<NodesInfoResponse> nodesInfoResponses = new CopyOnWriteArrayList<NodesInfoResponse>();
            final CopyOnWriteArrayList<Node> tempNodes = new CopyOnWriteArrayList<Node>();
            for (final TransportAddress transportAddress : transportAddresses) {
                threadPool.execute(new Runnable() {
                    @Override public void run() {
                        // a throwaway node identity is needed to be able to send a
                        // request before we know the real node behind the address
                        Node tempNode = new Node("#temp#-" + tempNodeIdGenerator.incrementAndGet(), transportAddress);
                        tempNodes.add(tempNode);
                        try {
                            transportService.nodesAdded(ImmutableList.of(tempNode));
                            transportService.sendRequest(tempNode, TransportActions.Admin.Cluster.Node.INFO, Requests.nodesInfo("_local"), new BaseTransportResponseHandler<NodesInfoResponse>() {

                                @Override public NodesInfoResponse newInstance() {
                                    return new NodesInfoResponse();
                                }

                                @Override public void handleResponse(NodesInfoResponse response) {
                                    nodesInfoResponses.add(response);
                                    latch.countDown();
                                }

                                @Override public void handleException(RemoteTransportException exp) {
                                    logger.debug("Failed to get node info from " + transportAddress + ", removed from nodes list", exp);
                                    // always count down, otherwise the sampler blocks forever
                                    latch.countDown();
                                }
                            });
                        } catch (Exception e) {
                            logger.debug("Failed to get node info from " + transportAddress + ", removed from nodes list", e);
                            latch.countDown();
                        }
                    }
                });
            }

            try {
                latch.await();
            } catch (InterruptedException e) {
                // preserve the interrupt for the scheduler and abort this sample
                Thread.currentThread().interrupt();
                return;
            }

            HashSet<Node> newNodes = new HashSet<Node>();
            for (NodesInfoResponse nodesInfoResponse : nodesInfoResponses) {
                if (nodesInfoResponse.nodes().length > 0) {
                    Node node = nodesInfoResponse.nodes()[0].node();
                    if (!clusterName.equals(nodesInfoResponse.clusterName())) {
                        // the address answered but belongs to a different cluster
                        logger.warn("Node {} not part of the cluster {}, ignoring...", node, clusterName);
                    } else {
                        newNodes.add(node);
                    }
                } else {
                    // should not really happen....
                    logger.debug("No info returned from node...");
                }
            }
            if (discoveredNodes != null) {
                newNodes.addAll(discoveredNodes.nodes().values());
            }
            nodes = new ImmutableList.Builder<Node>().addAll(newNodes).build();

            // drop the throwaway node identities created above
            transportService.nodesRemoved(tempNodes);
        }
    }
}
+ */ + +package org.elasticsearch.client.transport.action; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.util.Nullable; + +/** + * @author kimchy (Shay Banon) + */ +public interface ClientTransportAction { + + ActionFuture submit(Node node, Request request) throws ElasticSearchException; + + ActionFuture submit(Node node, Request request, @Nullable ActionListener listener); + + void execute(Node node, Request request, ActionListener listener); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/ClientTransportActionModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/ClientTransportActionModule.java new file mode 100644 index 00000000000..c71403e5880 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/ClientTransportActionModule.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.action; + +import com.google.inject.AbstractModule; +import org.elasticsearch.client.transport.action.admin.cluster.node.info.ClientTransportNodesInfoAction; +import org.elasticsearch.client.transport.action.admin.cluster.ping.broadcast.ClientTransportBroadcastPingAction; +import org.elasticsearch.client.transport.action.admin.cluster.ping.replication.ClientTransportReplicationPingAction; +import org.elasticsearch.client.transport.action.admin.cluster.ping.single.ClientTransportSinglePingAction; +import org.elasticsearch.client.transport.action.admin.cluster.state.ClientTransportClusterStateAction; +import org.elasticsearch.client.transport.action.admin.indices.create.ClientTransportCreateIndexAction; +import org.elasticsearch.client.transport.action.admin.indices.delete.ClientTransportDeleteIndexAction; +import org.elasticsearch.client.transport.action.admin.indices.flush.ClientTransportFlushAction; +import org.elasticsearch.client.transport.action.admin.indices.gateway.snapshot.ClientTransportGatewaySnapshotAction; +import org.elasticsearch.client.transport.action.admin.indices.mapping.create.ClientTransportCreateMappingAction; +import org.elasticsearch.client.transport.action.admin.indices.refresh.ClientTransportRefreshAction; +import org.elasticsearch.client.transport.action.admin.indices.status.ClientTransportIndicesStatusAction; +import org.elasticsearch.client.transport.action.count.ClientTransportCountAction; +import org.elasticsearch.client.transport.action.delete.ClientTransportDeleteAction; +import org.elasticsearch.client.transport.action.deletebyquery.ClientTransportDeleteByQueryAction; +import org.elasticsearch.client.transport.action.get.ClientTransportGetAction; +import org.elasticsearch.client.transport.action.index.ClientTransportIndexAction; +import org.elasticsearch.client.transport.action.search.ClientTransportSearchAction; +import 
org.elasticsearch.client.transport.action.search.ClientTransportSearchScrollAction; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportActionModule extends AbstractModule { + + @Override protected void configure() { + bind(ClientTransportIndexAction.class).asEagerSingleton(); + bind(ClientTransportDeleteAction.class).asEagerSingleton(); + bind(ClientTransportDeleteByQueryAction.class).asEagerSingleton(); + bind(ClientTransportGetAction.class).asEagerSingleton(); + bind(ClientTransportCountAction.class).asEagerSingleton(); + bind(ClientTransportSearchAction.class).asEagerSingleton(); + bind(ClientTransportSearchScrollAction.class).asEagerSingleton(); + + bind(ClientTransportIndicesStatusAction.class).asEagerSingleton(); + bind(ClientTransportRefreshAction.class).asEagerSingleton(); + bind(ClientTransportFlushAction.class).asEagerSingleton(); + bind(ClientTransportCreateIndexAction.class).asEagerSingleton(); + bind(ClientTransportDeleteIndexAction.class).asEagerSingleton(); + bind(ClientTransportCreateMappingAction.class).asEagerSingleton(); + bind(ClientTransportGatewaySnapshotAction.class).asEagerSingleton(); + bind(ClientTransportNodesInfoAction.class).asEagerSingleton(); + bind(ClientTransportSinglePingAction.class).asEagerSingleton(); + bind(ClientTransportReplicationPingAction.class).asEagerSingleton(); + bind(ClientTransportBroadcastPingAction.class).asEagerSingleton(); + bind(ClientTransportClusterStateAction.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/node/info/ClientTransportNodesInfoAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/node/info/ClientTransportNodesInfoAction.java new file mode 100644 index 00000000000..db8be626662 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/node/info/ClientTransportNodesInfoAction.java @@ 
-0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.cluster.node.info; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportNodesInfoAction extends BaseClientTransportAction { + + @Inject public ClientTransportNodesInfoAction(Settings settings, TransportService transportService) { + super(settings, transportService, NodesInfoResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Cluster.Node.INFO; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/broadcast/ClientTransportBroadcastPingAction.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/broadcast/ClientTransportBroadcastPingAction.java new file mode 100644 index 00000000000..38c97828a3d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/broadcast/ClientTransportBroadcastPingAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.action.admin.cluster.ping.broadcast; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingRequest; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportBroadcastPingAction extends BaseClientTransportAction { + + @Inject public ClientTransportBroadcastPingAction(Settings settings, TransportService transportService) { + super(settings, transportService, BroadcastPingResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Cluster.Ping.BROADCAST; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/replication/ClientTransportReplicationPingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/replication/ClientTransportReplicationPingAction.java new file mode 100644 index 00000000000..4471b374df5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/replication/ClientTransportReplicationPingAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.cluster.ping.replication; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingRequest; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportReplicationPingAction extends BaseClientTransportAction { + + @Inject public ClientTransportReplicationPingAction(Settings settings, TransportService transportService) { + super(settings, transportService, ReplicationPingResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Cluster.Ping.REPLICATION; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/single/ClientTransportSinglePingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/single/ClientTransportSinglePingAction.java new file mode 100644 index 00000000000..1b32a207eb5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/ping/single/ClientTransportSinglePingAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.cluster.ping.single; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingRequest; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportSinglePingAction extends BaseClientTransportAction { + + @Inject public ClientTransportSinglePingAction(Settings settings, TransportService transportService) { + super(settings, transportService, SinglePingResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Cluster.Ping.SINGLE; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/state/ClientTransportClusterStateAction.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/state/ClientTransportClusterStateAction.java new file mode 100644 index 00000000000..df9f21a0ded --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/cluster/state/ClientTransportClusterStateAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.action.admin.cluster.state; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportClusterStateAction extends BaseClientTransportAction { + + @Inject public ClientTransportClusterStateAction(Settings settings, TransportService transportService) { + super(settings, transportService, ClusterStateResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Cluster.STATE; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/create/ClientTransportCreateIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/create/ClientTransportCreateIndexAction.java new file mode 100644 index 00000000000..c03a5a109f0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/create/ClientTransportCreateIndexAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.indices.create; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportCreateIndexAction extends BaseClientTransportAction { + + @Inject public ClientTransportCreateIndexAction(Settings settings, TransportService transportService) { + super(settings, transportService, CreateIndexResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.CREATE; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/delete/ClientTransportDeleteIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/delete/ClientTransportDeleteIndexAction.java new file mode 100644 index 00000000000..a8512c7ce84 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/delete/ClientTransportDeleteIndexAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.indices.delete; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportDeleteIndexAction extends BaseClientTransportAction { + + @Inject public ClientTransportDeleteIndexAction(Settings settings, TransportService transportService) { + super(settings, transportService, DeleteIndexResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.DELETE; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/flush/ClientTransportFlushAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/flush/ClientTransportFlushAction.java new file mode 100644 index 00000000000..eafe41d0730 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/flush/ClientTransportFlushAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.indices.flush; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportFlushAction extends BaseClientTransportAction { + + @Inject public ClientTransportFlushAction(Settings settings, TransportService transportService) { + super(settings, transportService, FlushResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.FLUSH; + } +} \ No newline at end of file diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/gateway/snapshot/ClientTransportGatewaySnapshotAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/gateway/snapshot/ClientTransportGatewaySnapshotAction.java new file mode 100644 index 00000000000..f865d3d36a8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/gateway/snapshot/ClientTransportGatewaySnapshotAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.action.admin.indices.gateway.snapshot; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportGatewaySnapshotAction extends BaseClientTransportAction { + + @Inject public ClientTransportGatewaySnapshotAction(Settings settings, TransportService transportService) { + super(settings, transportService, GatewaySnapshotResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.Gateway.SNAPSHOT; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/mapping/create/ClientTransportCreateMappingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/mapping/create/ClientTransportCreateMappingAction.java new file mode 100644 index 00000000000..2f0ebb840d3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/mapping/create/ClientTransportCreateMappingAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.indices.mapping.create; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportCreateMappingAction extends BaseClientTransportAction { + + @Inject public ClientTransportCreateMappingAction(Settings settings, TransportService transportService) { + super(settings, transportService, CreateMappingResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.Mapping.CREATE; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/refresh/ClientTransportRefreshAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/refresh/ClientTransportRefreshAction.java new file mode 100644 index 00000000000..d0b45a99a86 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/refresh/ClientTransportRefreshAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.indices.refresh; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportRefreshAction extends BaseClientTransportAction { + + @Inject public ClientTransportRefreshAction(Settings settings, TransportService transportService) { + super(settings, transportService, RefreshResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.REFRESH; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/status/ClientTransportIndicesStatusAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/status/ClientTransportIndicesStatusAction.java new file mode 100644 index 00000000000..e0e350c16b8 --- /dev/null 
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/admin/indices/status/ClientTransportIndicesStatusAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.admin.indices.status; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest; +import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportIndicesStatusAction extends BaseClientTransportAction { + + @Inject public ClientTransportIndicesStatusAction(Settings settings, TransportService transportService) { + super(settings, transportService, IndicesStatusResponse.class); + } + + @Override protected String action() { + return TransportActions.Admin.Indices.STATUS; + } +} \ No newline at end of file diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/count/ClientTransportCountAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/count/ClientTransportCountAction.java new file mode 100644 index 00000000000..cb3681ee17c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/count/ClientTransportCountAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.action.count; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.count.CountRequest; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportCountAction extends BaseClientTransportAction { + + @Inject public ClientTransportCountAction(Settings settings, TransportService transportService) { + super(settings, transportService, CountResponse.class); + } + + @Override protected String action() { + return TransportActions.COUNT; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/delete/ClientTransportDeleteAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/delete/ClientTransportDeleteAction.java new file mode 100644 index 00000000000..13183ea5053 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/delete/ClientTransportDeleteAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.delete; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportDeleteAction extends BaseClientTransportAction { + + @Inject public ClientTransportDeleteAction(Settings settings, TransportService transportService) { + super(settings, transportService, DeleteResponse.class); + } + + @Override protected String action() { + return TransportActions.DELETE; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/deletebyquery/ClientTransportDeleteByQueryAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/deletebyquery/ClientTransportDeleteByQueryAction.java new file mode 100644 index 00000000000..457b70812ff --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/deletebyquery/ClientTransportDeleteByQueryAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.deletebyquery; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportDeleteByQueryAction extends BaseClientTransportAction { + + @Inject public ClientTransportDeleteByQueryAction(Settings settings, TransportService transportService) { + super(settings, transportService, DeleteByQueryResponse.class); + } + + @Override protected String action() { + return TransportActions.DELETE_BY_QUERY; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/get/ClientTransportGetAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/get/ClientTransportGetAction.java new file mode 100644 index 00000000000..c5bd8863d37 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/get/ClientTransportGetAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay 
Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.get; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportGetAction extends BaseClientTransportAction { + + @Inject public ClientTransportGetAction(Settings settings, TransportService transportService) { + super(settings, transportService, GetResponse.class); + } + + @Override protected String action() { + return TransportActions.GET; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/index/ClientTransportIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/index/ClientTransportIndexAction.java new file mode 100644 index 00000000000..2b94dc8d2f8 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/index/ClientTransportIndexAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.index; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportIndexAction extends BaseClientTransportAction { + + @Inject public ClientTransportIndexAction(Settings settings, TransportService transportService) { + super(settings, transportService, IndexResponse.class); + } + + @Override protected String action() { + return TransportActions.INDEX; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/search/ClientTransportSearchAction.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/search/ClientTransportSearchAction.java new file mode 100644 index 00000000000..f0822efcc3f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/search/ClientTransportSearchAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.action.search; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportSearchAction extends BaseClientTransportAction { + + @Inject public ClientTransportSearchAction(Settings settings, TransportService transportService) { + super(settings, transportService, SearchResponse.class); + } + + @Override protected String action() { + return TransportActions.SEARCH; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/search/ClientTransportSearchScrollAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/search/ClientTransportSearchScrollAction.java new file mode 100644 index 00000000000..398b492fa62 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/search/ClientTransportSearchScrollAction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.action.search; + +import com.google.inject.Inject; +import org.elasticsearch.action.TransportActions; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.transport.action.support.BaseClientTransportAction; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportSearchScrollAction extends BaseClientTransportAction { + + @Inject public ClientTransportSearchScrollAction(Settings settings, TransportService transportService) { + super(settings, transportService, SearchResponse.class); + } + + @Override protected String action() { + return TransportActions.SEARCH_SCROLL; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/support/BaseClientTransportAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/support/BaseClientTransportAction.java new file mode 100644 index 00000000000..e8764007446 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/action/support/BaseClientTransportAction.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.elasticsearch.client.transport.action.support;

import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.ElasticSearchIllegalStateException;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.transport.action.ClientTransportAction;
import org.elasticsearch.cluster.node.Node;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.settings.Settings;

import java.lang.reflect.Constructor;

import static org.elasticsearch.action.support.PlainActionFuture.*;

/**
 * Base class for client-side transport actions. Sends a typed {@link ActionRequest}
 * to a specific {@link Node} via the {@link TransportService} and delivers the typed
 * {@link ActionResponse} either through a returned {@link ActionFuture} or a caller
 * supplied {@link ActionListener}.
 *
 * <p>Responses are created reflectively through the response class's (possibly
 * non-public) no-arg constructor, cached once at construction time.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class BaseClientTransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent implements ClientTransportAction<Request, Response> {

    protected final TransportService transportService;

    // Cached no-arg constructor of the response type; used to deserialize responses.
    private final Constructor<Response> responseConstructor;

    protected BaseClientTransportAction(Settings settings, TransportService transportService, Class<Response> type) {
        super(settings);
        this.transportService = transportService;
        try {
            this.responseConstructor = type.getDeclaredConstructor();
        } catch (NoSuchMethodException e) {
            // Fail fast at wiring time rather than on the first response; keep the cause.
            throw new ElasticSearchIllegalArgumentException("No default constructor is declared for [" + type.getName() + "]", e);
        }
        // Response constructors may be package-private; make the cached one callable.
        responseConstructor.setAccessible(true);
    }

    @Override public ActionFuture<Response> submit(Node node, Request request) throws ElasticSearchException {
        return submit(node, request, null);
    }

    @Override public ActionFuture<Response> submit(Node node, Request request, @Nullable ActionListener<Response> listener) {
        PlainActionFuture<Response> future = newFuture(listener);
        if (listener == null) {
            // Since we don't have a listener, and we release a possible lock with the future,
            // there is no need to execute it under a listener thread.
            request.listenerThreaded(false);
        }
        execute(node, request, future);
        return future;
    }

    @Override public void execute(Node node, final Request request, final ActionListener<Response> listener) {
        transportService.sendRequest(node, action(), request, new BaseTransportResponseHandler<Response>() {
            @Override public Response newInstance() {
                return BaseClientTransportAction.this.newInstance();
            }

            @Override public void handleResponse(Response response) {
                listener.onResponse(response);
            }

            @Override public void handleException(RemoteTransportException exp) {
                listener.onFailure(exp);
            }

            @Override public boolean spawn() {
                // Only spawn a thread for the callback when the request asked for it.
                return request.listenerThreaded();
            }
        });
    }

    /** The registered transport action name this client action invokes. */
    protected abstract String action();

    /** Creates an empty response instance for the transport layer to deserialize into. */
    protected Response newInstance() {
        try {
            return responseConstructor.newInstance();
        } catch (Exception e) {
            // Preserve the underlying reflection failure as the cause.
            throw new ElasticSearchIllegalStateException("Failed to create a new instance of [" + responseConstructor.getDeclaringClass().getName() + "]", e);
        }
    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java new file mode 100644 index 00000000000..d9a31812d95 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.support; + +import com.google.inject.Inject; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalTransportAdminClient extends AbstractComponent implements AdminClient { + + private final TransportClientNodesService nodesService; + + private final InternalTransportIndicesAdminClient indicesAdminClient; + + private final InternalTransportClusterAdminClient clusterAdminClient; + + @Inject public InternalTransportAdminClient(Settings settings, TransportClientNodesService nodesService, + InternalTransportIndicesAdminClient indicesAdminClient, InternalTransportClusterAdminClient clusterAdminClient) { + super(settings); + this.nodesService = nodesService; + this.indicesAdminClient = indicesAdminClient; + this.clusterAdminClient = clusterAdminClient; + } + + @Override public IndicesAdminClient indices() { + return indicesAdminClient; + } + + @Override public ClusterAdminClient cluster() { + return clusterAdminClient; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java new file mode 100644 index 00000000000..4bc07c716c9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.support; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.count.CountRequest; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.client.transport.action.count.ClientTransportCountAction; +import org.elasticsearch.client.transport.action.delete.ClientTransportDeleteAction; +import org.elasticsearch.client.transport.action.deletebyquery.ClientTransportDeleteByQueryAction; +import 
org.elasticsearch.client.transport.action.get.ClientTransportGetAction; +import org.elasticsearch.client.transport.action.index.ClientTransportIndexAction; +import org.elasticsearch.client.transport.action.search.ClientTransportSearchAction; +import org.elasticsearch.client.transport.action.search.ClientTransportSearchScrollAction; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalTransportClient extends AbstractComponent implements Client { + + private final TransportClientNodesService nodesService; + + private final InternalTransportAdminClient adminClient; + + private final ClientTransportIndexAction indexAction; + + private final ClientTransportDeleteAction deleteAction; + + private final ClientTransportGetAction getAction; + + private final ClientTransportDeleteByQueryAction deleteByQueryAction; + + private final ClientTransportCountAction countAction; + + private final ClientTransportSearchAction searchAction; + + private final ClientTransportSearchScrollAction searchScrollAction; + + @Inject public InternalTransportClient(Settings settings, TransportClientNodesService nodesService, InternalTransportAdminClient adminClient, + ClientTransportIndexAction indexAction, ClientTransportDeleteAction deleteAction, ClientTransportGetAction getAction, + ClientTransportDeleteByQueryAction deleteByQueryAction, ClientTransportCountAction countAction, + ClientTransportSearchAction searchAction, ClientTransportSearchScrollAction searchScrollAction) { + super(settings); + this.nodesService = nodesService; + this.adminClient = adminClient; + + this.indexAction = indexAction; + this.deleteAction = deleteAction; + this.getAction = getAction; + this.deleteByQueryAction = deleteByQueryAction; + this.countAction = countAction; + this.searchAction = searchAction; + this.searchScrollAction = searchScrollAction; + } + + @Override public void close() { + // 
nothing to do here + } + + @Override public AdminClient admin() { + return adminClient; + } + + @Override public ActionFuture index(IndexRequest request) { + return indexAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture index(IndexRequest request, ActionListener listener) { + return indexAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execIndex(IndexRequest request, ActionListener listener) { + indexAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture delete(DeleteRequest request) { + return deleteAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture delete(DeleteRequest request, ActionListener listener) { + return deleteAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execDelete(DeleteRequest request, ActionListener listener) { + deleteAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture deleteByQuery(DeleteByQueryRequest request) { + return deleteByQueryAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture deleteByQuery(DeleteByQueryRequest request, ActionListener listener) { + return deleteByQueryAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execDeleteByQuery(DeleteByQueryRequest request, ActionListener listener) { + deleteByQueryAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture get(GetRequest request) { + return getAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture get(GetRequest request, ActionListener listener) { + return getAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execGet(GetRequest request, ActionListener listener) { + getAction.execute(nodesService.randomNode(), request, listener); + } + + 
@Override public ActionFuture count(CountRequest request) { + return countAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture count(CountRequest request, ActionListener listener) { + return countAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execCount(CountRequest request, ActionListener listener) { + countAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture search(SearchRequest request) { + return searchAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture search(SearchRequest request, ActionListener listener) { + return searchAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execSearch(SearchRequest request, ActionListener listener) { + searchAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture searchScroll(SearchScrollRequest request) { + return searchScrollAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture searchScroll(SearchScrollRequest request, ActionListener listener) { + return searchScrollAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execSearchScroll(SearchScrollRequest request, ActionListener listener) { + searchScrollAction.execute(nodesService.randomNode(), request, listener); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java new file mode 100644 index 00000000000..ee105f6d43d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport.support; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingRequest; +import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingResponse; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingRequest; +import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingResponse; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingRequest; +import org.elasticsearch.action.admin.cluster.ping.single.SinglePingResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.client.transport.action.admin.cluster.node.info.ClientTransportNodesInfoAction; +import 
org.elasticsearch.client.transport.action.admin.cluster.ping.broadcast.ClientTransportBroadcastPingAction; +import org.elasticsearch.client.transport.action.admin.cluster.ping.replication.ClientTransportReplicationPingAction; +import org.elasticsearch.client.transport.action.admin.cluster.ping.single.ClientTransportSinglePingAction; +import org.elasticsearch.client.transport.action.admin.cluster.state.ClientTransportClusterStateAction; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalTransportClusterAdminClient extends AbstractComponent implements ClusterAdminClient { + + private final TransportClientNodesService nodesService; + + private final ClientTransportClusterStateAction clusterStateAction; + + private final ClientTransportSinglePingAction singlePingAction; + + private final ClientTransportReplicationPingAction replicationPingAction; + + private final ClientTransportBroadcastPingAction broadcastPingAction; + + private final ClientTransportNodesInfoAction nodesInfoAction; + + @Inject public InternalTransportClusterAdminClient(Settings settings, TransportClientNodesService nodesService, + ClientTransportClusterStateAction clusterStateAction, + ClientTransportSinglePingAction singlePingAction, ClientTransportReplicationPingAction replicationPingAction, ClientTransportBroadcastPingAction broadcastPingAction, + ClientTransportNodesInfoAction nodesInfoAction) { + super(settings); + this.nodesService = nodesService; + this.clusterStateAction = clusterStateAction; + this.nodesInfoAction = nodesInfoAction; + this.singlePingAction = singlePingAction; + this.replicationPingAction = replicationPingAction; + this.broadcastPingAction = broadcastPingAction; + } + + @Override public ActionFuture state(ClusterStateRequest request) { + return clusterStateAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture 
state(ClusterStateRequest request, ActionListener listener) { + return clusterStateAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execState(ClusterStateRequest request, ActionListener listener) { + clusterStateAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture ping(SinglePingRequest request) { + return singlePingAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture ping(SinglePingRequest request, ActionListener listener) { + return singlePingAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execPing(SinglePingRequest request, ActionListener listener) { + singlePingAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture ping(BroadcastPingRequest request) { + return broadcastPingAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture ping(BroadcastPingRequest request, ActionListener listener) { + return broadcastPingAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execPing(BroadcastPingRequest request, ActionListener listener) { + broadcastPingAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture ping(ReplicationPingRequest request) { + return replicationPingAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture ping(ReplicationPingRequest request, ActionListener listener) { + return replicationPingAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execPing(ReplicationPingRequest request, ActionListener listener) { + replicationPingAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture nodesInfo(NodesInfoRequest request) { + return nodesInfoAction.submit(nodesService.randomNode(), request); + } + + @Override public 
ActionFuture nodesInfo(NodesInfoRequest request, ActionListener listener) { + return nodesInfoAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execNodesInfo(NodesInfoRequest request, ActionListener listener) { + nodesInfoAction.execute(nodesService.randomNode(), request, listener); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java new file mode 100644 index 00000000000..6de359c5ce4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.transport.support; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest; +import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.client.transport.action.admin.indices.create.ClientTransportCreateIndexAction; +import org.elasticsearch.client.transport.action.admin.indices.delete.ClientTransportDeleteIndexAction; +import org.elasticsearch.client.transport.action.admin.indices.flush.ClientTransportFlushAction; +import org.elasticsearch.client.transport.action.admin.indices.gateway.snapshot.ClientTransportGatewaySnapshotAction; +import org.elasticsearch.client.transport.action.admin.indices.mapping.create.ClientTransportCreateMappingAction; +import 
org.elasticsearch.client.transport.action.admin.indices.refresh.ClientTransportRefreshAction; +import org.elasticsearch.client.transport.action.admin.indices.status.ClientTransportIndicesStatusAction; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalTransportIndicesAdminClient extends AbstractComponent implements IndicesAdminClient { + + private final TransportClientNodesService nodesService; + + private final ClientTransportIndicesStatusAction indicesStatusAction; + + private final ClientTransportCreateIndexAction createIndexAction; + + private final ClientTransportDeleteIndexAction deleteIndexAction; + + private final ClientTransportRefreshAction refreshAction; + + private final ClientTransportFlushAction flushAction; + + private final ClientTransportCreateMappingAction createMappingAction; + + private final ClientTransportGatewaySnapshotAction gatewaySnapshotAction; + + @Inject public InternalTransportIndicesAdminClient(Settings settings, TransportClientNodesService nodesService, + ClientTransportIndicesStatusAction indicesStatusAction, + ClientTransportCreateIndexAction createIndexAction, ClientTransportDeleteIndexAction deleteIndexAction, + ClientTransportRefreshAction refreshAction, ClientTransportFlushAction flushAction, + ClientTransportCreateMappingAction createMappingAction, ClientTransportGatewaySnapshotAction gatewaySnapshotAction) { + super(settings); + this.nodesService = nodesService; + this.indicesStatusAction = indicesStatusAction; + this.createIndexAction = createIndexAction; + this.deleteIndexAction = deleteIndexAction; + this.refreshAction = refreshAction; + this.flushAction = flushAction; + this.createMappingAction = createMappingAction; + this.gatewaySnapshotAction = gatewaySnapshotAction; + } + + @Override public ActionFuture status(IndicesStatusRequest request) { + return 
indicesStatusAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture status(IndicesStatusRequest request, ActionListener listener) { + return indicesStatusAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execStatus(IndicesStatusRequest request, ActionListener listener) { + indicesStatusAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture create(CreateIndexRequest request) { + return createIndexAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture create(CreateIndexRequest request, ActionListener listener) { + return createIndexAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execCreate(CreateIndexRequest request, ActionListener listener) { + createIndexAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture delete(DeleteIndexRequest request) { + return deleteIndexAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture delete(DeleteIndexRequest request, ActionListener listener) { + return deleteIndexAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execDelete(DeleteIndexRequest request, ActionListener listener) { + deleteIndexAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture refresh(RefreshRequest request) { + return refreshAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture refresh(RefreshRequest request, ActionListener listener) { + return refreshAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execRefresh(RefreshRequest request, ActionListener listener) { + refreshAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture flush(FlushRequest request) { + return 
flushAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture flush(FlushRequest request, ActionListener listener) { + return flushAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execFlush(FlushRequest request, ActionListener listener) { + flushAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture createMapping(CreateMappingRequest request) { + return createMappingAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture createMapping(CreateMappingRequest request, ActionListener listener) { + return createMappingAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execCreateMapping(CreateMappingRequest request, ActionListener listener) { + createMappingAction.execute(nodesService.randomNode(), request, listener); + } + + @Override public ActionFuture gatewaySnapshot(GatewaySnapshotRequest request) { + return gatewaySnapshotAction.submit(nodesService.randomNode(), request); + } + + @Override public ActionFuture gatewaySnapshot(GatewaySnapshotRequest request, ActionListener listener) { + return gatewaySnapshotAction.submit(nodesService.randomNode(), request, listener); + } + + @Override public void execGatewaySnapshot(GatewaySnapshotRequest request, ActionListener listener) { + gatewaySnapshotAction.execute(nodesService.randomNode(), request, listener); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java new file mode 100644 index 00000000000..62b70fe9d2f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.cluster.node.Nodes; + +/** + * @author kimchy (Shay Banon) + */ +public class ClusterChangedEvent { + + private final String source; + + private final ClusterState previousState; + + private final ClusterState state; + + private final boolean firstMaster; + + private final Nodes.Delta nodesDelta; + + public ClusterChangedEvent(String source, ClusterState state, ClusterState previousState, boolean firstMaster) { + this.source = source; + this.state = state; + this.previousState = previousState; + this.firstMaster = firstMaster; + this.nodesDelta = state.nodes().delta(previousState.nodes()); + } + + /** + * The source that caused this cluster event to be raised. 
+ */ + public String source() { + return this.source; + } + + public ClusterState state() { + return this.state; + } + + public ClusterState previousState() { + return this.previousState; + } + + public boolean routingTableChanged() { + return state.routingTable() != previousState.routingTable(); + } + + public boolean metaDataChanged() { + return state.metaData() != previousState.metaData(); + } + + public boolean localNodeMaster() { + return state.nodes().localNodeMaster(); + } + + public boolean firstMaster() { + return firstMaster; + } + + public Nodes.Delta nodesDelta() { + return this.nodesDelta; + } + + public boolean nodesRemoved() { + return nodesDelta.removed(); + } + + public boolean nodesAdded() { + return nodesDelta.added(); + } + + public boolean nodesChanged() { + return nodesRemoved() || nodesAdded(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterModule.java new file mode 100644 index 00000000000..a01afe6dbb2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import com.google.inject.AbstractModule; +import org.elasticsearch.cluster.action.index.NodeIndexCreatedAction; +import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.MetaDataService; +import org.elasticsearch.cluster.routing.RoutingService; +import org.elasticsearch.cluster.routing.strategy.DefaultShardsRoutingStrategy; +import org.elasticsearch.cluster.routing.strategy.ShardsRoutingStrategy; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClusterModule extends AbstractModule { + + private final Settings settings; + + public ClusterModule(Settings settings) { + this.settings = settings; + } + + @Override + protected void configure() { + bind(ShardsRoutingStrategy.class) + .to(settings.getAsClass("cluster.routing.shards.type", DefaultShardsRoutingStrategy.class)) + .asEagerSingleton(); + + bind(ClusterService.class).to(DefaultClusterService.class).asEagerSingleton(); + bind(MetaDataService.class).asEagerSingleton(); + bind(RoutingService.class).asEagerSingleton(); + + bind(ShardStateAction.class).asEagerSingleton(); + bind(NodeIndexCreatedAction.class).asEagerSingleton(); + bind(NodeIndexDeletedAction.class).asEagerSingleton(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterName.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterName.java new file mode 100644 index 00000000000..915b921ac88 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster;

import org.elasticsearch.util.io.Streamable;
import org.elasticsearch.util.settings.Settings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * The (streamable) name of a cluster, read from the {@link #SETTING}
 * setting and defaulting to {@code "elasticsearch"}.
 *
 * @author kimchy (Shay Banon)
 */
public class ClusterName implements Streamable {

    /** The settings key the cluster name is read from. */
    public static final String SETTING = "cluster.name";

    public static final ClusterName DEFAULT = new ClusterName("elasticsearch");

    // non-final so it can be populated by readFrom(...) after the
    // no-arg construction used for deserialization
    private String value;

    /**
     * Builds a cluster name from the settings, falling back to
     * {@link #DEFAULT} when not configured.
     */
    public static ClusterName clusterNameFromSettings(Settings settings) {
        // use the SETTING constant rather than repeating the literal key
        return new ClusterName(settings.get(SETTING, ClusterName.DEFAULT.value()));
    }

    private ClusterName() {

    }

    public ClusterName(String value) {
        this.value = value;
    }

    public String value() {
        return this.value;
    }

    /**
     * Reads a cluster name off the stream.
     */
    public static ClusterName readClusterName(DataInput in) throws ClassNotFoundException, IOException {
        ClusterName clusterName = new ClusterName();
        clusterName.readFrom(in);
        return clusterName;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        value = in.readUTF();
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(value);
    }

    @Override public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        ClusterName that = (ClusterName) o;

        if (value != null ? !value.equals(that.value) : that.value != null) return false;

        return true;
    }

    @Override public int hashCode() {
        return value != null ? value.hashCode() : 0;
    }

    @Override public String toString() {
        return "Cluster [" + value + "]";
    }
}
+ */ + +package org.elasticsearch.cluster; + +import com.google.inject.AbstractModule; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ClusterNameModule extends AbstractModule { + + private final Settings settings; + + public ClusterNameModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(ClusterName.class).toInstance(ClusterName.clusterNameFromSettings(settings)); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterService.java new file mode 100644 index 00000000000..6d9242b352e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * Central service exposing the current {@link ClusterState}, registration of
 * state change listeners, and submission of cluster state update tasks.
 *
 * @author kimchy (Shay Banon)
 */
public interface ClusterService extends LifecycleComponent {

    /** The current cluster state. */
    ClusterState state();

    /** Adds a listener that is notified on every cluster state change. */
    void add(ClusterStateListener listener);

    /** Removes a previously added listener. */
    void remove(ClusterStateListener listener);

    /**
     * Adds a listener that is notified on cluster state changes and is also
     * called back via {@code onTimeout} once the given timeout elapses.
     */
    void add(TimeValue timeout, TimeoutClusterStateListener listener);

    /** Removes a previously added timeout listener. */
    void remove(TimeoutClusterStateListener listener);

    /**
     * Submits a task that computes a new cluster state from the current one.
     * {@code source} is a human readable description used for logging.
     */
    void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
}
/**
 * Immutable snapshot of the cluster-wide state: the state version, the cluster
 * {@link MetaData}, the {@link RoutingTable} and the known {@link Nodes}.
 * Instances are created through the nested {@link Builder}.
 *
 * @author kimchy (Shay Banon)
 */
public class ClusterState {

    // Version of this state; incremented by the cluster service on the master only.
    private final long version;

    private final RoutingTable routingTable;

    private final Nodes nodes;

    private final MetaData metaData;

    // built on demand; volatile so a computed value is visible across threads
    // (worst case two threads compute it concurrently, which is harmless)
    private volatile RoutingNodes routingNodes;

    public ClusterState(long version, MetaData metaData, RoutingTable routingTable, Nodes nodes) {
        this.version = version;
        this.metaData = metaData;
        this.routingTable = routingTable;
        this.nodes = nodes;
    }

    /** The version of this cluster state. */
    public long version() {
        return this.version;
    }

    /** The nodes currently part of the cluster. */
    public Nodes nodes() {
        return this.nodes;
    }

    /** The cluster level meta data. */
    public MetaData metaData() {
        return this.metaData;
    }

    /** The routing table (shard routing per index). */
    public RoutingTable routingTable() {
        return routingTable;
    }

    /**
     * Returns a built (on demand) routing nodes view of the routing table.
     */
    public RoutingNodes routingNodes() {
        if (routingNodes != null) {
            return routingNodes;
        }
        routingNodes = routingTable.routingNodes(metaData);
        return routingNodes;
    }

    /** Creates a new builder initialized with empty state components. */
    public static Builder newClusterStateBuilder() {
        return new Builder();
    }

    /**
     * Builder for {@link ClusterState}; also hosts the static serialization
     * helpers ({@link #writeTo}/{@link #readFrom} and the byte[] variants).
     */
    public static class Builder {

        private long version = 0;

        private MetaData metaData = MetaData.EMPTY_META_DATA;

        private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;

        private Nodes nodes = Nodes.EMPTY_NODES;

        public Builder nodes(Nodes.Builder nodesBuilder) {
            return nodes(nodesBuilder.build());
        }

        public Builder nodes(Nodes nodes) {
            this.nodes = nodes;
            return this;
        }

        public Builder routingTable(RoutingTable.Builder routingTable) {
            return routingTable(routingTable.build());
        }

        public Builder routingTable(RoutingTable routingTable) {
            this.routingTable = routingTable;
            return this;
        }

        public Builder metaData(MetaData.Builder metaDataBuilder) {
            return metaData(metaDataBuilder.build());
        }

        public Builder metaData(MetaData metaData) {
            this.metaData = metaData;
            return this;
        }

        /** Seeds the builder from an existing state (copy-then-modify usage). */
        public Builder state(ClusterState state) {
            this.version = state.version();
            this.nodes = state.nodes();
            this.routingTable = state.routingTable();
            this.metaData = state.metaData();
            return this;
        }

        // Package private: only the cluster service bumps the version (master only).
        Builder incrementVersion() {
            this.version++;
            return this;
        }

        public ClusterState build() {
            return new ClusterState(version, metaData, routingTable, nodes);
        }

        /** Serializes the given state to a byte array (e.g. for publishing it to other nodes). */
        public static byte[] toBytes(ClusterState state) throws IOException {
            ByteArrayDataOutputStream os = ByteArrayDataOutputStream.Cached.cached();
            writeTo(state, os);
            return os.copiedByteArray();
        }

        /** Deserializes a state previously produced by {@link #toBytes}. */
        public static ClusterState fromBytes(byte[] data, Settings globalSettings, Node localNode) throws IOException, ClassNotFoundException {
            return readFrom(new ByteArrayDataInputStream(data), globalSettings, localNode);
        }

        public static void writeTo(ClusterState state, DataOutput out) throws IOException {
            // NOTE: the field order here must stay in sync with readFrom below.
            out.writeLong(state.version());
            MetaData.Builder.writeTo(state.metaData(), out);
            RoutingTable.Builder.writeTo(state.routingTable(), out);
            Nodes.Builder.writeTo(state.nodes(), out);
        }

        public static ClusterState readFrom(DataInput in, @Nullable Settings globalSettings, @Nullable Node localNode) throws ClassNotFoundException, IOException {
            Builder builder = new Builder();
            builder.version = in.readLong();
            builder.metaData = MetaData.Builder.readFrom(in, globalSettings);
            builder.routingTable = RoutingTable.Builder.readFrom(in);
            builder.nodes = Nodes.Builder.readFrom(in, localNode);
            return builder.build();
        }
    }
}
/**
 * A listener to be notified when the cluster state changes.
 *
 * @author kimchy (Shay Banon)
 */
public interface ClusterStateListener {

    /** Called when the cluster state has changed. */
    void clusterChanged(ClusterChangedEvent event);
}
/**
 * A task that computes a new cluster state based on the current one. Tasks are
 * executed serially by the cluster service.
 *
 * @author kimchy (Shay Banon)
 */
public interface ClusterStateUpdateTask {

    /**
     * Computes and returns the new cluster state. Returning the passed-in
     * {@code currentState} instance signals that no update is needed (the
     * cluster service compares by identity).
     */
    ClusterState execute(ClusterState currentState);
}
/**
 * Default {@link ClusterService} implementation. Cluster state update tasks are
 * executed serially on a single dedicated thread; listeners are notified on that
 * update thread, while transport node add/remove notifications and listener
 * timeouts are dispatched asynchronously through the shared {@link ThreadPool}.
 *
 * @author kimchy (Shay Banon)
 */
public class DefaultClusterService extends AbstractComponent implements ClusterService {

    private final Lifecycle lifecycle = new Lifecycle();

    // How often the scheduled task scans for expired timeout listeners.
    private final TimeValue timeoutInterval;

    private final ThreadPool threadPool;

    private final DiscoveryService discoveryService;

    private final TransportService transportService;

    // Single-threaded executor: serializes all cluster state update tasks.
    private volatile ExecutorService updateTasksExecutor;

    private final List clusterStateListeners = new CopyOnWriteArrayList();

    private final List clusterStateTimeoutListeners = new CopyOnWriteArrayList();

    private volatile ScheduledFuture scheduledFuture;

    // The latest cluster state; replaced wholesale on each successful update.
    private volatile ClusterState clusterState = newClusterStateBuilder().build();

    @Inject public DefaultClusterService(Settings settings, DiscoveryService discoveryService, TransportService transportService, ThreadPool threadPool) {
        super(settings);
        this.transportService = transportService;
        this.discoveryService = discoveryService;
        this.threadPool = threadPool;

        this.timeoutInterval = componentSettings.getAsTime("timeoutInterval", timeValueMillis(500));
    }

    @Override public Lifecycle.State lifecycleState() {
        return this.lifecycle.state();
    }

    @Override public ClusterService start() throws ElasticSearchException {
        if (!lifecycle.moveToStarted()) {
            return this;
        }
        this.updateTasksExecutor = newSingleThreadExecutor(daemonThreadFactory(settings, "clusterService#updateTask"));
        // Periodically expire timeout listeners that have waited longer than their timeout.
        scheduledFuture = threadPool.scheduleWithFixedDelay(new Runnable() {
            @Override public void run() {
                long timestamp = System.currentTimeMillis();
                for (final TimeoutHolder holder : clusterStateTimeoutListeners) {
                    if ((timestamp - holder.timestamp) > holder.timeout.millis()) {
                        // Removing while iterating is safe: CopyOnWriteArrayList
                        // iterators operate on a snapshot.
                        clusterStateTimeoutListeners.remove(holder);
                        // Invoke the callback off this scheduler thread.
                        DefaultClusterService.this.threadPool.execute(new Runnable() {
                            @Override public void run() {
                                holder.listener.onTimeout(holder.timeout);
                            }
                        });
                    }
                }
            }
        }, timeoutInterval);
        return this;
    }

    @Override public ClusterService stop() throws ElasticSearchException {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        scheduledFuture.cancel(false);
        // Flush remaining timeout listeners so none are left waiting forever.
        for (TimeoutHolder holder : clusterStateTimeoutListeners) {
            holder.listener.onTimeout(holder.timeout);
        }
        updateTasksExecutor.shutdown();
        try {
            updateTasksExecutor.awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
        return this;
    }

    @Override public void close() throws ElasticSearchException {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }
    }

    public ClusterState state() {
        return this.clusterState;
    }

    public void add(ClusterStateListener listener) {
        clusterStateListeners.add(listener);
    }

    public void remove(ClusterStateListener listener) {
        clusterStateListeners.remove(listener);
    }

    public void add(TimeValue timeout, TimeoutClusterStateListener listener) {
        clusterStateTimeoutListeners.add(new TimeoutHolder(listener, System.currentTimeMillis(), timeout));
    }

    public void remove(TimeoutClusterStateListener listener) {
        // TimeoutHolder equality is based on the listener identity only, so the
        // timestamp/timeout values passed here are irrelevant.
        clusterStateTimeoutListeners.remove(new TimeoutHolder(listener, -1, null));
    }

    public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
        if (!lifecycle.started()) {
            return;
        }
        updateTasksExecutor.execute(new Runnable() {
            @Override public void run() {
                // Re-check under the single update thread; the service may have
                // stopped between submission and execution.
                if (!lifecycle.started()) {
                    return;
                }
                ClusterState previousClusterState = clusterState;
                clusterState = updateTask.execute(previousClusterState);
                // Identity comparison: a task returning the same instance means "no change".
                if (previousClusterState != clusterState) {
                    if (clusterState.nodes().localNodeMaster()) {
                        // only the master controls the version numbers
                        clusterState = newClusterStateBuilder().state(clusterState).incrementVersion().build();
                    }

                    if (logger.isDebugEnabled()) {
                        logger.debug("Cluster state updated, version [{}], source [{}]", clusterState.version(), source);
                    }
                    if (logger.isTraceEnabled()) {
                        StringBuilder sb = new StringBuilder("Cluster State:\n");
                        sb.append(clusterState.nodes().prettyPrint());
                        sb.append(clusterState.routingTable().prettyPrint());
                        sb.append(clusterState.routingNodes().prettyPrint());
                        logger.trace(sb.toString());
                    }

                    ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, clusterState, previousClusterState, discoveryService.firstMaster());
                    // new cluster state, notify all listeners
                    final Nodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
                    if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
                        String summary = nodesDelta.shortSummary();
                        if (summary.length() > 0) {
                            logger.info(summary);
                        }
                    }

                    // Notify the transport layer of added nodes (async on the thread pool).
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            transportService.nodesAdded(nodesDelta.addedNodes());
                        }
                    });

                    for (TimeoutHolder timeoutHolder : clusterStateTimeoutListeners) {
                        timeoutHolder.listener.clusterChanged(clusterChangedEvent);
                    }
                    for (ClusterStateListener listener : clusterStateListeners) {
                        listener.clusterChanged(clusterChangedEvent);
                    }

                    // Notify the transport layer of removed nodes (async on the thread pool).
                    threadPool.execute(new Runnable() {
                        @Override public void run() {
                            transportService.nodesRemoved(nodesDelta.removedNodes());
                        }
                    });

                    // if we are the master, publish the new state to all nodes
                    if (clusterState.nodes().localNodeMaster()) {
                        discoveryService.publish(clusterState);
                    }

                    if (updateTask instanceof ProcessedClusterStateUpdateTask) {
                        ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(clusterState);
                    }
                }
            }
        });
    }

    // Pairs a timeout listener with its registration time; equality/hashCode are
    // based on the listener identity only so remove(listener) can find the holder.
    private static class TimeoutHolder {
        final TimeoutClusterStateListener listener;
        final long timestamp;
        final TimeValue timeout;

        private TimeoutHolder(TimeoutClusterStateListener listener, long timestamp, TimeValue timeout) {
            this.listener = listener;
            this.timestamp = timestamp;
            this.timeout = timeout;
        }

        @Override public int hashCode() {
            return listener.hashCode();
        }

        @Override public boolean equals(Object obj) {
            return ((TimeoutHolder) obj).listener == listener;
        }
    }
}
/**
 * An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when
 * the cluster state update has been processed.
 *
 * @author kimchy (Shay Banon)
 */
public interface ProcessedClusterStateUpdateTask extends ClusterStateUpdateTask {

    /**
     * Called when the result of the {@link #execute(ClusterState)} has been processed
     * properly by all listeners.
     */
    void clusterStateProcessed(ClusterState clusterState);
}
/**
 * A {@link ClusterStateListener} that is additionally notified when the timeout
 * it was registered with has elapsed.
 *
 * @author kimchy (Shay Banon)
 */
public interface TimeoutClusterStateListener extends ClusterStateListener {

    /** Called with the registered timeout value once that timeout has elapsed. */
    void onTimeout(TimeValue timeout);
}
package org.elasticsearch.cluster.action.index;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.Nodes;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportRequestHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.VoidTransportResponseHandler;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.io.Streamable;
import org.elasticsearch.util.io.VoidStreamable;
import org.elasticsearch.util.settings.Settings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Routes "node finished creating index [index]" notifications to the current
 * master node, where registered {@link Listener}s are invoked.
 *
 * <p>If the local node is the master the notification is dispatched directly
 * on the thread pool; otherwise it is sent to the master over the transport
 * layer (handler action {@code cluster/nodeIndexCreated}).
 *
 * @author kimchy (Shay Banon)
 */
public class NodeIndexCreatedAction extends AbstractComponent {

    private final ThreadPool threadPool;

    private final TransportService transportService;

    private final ClusterService clusterService;

    // Listeners are read far more often than they are (un)registered,
    // so copy-on-write keeps notification iteration lock free.
    private final List<Listener> listeners = new CopyOnWriteArrayList<Listener>();

    @Inject public NodeIndexCreatedAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService) {
        super(settings);
        this.threadPool = threadPool;
        this.transportService = transportService;
        this.clusterService = clusterService;
        transportService.registerHandler(NodeIndexCreatedTransportHandler.ACTION, new NodeIndexCreatedTransportHandler());
    }

    /**
     * Registers a listener to be notified when any node reports an index created.
     */
    public void add(Listener listener) {
        listeners.add(listener);
    }

    /**
     * Unregisters a previously {@link #add(Listener) added} listener.
     */
    public void remove(Listener listener) {
        listeners.remove(listener);
    }

    /**
     * Reports that {@code nodeId} finished creating {@code index} locally.
     * Runs listeners locally when this node is the master, otherwise forwards
     * the message to the master node.
     */
    public void nodeIndexCreated(final String index, final String nodeId) throws ElasticSearchException {
        Nodes nodes = clusterService.state().nodes();
        if (nodes.localNodeMaster()) {
            threadPool.execute(new Runnable() {
                @Override public void run() {
                    innerNodeIndexCreated(index, nodeId);
                }
            });
        } else {
            transportService.sendRequest(clusterService.state().nodes().masterNode(),
                    NodeIndexCreatedTransportHandler.ACTION, new NodeIndexCreatedMessage(index, nodeId), VoidTransportResponseHandler.INSTANCE);
        }
    }

    // Fan-out to all registered listeners; executed on the master only.
    private void innerNodeIndexCreated(String index, String nodeId) {
        for (Listener listener : listeners) {
            listener.onNodeIndexCreated(index, nodeId);
        }
    }

    /**
     * Callback invoked (on the master) for every node-level index creation.
     */
    public static interface Listener {
        void onNodeIndexCreated(String index, String nodeId);
    }

    private class NodeIndexCreatedTransportHandler extends BaseTransportRequestHandler<NodeIndexCreatedMessage> {

        static final String ACTION = "cluster/nodeIndexCreated";

        @Override public NodeIndexCreatedMessage newInstance() {
            return new NodeIndexCreatedMessage();
        }

        @Override public void messageReceived(NodeIndexCreatedMessage message, TransportChannel channel) throws Exception {
            innerNodeIndexCreated(message.index, message.nodeId);
            channel.sendResponse(VoidStreamable.INSTANCE);
        }
    }

    /**
     * Wire message carrying the (index, nodeId) pair; both fields are
     * serialized as UTF strings.
     */
    private static class NodeIndexCreatedMessage implements Streamable {

        String index;

        String nodeId;

        // No-arg constructor used only for deserialization via newInstance().
        private NodeIndexCreatedMessage() {
        }

        private NodeIndexCreatedMessage(String index, String nodeId) {
            this.index = index;
            this.nodeId = nodeId;
        }

        @Override public void writeTo(DataOutput out) throws IOException {
            out.writeUTF(index);
            out.writeUTF(nodeId);
        }

        @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
            index = in.readUTF();
            nodeId = in.readUTF();
        }
    }
}
package org.elasticsearch.cluster.action.index;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.Nodes;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportRequestHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.VoidTransportResponseHandler;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.io.Streamable;
import org.elasticsearch.util.io.VoidStreamable;
import org.elasticsearch.util.settings.Settings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Routes "node finished deleting index [index]" notifications to the current
 * master node, where registered {@link Listener}s are invoked. Mirrors
 * {@link NodeIndexCreatedAction} (handler action {@code cluster/nodeIndexDeleted}).
 *
 * @author kimchy (Shay Banon)
 */
public class NodeIndexDeletedAction extends AbstractComponent {

    private final ThreadPool threadPool;

    private final TransportService transportService;

    private final ClusterService clusterService;

    // Copy-on-write: listener notification dominates registration churn.
    private final List<Listener> listeners = new CopyOnWriteArrayList<Listener>();

    @Inject public NodeIndexDeletedAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService) {
        super(settings);
        this.threadPool = threadPool;
        this.transportService = transportService;
        this.clusterService = clusterService;
        transportService.registerHandler(NodeIndexDeletedTransportHandler.ACTION, new NodeIndexDeletedTransportHandler());
    }

    /**
     * Registers a listener to be notified when any node reports an index deleted.
     */
    public void add(Listener listener) {
        listeners.add(listener);
    }

    /**
     * Unregisters a previously {@link #add(Listener) added} listener.
     */
    public void remove(Listener listener) {
        listeners.remove(listener);
    }

    /**
     * Reports that {@code nodeId} finished deleting {@code index} locally.
     * Runs listeners locally when this node is the master, otherwise forwards
     * the message to the master node.
     */
    public void nodeIndexDeleted(final String index, final String nodeId) throws ElasticSearchException {
        Nodes nodes = clusterService.state().nodes();
        if (nodes.localNodeMaster()) {
            threadPool.execute(new Runnable() {
                @Override public void run() {
                    innerNodeIndexDeleted(index, nodeId);
                }
            });
        } else {
            transportService.sendRequest(clusterService.state().nodes().masterNode(),
                    NodeIndexDeletedTransportHandler.ACTION, new NodeIndexDeletedMessage(index, nodeId), VoidTransportResponseHandler.INSTANCE);
        }
    }

    // Fan-out to all registered listeners; executed on the master only.
    private void innerNodeIndexDeleted(String index, String nodeId) {
        for (Listener listener : listeners) {
            listener.onNodeIndexDeleted(index, nodeId);
        }
    }

    /**
     * Callback invoked (on the master) for every node-level index deletion.
     */
    public static interface Listener {
        void onNodeIndexDeleted(String index, String nodeId);
    }

    private class NodeIndexDeletedTransportHandler extends BaseTransportRequestHandler<NodeIndexDeletedMessage> {

        static final String ACTION = "cluster/nodeIndexDeleted";

        @Override public NodeIndexDeletedMessage newInstance() {
            return new NodeIndexDeletedMessage();
        }

        @Override public void messageReceived(NodeIndexDeletedMessage message, TransportChannel channel) throws Exception {
            innerNodeIndexDeleted(message.index, message.nodeId);
            channel.sendResponse(VoidStreamable.INSTANCE);
        }
    }

    /**
     * Wire message carrying the (index, nodeId) pair; both fields are
     * serialized as UTF strings.
     */
    private static class NodeIndexDeletedMessage implements Streamable {

        String index;

        String nodeId;

        // No-arg constructor used only for deserialization via newInstance().
        private NodeIndexDeletedMessage() {
        }

        private NodeIndexDeletedMessage(String index, String nodeId) {
            this.index = index;
            this.nodeId = nodeId;
        }

        @Override public void writeTo(DataOutput out) throws IOException {
            out.writeUTF(index);
            out.writeUTF(nodeId);
        }

        @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
            index = in.readUTF();
            nodeId = in.readUTF();
        }
    }
}
+ */ + +package org.elasticsearch.cluster.action.shard; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.ImmutableShardRouting; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.strategy.ShardsRoutingStrategy; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.VoidTransportResponseHandler; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.io.VoidStreamable; +import org.elasticsearch.util.settings.Settings; + +import static com.google.common.collect.Lists.*; +import static org.elasticsearch.cluster.ClusterState.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardStateAction extends AbstractComponent { + + private final TransportService transportService; + + private final ClusterService clusterService; + + private final ShardsRoutingStrategy shardsRoutingStrategy; + + private final ThreadPool threadPool; + + @Inject public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, + ShardsRoutingStrategy shardsRoutingStrategy, ThreadPool threadPool) { + super(settings); + this.clusterService = clusterService; + this.transportService = transportService; + this.shardsRoutingStrategy = shardsRoutingStrategy; + this.threadPool = threadPool; + + transportService.registerHandler(ShardStartedTransportHandler.ACTION, new 
ShardStartedTransportHandler()); + transportService.registerHandler(ShardFailedTransportHandler.ACTION, new ShardFailedTransportHandler()); + } + + public void shardFailed(final ShardRouting shardRouting) throws ElasticSearchException { + logger.warn("Sending failed shard for {}", shardRouting); + Nodes nodes = clusterService.state().nodes(); + if (nodes.localNodeMaster()) { + threadPool.execute(new Runnable() { + @Override public void run() { + innerShardFailed(shardRouting); + } + }); + } else { + transportService.sendRequest(clusterService.state().nodes().masterNode(), + ShardFailedTransportHandler.ACTION, shardRouting, VoidTransportResponseHandler.INSTANCE); + } + } + + public void shardStarted(final ShardRouting shardRouting) throws ElasticSearchException { + if (logger.isDebugEnabled()) { + logger.debug("Sending shard started for {}", shardRouting); + } + Nodes nodes = clusterService.state().nodes(); + if (nodes.localNodeMaster()) { + threadPool.execute(new Runnable() { + @Override public void run() { + innerShardStarted(shardRouting); + } + }); + } else { + transportService.sendRequest(clusterService.state().nodes().masterNode(), + ShardStartedTransportHandler.ACTION, shardRouting, VoidTransportResponseHandler.INSTANCE); + } + } + + private void innerShardFailed(final ShardRouting shardRouting) { + logger.warn("Received shard failed for {}", shardRouting); + clusterService.submitStateUpdateTask("shard-failed (" + shardRouting + ")", new ClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + if (logger.isDebugEnabled()) { + logger.debug("Applying failed shard {}", shardRouting); + } + RoutingTable prevRoutingTable = currentState.routingTable(); + RoutingTable newRoutingTable = shardsRoutingStrategy.applyFailedShards(currentState, newArrayList(shardRouting)); + if (prevRoutingTable == newRoutingTable) { + return currentState; + } + return 
newClusterStateBuilder().state(currentState).routingTable(newRoutingTable).build(); + } + }); + } + + private void innerShardStarted(final ShardRouting shardRouting) { + if (logger.isDebugEnabled()) { + logger.debug("Received shard started for {}", shardRouting); + } + clusterService.submitStateUpdateTask("shard-started (" + shardRouting + ")", new ClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + RoutingTable routingTable = currentState.routingTable(); + // find the one that maps to us, if its already started, no need to do anything... + // the shard might already be started since the nodes that is starting the shards might get cluster events + // with the shard still initializing, and it will try and start it again (until the verification comes) + IndexShardRoutingTable indexShardRoutingTable = routingTable.index(shardRouting.index()).shard(shardRouting.id()); + for (ShardRouting entry : indexShardRoutingTable) { + if (shardRouting.currentNodeId().equals(entry.currentNodeId())) { + // we found the same shard that exists on the same node id + if (entry.started()) { + // already started, do nothing here... 
+ return currentState; + } + } + } + if (logger.isDebugEnabled()) { + logger.debug("Applying started shard {}", shardRouting); + } + RoutingTable newRoutingTable = shardsRoutingStrategy.applyStartedShards(currentState, newArrayList(shardRouting)); + if (routingTable == newRoutingTable) { + return currentState; + } + return newClusterStateBuilder().state(currentState).routingTable(newRoutingTable).build(); + } + }); + } + + private class ShardFailedTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "cluster/shardFailure"; + + @Override public ShardRouting newInstance() { + return new ImmutableShardRouting(); + } + + @Override public void messageReceived(ShardRouting request, TransportChannel channel) throws Exception { + innerShardFailed(request); + channel.sendResponse(VoidStreamable.INSTANCE); + } + } + + private class ShardStartedTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "cluster/shardStarted"; + + @Override public ShardRouting newInstance() { + return new ImmutableShardRouting(); + } + + @Override public void messageReceived(ShardRouting request, TransportChannel channel) throws Exception { + innerShardStarted(request); + channel.sendResponse(VoidStreamable.INSTANCE); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java new file mode 100644 index 00000000000..6404a68996c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.elasticsearch.cluster.metadata;

import com.google.common.collect.ImmutableMap;
import org.elasticsearch.util.MapBuilder;
import org.elasticsearch.util.Preconditions;
import org.elasticsearch.util.concurrent.Immutable;
import org.elasticsearch.util.settings.ImmutableSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;

import static org.elasticsearch.util.settings.ImmutableSettings.*;

/**
 * Immutable per-index metadata: the index name, its settings (which must
 * contain {@link #SETTING_NUMBER_OF_SHARDS} and {@link #SETTING_NUMBER_OF_REPLICAS}),
 * and the mappings keyed by mapping type (value is the mapping source string).
 *
 * @author kimchy (Shay Banon)
 */
@Immutable
public class IndexMetaData {

    public static final String SETTING_NUMBER_OF_SHARDS = "index.numberOfShards";

    public static final String SETTING_NUMBER_OF_REPLICAS = "index.numberOfReplicas";

    private final String index;

    private final Settings settings;

    // mapping type -> mapping source (both serialized as UTF strings in writeTo)
    private final ImmutableMap<String, String> mappings;

    // Derived from settings; not serialized (transient), recomputed on read.
    private transient final int totalNumberOfShards;

    private IndexMetaData(String index, Settings settings, ImmutableMap<String, String> mappings) {
        Preconditions.checkArgument(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1) != -1, "must specify numberOfShards");
        Preconditions.checkArgument(settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1) != -1, "must specify numberOfReplicas");
        this.index = index;
        this.settings = settings;
        this.mappings = mappings;
        // primaries plus one full copy per replica
        this.totalNumberOfShards = numberOfShards() * (numberOfReplicas() + 1);
    }

    public String index() {
        return index;
    }

    public int numberOfShards() {
        return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
    }

    public int numberOfReplicas() {
        return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
    }

    /**
     * Total shard count including replicas: {@code shards * (replicas + 1)}.
     */
    public int totalNumberOfShards() {
        return totalNumberOfShards;
    }

    public Settings settings() {
        return settings;
    }

    public ImmutableMap<String, String> mappings() {
        return mappings;
    }

    public static Builder newIndexMetaDataBuilder(String index) {
        return new Builder(index);
    }

    public static Builder newIndexMetaDataBuilder(IndexMetaData indexMetaData) {
        return new Builder(indexMetaData);
    }

    /**
     * Mutable builder for {@link IndexMetaData}; also hosts the stream
     * (de)serialization helpers.
     */
    public static class Builder {

        private String index;

        private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;

        private MapBuilder<String, String> mappings = MapBuilder.newMapBuilder();

        public Builder(String index) {
            this.index = index;
        }

        public Builder(IndexMetaData indexMetaData) {
            this(indexMetaData.index());
            settings(indexMetaData.settings());
            mappings.putAll(indexMetaData.mappings);
        }

        public String index() {
            return index;
        }

        public Builder numberOfShards(int numberOfShards) {
            settings = ImmutableSettings.settingsBuilder().putAll(settings).putInt(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
            return this;
        }

        public int numberOfShards() {
            return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
        }

        public Builder numberOfReplicas(int numberOfReplicas) {
            settings = ImmutableSettings.settingsBuilder().putAll(settings).putInt(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
            return this;
        }

        public int numberOfReplicas() {
            return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
        }

        public Builder settings(Settings settings) {
            this.settings = settings;
            return this;
        }

        public Builder removeMapping(String mappingType) {
            mappings.remove(mappingType);
            return this;
        }

        public Builder addMapping(String mappingType, String mappingSource) {
            mappings.put(mappingType, mappingSource);
            return this;
        }

        public IndexMetaData build() {
            return new IndexMetaData(index, settings, mappings.immutableMap());
        }

        /**
         * Reads an {@link IndexMetaData} written by {@link #writeTo}:
         * index name, settings (merged with {@code globalSettings}), then
         * a count-prefixed list of (type, source) mapping pairs.
         */
        public static IndexMetaData readFrom(DataInput in, Settings globalSettings) throws ClassNotFoundException, IOException {
            Builder builder = new Builder(in.readUTF());
            builder.settings(readSettingsFromStream(in, globalSettings));
            int mappingsSize = in.readInt();
            for (int i = 0; i < mappingsSize; i++) {
                builder.addMapping(in.readUTF(), in.readUTF());
            }
            return builder.build();
        }

        public static void writeTo(IndexMetaData indexMetaData, DataOutput out) throws IOException {
            out.writeUTF(indexMetaData.index());
            writeSettingsToStream(indexMetaData.settings(), out);
            out.writeInt(indexMetaData.mappings().size());
            for (Map.Entry<String, String> entry : indexMetaData.mappings().entrySet()) {
                out.writeUTF(entry.getKey());
                out.writeUTF(entry.getValue());
            }
        }
    }
}
package org.elasticsearch.cluster.metadata;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.util.MapBuilder;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.concurrent.Immutable;
import org.elasticsearch.util.settings.Settings;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import static org.elasticsearch.util.MapBuilder.*;

/**
 * Immutable cluster-wide metadata: all {@link IndexMetaData} keyed by index
 * name, plus the cluster-level shards-per-node limit.
 *
 * @author kimchy (Shay Banon)
 */
@Immutable
public class MetaData implements Iterable<IndexMetaData> {

    // 'final' added: a mutable public static constant could be silently reassigned.
    public static final MetaData EMPTY_META_DATA = newMetaDataBuilder().build();

    private final ImmutableMap<String, IndexMetaData> indices;

    // limits the number of shards per node
    private final int maxNumberOfShardsPerNode;

    // Derived sum of all indices' total shard counts; not serialized.
    private final transient int totalNumberOfShards;

    private MetaData(ImmutableMap<String, IndexMetaData> indices, int maxNumberOfShardsPerNode) {
        this.indices = ImmutableMap.copyOf(indices);
        this.maxNumberOfShardsPerNode = maxNumberOfShardsPerNode;
        int totalNumberOfShards = 0;
        for (IndexMetaData indexMetaData : indices.values()) {
            totalNumberOfShards += indexMetaData.totalNumberOfShards();
        }
        this.totalNumberOfShards = totalNumberOfShards;
    }

    public boolean hasIndex(String index) {
        return indices.containsKey(index);
    }

    /**
     * Returns the metadata for {@code index}, or {@code null} if unknown.
     */
    public IndexMetaData index(String index) {
        return indices.get(index);
    }

    public ImmutableMap<String, IndexMetaData> indices() {
        return this.indices;
    }

    public int maxNumberOfShardsPerNode() {
        return this.maxNumberOfShardsPerNode;
    }

    /**
     * Sum of {@link IndexMetaData#totalNumberOfShards()} across all indices.
     */
    public int totalNumberOfShards() {
        return this.totalNumberOfShards;
    }

    @Override public UnmodifiableIterator<IndexMetaData> iterator() {
        return indices.values().iterator();
    }

    public static Builder newMetaDataBuilder() {
        return new Builder();
    }

    /**
     * Mutable builder for {@link MetaData}; also hosts the stream
     * (de)serialization helpers.
     */
    public static class Builder {

        // limits the number of shards per node
        private int maxNumberOfShardsPerNode = 100;

        private MapBuilder<String, IndexMetaData> indices = newMapBuilder();

        public Builder put(IndexMetaData.Builder indexMetaDataBuilder) {
            return put(indexMetaDataBuilder.build());
        }

        public Builder put(IndexMetaData indexMetaData) {
            indices.put(indexMetaData.index(), indexMetaData);
            return this;
        }

        public Builder remove(String index) {
            indices.remove(index);
            return this;
        }

        /**
         * Copies all indices from an existing {@link MetaData} into this builder.
         */
        public Builder metaData(MetaData metaData) {
            indices.putAll(metaData.indices);
            return this;
        }

        public Builder maxNumberOfShardsPerNode(int maxNumberOfShardsPerNode) {
            this.maxNumberOfShardsPerNode = maxNumberOfShardsPerNode;
            return this;
        }

        public MetaData build() {
            return new MetaData(indices.immutableMap(), maxNumberOfShardsPerNode);
        }

        /**
         * Reads a {@link MetaData} written by {@link #writeTo}: the per-node
         * shard limit followed by a count-prefixed list of {@link IndexMetaData}.
         */
        public static MetaData readFrom(DataInput in, @Nullable Settings globalSettings) throws IOException, ClassNotFoundException {
            Builder builder = new Builder();
            builder.maxNumberOfShardsPerNode(in.readInt());
            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                builder.put(IndexMetaData.Builder.readFrom(in, globalSettings));
            }
            return builder.build();
        }

        public static void writeTo(MetaData metaData, DataOutput out) throws IOException {
            out.writeInt(metaData.maxNumberOfShardsPerNode());
            out.writeInt(metaData.indices.size());
            for (IndexMetaData indexMetaData : metaData) {
                IndexMetaData.Builder.writeTo(indexMetaData, out);
            }
        }
    }
}
package org.elasticsearch.cluster.metadata;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.action.index.NodeIndexCreatedAction;
import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.strategy.ShardsRoutingStrategy;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.InvalidTypeNameException;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.util.Strings;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.settings.ImmutableSettings;
import org.elasticsearch.util.settings.Settings;

import java.util.Arrays;
import java.util.Locale;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.cluster.ClusterState.*;
import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
import static org.elasticsearch.cluster.metadata.MetaData.*;

/**
 * Master-side service performing metadata-level operations: index creation,
 * index deletion and mapping creation, each applied through a cluster-state
 * update task.
 *
 * <p>{@link #createIndex} and {@link #deleteIndex} block (up to the given
 * timeout) until every node in the cluster has acknowledged the operation via
 * {@link NodeIndexCreatedAction} / {@link NodeIndexDeletedAction}.
 *
 * @author kimchy (Shay Banon)
 */
public class MetaDataService extends AbstractComponent {

    private final ClusterService clusterService;

    private final ShardsRoutingStrategy shardsRoutingStrategy;

    private final IndicesService indicesService;

    private final NodeIndexCreatedAction nodeIndexCreatedAction;

    private final NodeIndexDeletedAction nodeIndexDeletedAction;

    @Inject public MetaDataService(Settings settings, ClusterService clusterService, IndicesService indicesService, ShardsRoutingStrategy shardsRoutingStrategy,
                                   NodeIndexCreatedAction nodeIndexCreatedAction, NodeIndexDeletedAction nodeIndexDeletedAction) {
        super(settings);
        this.clusterService = clusterService;
        this.indicesService = indicesService;
        this.shardsRoutingStrategy = shardsRoutingStrategy;
        this.nodeIndexCreatedAction = nodeIndexCreatedAction;
        this.nodeIndexDeletedAction = nodeIndexDeletedAction;
    }

    /**
     * Creates an index cluster-wide and waits (up to {@code timeout}) for all
     * nodes to acknowledge the creation.
     *
     * @param index         the index name; validated against naming rules below
     * @param indexSettings settings for the index; missing shard/replica counts
     *                      default from node settings (5 shards, 1 replica)
     * @param timeout       how long to wait for all-node acknowledgement
     * @return {@code true} if all nodes acknowledged within the timeout
     * @throws IndexAlreadyExistsException if the index already exists
     * @throws InvalidIndexNameException   if the name violates a naming rule
     */
    public synchronized boolean createIndex(final String index, final Settings indexSettings, TimeValue timeout) throws IndexAlreadyExistsException {
        if (clusterService.state().routingTable().hasIndex(index)) {
            throw new IndexAlreadyExistsException(new Index(index));
        }
        if (index.contains(" ")) {
            throw new InvalidIndexNameException(new Index(index), index, "must not contain whitespace");
        }
        if (index.contains(",")) {
            // fixed: message previously had an unbalanced quote ("must not contain ',")
            throw new InvalidIndexNameException(new Index(index), index, "must not contain ','");
        }
        if (index.contains("#")) {
            // fixed: message previously had an unbalanced quote ("must not contain '#")
            throw new InvalidIndexNameException(new Index(index), index, "must not contain '#'");
        }
        if (index.charAt(0) == '_') {
            throw new InvalidIndexNameException(new Index(index), index, "must not start with '_'");
        }
        // Locale.ROOT: default-locale lowercasing is wrong for e.g. Turkish dotless-i
        if (!index.toLowerCase(Locale.ROOT).equals(index)) {
            throw new InvalidIndexNameException(new Index(index), index, "must be lowercase");
        }
        if (!Strings.validFileName(index)) {
            throw new InvalidIndexNameException(new Index(index), index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
        }

        // One count per cluster node; await() below returns once every node reported creation.
        final CountDownLatch latch = new CountDownLatch(clusterService.state().nodes().size());
        NodeIndexCreatedAction.Listener nodeCreatedListener = new NodeIndexCreatedAction.Listener() {
            @Override public void onNodeIndexCreated(String mIndex, String nodeId) {
                if (index.equals(mIndex)) {
                    latch.countDown();
                }
            }
        };
        nodeIndexCreatedAction.add(nodeCreatedListener);
        clusterService.submitStateUpdateTask("create-index [" + index + "]", new ClusterStateUpdateTask() {
            @Override public ClusterState execute(ClusterState currentState) {
                // Rebuild the routing table with all existing indices plus the new one.
                RoutingTable.Builder routingTableBuilder = new RoutingTable.Builder();
                for (IndexRoutingTable indexRoutingTable : currentState.routingTable().indicesRouting().values()) {
                    routingTableBuilder.add(indexRoutingTable);
                }
                ImmutableSettings.Builder indexSettingsBuilder = new ImmutableSettings.Builder().putAll(indexSettings);
                if (indexSettings.get(SETTING_NUMBER_OF_SHARDS) == null) {
                    indexSettingsBuilder.putInt(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
                }
                if (indexSettings.get(SETTING_NUMBER_OF_REPLICAS) == null) {
                    indexSettingsBuilder.putInt(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
                }
                Settings actualIndexSettings = indexSettingsBuilder.build();

                IndexMetaData indexMetaData = newIndexMetaDataBuilder(index).settings(actualIndexSettings).build();
                MetaData newMetaData = newMetaDataBuilder()
                        .metaData(currentState.metaData())
                        .put(indexMetaData)
                        .build();

                IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(index)
                        .initializeEmpty(newMetaData.index(index));
                routingTableBuilder.add(indexRoutingBuilder);

                logger.info("Creating Index [{}], shards [{}]/[{}]", new Object[]{index, indexMetaData.numberOfShards(), indexMetaData.numberOfReplicas()});
                RoutingTable newRoutingTable = shardsRoutingStrategy.reroute(newClusterStateBuilder().state(currentState).routingTable(routingTableBuilder).metaData(newMetaData).build());
                return newClusterStateBuilder().state(currentState).routingTable(newRoutingTable).metaData(newMetaData).build();
            }
        });

        try {
            return latch.await(timeout.millis(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // restore the interrupt flag so callers up the stack can observe it
            Thread.currentThread().interrupt();
            return false;
        } finally {
            nodeIndexCreatedAction.remove(nodeCreatedListener);
        }
    }

    /**
     * Deletes an index cluster-wide and waits (up to {@code timeout}) for all
     * nodes to acknowledge the deletion.
     *
     * @return {@code true} if all nodes acknowledged within the timeout
     * @throws IndexMissingException if the index does not exist
     */
    public synchronized boolean deleteIndex(final String index, TimeValue timeout) throws IndexMissingException {
        RoutingTable routingTable = clusterService.state().routingTable();
        if (!routingTable.hasIndex(index)) {
            throw new IndexMissingException(new Index(index));
        }

        logger.info("Deleting index [{}]", index);

        final CountDownLatch latch = new CountDownLatch(clusterService.state().nodes().size());
        NodeIndexDeletedAction.Listener listener = new NodeIndexDeletedAction.Listener() {
            @Override public void onNodeIndexDeleted(String fIndex, String nodeId) {
                if (fIndex.equals(index)) {
                    latch.countDown();
                }
            }
        };
        nodeIndexDeletedAction.add(listener);
        clusterService.submitStateUpdateTask("delete-index [" + index + "]", new ClusterStateUpdateTask() {
            @Override public ClusterState execute(ClusterState currentState) {
                // Rebuild the routing table with every index except the deleted one.
                RoutingTable.Builder routingTableBuilder = new RoutingTable.Builder();
                for (IndexRoutingTable indexRoutingTable : currentState.routingTable().indicesRouting().values()) {
                    if (!indexRoutingTable.index().equals(index)) {
                        routingTableBuilder.add(indexRoutingTable);
                    }
                }
                MetaData newMetaData = newMetaDataBuilder()
                        .metaData(currentState.metaData())
                        .remove(index)
                        .build();

                RoutingTable newRoutingTable = shardsRoutingStrategy.reroute(
                        newClusterStateBuilder().state(currentState).routingTable(routingTableBuilder).metaData(newMetaData).build());
                return newClusterStateBuilder().state(currentState).routingTable(newRoutingTable).metaData(newMetaData).build();
            }
        });
        try {
            return latch.await(timeout.millis(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // restore the interrupt flag so callers up the stack can observe it
            Thread.currentThread().interrupt();
            return false;
        } finally {
            nodeIndexDeletedAction.remove(listener);
        }
    }

    /**
     * Adds a mapping to one or more existing indices via a cluster-state
     * update task. The mapping source is parsed up front on each index so a
     * parse error fails fast before any state change.
     *
     * @param indices       indices to add the mapping to; all must exist
     * @param mappingType   mapping type name, or {@code null} to take it from
     *                      the parsed mapping source
     * @param mappingSource the mapping definition
     * @throws IndexMissingException    if any index does not exist
     * @throws InvalidTypeNameException if the type name mismatches the source
     *                                  or starts with '_'
     */
    public void addMapping(final String[] indices, String mappingType, final String mappingSource) throws ElasticSearchException {
        ClusterState clusterState = clusterService.state();
        for (String index : indices) {
            IndexRoutingTable indexTable = clusterState.routingTable().indicesRouting().get(index);
            if (indexTable == null) {
                throw new IndexMissingException(new Index(index));
            }
        }

        DocumentMapper documentMapper = null;
        for (String index : indices) {
            IndexService indexService = indicesService.indexService(index);
            if (indexService != null) {
                // try and parse it (no need to add it here) so we can bail early in case of parsing exception
                documentMapper = indexService.mapperService().parse(mappingType, mappingSource);
            } else {
                throw new IndexMissingException(new Index(index));
            }
        }

        // NOTE(review): with an empty indices array documentMapper stays null and the
        // dereference below would NPE — verify callers always pass at least one index.
        if (mappingType == null) {
            mappingType = documentMapper.type();
        } else if (!mappingType.equals(documentMapper.type())) {
            throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
        }
        if (mappingType.charAt(0) == '_') {
            throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
        }

        logger.info("Indices [" + Arrays.toString(indices) + "]: Creating mapping [" + mappingType + "] with source [" + mappingSource + "]");

        // effectively-final copy for use inside the anonymous task
        final String mappingTypeP = mappingType;
        clusterService.submitStateUpdateTask("create-mapping [" + mappingTypeP + "]", new ClusterStateUpdateTask() {
            @Override public ClusterState execute(ClusterState currentState) {
                MetaData.Builder builder = newMetaDataBuilder().metaData(currentState.metaData());
                for (String indexName : indices) {
                    IndexMetaData indexMetaData = currentState.metaData().index(indexName);
                    if (indexMetaData == null) {
                        throw new IndexMissingException(new Index(indexName));
                    }
                    builder.put(newIndexMetaDataBuilder(indexMetaData).addMapping(mappingTypeP, mappingSource));
                }
                return newClusterStateBuilder().state(currentState).metaData(builder).build();
            }
        });
    }

}
package org.elasticsearch.cluster.node;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.util.io.Streamable;
import org.elasticsearch.util.transport.TransportAddress;
import org.elasticsearch.util.transport.TransportAddressSerializers;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.Serializable;

/**
 * A single node in the cluster: a unique id, an optional human readable name, the
 * transport address it can be reached at, and whether it holds data (shards).
 *
 * <p>Equality and hash code are based solely on the node id.
 *
 * @author kimchy (Shay Banon)
 */
public class Node implements Streamable, Serializable {

    public static final ImmutableList<Node> EMPTY_LIST = ImmutableList.of();

    // Never null; empty string when no name was provided.
    private String nodeName = "";

    private String nodeId;

    private TransportAddress address;

    private boolean dataNode = true;

    // Used only by the stream-based factory below.
    private Node() {
    }

    public Node(String nodeId, TransportAddress address) {
        this("", true, nodeId, address);
    }

    public Node(String nodeName, boolean dataNode, String nodeId, TransportAddress address) {
        this.nodeName = nodeName == null ? "" : nodeName;
        this.dataNode = dataNode;
        this.nodeId = nodeId;
        this.address = address;
    }

    /**
     * The address that the node can be communicated with.
     */
    public TransportAddress address() {
        return address;
    }

    /**
     * The unique id of the node.
     */
    public String id() {
        return nodeId;
    }

    /**
     * The name of the node (empty string when unnamed).
     */
    public String name() {
        return nodeName;
    }

    /**
     * Should this node hold data (shards) or not.
     */
    public boolean dataNode() {
        return dataNode;
    }

    /**
     * Reads a node from the stream (counterpart of {@link #writeTo}).
     */
    public static Node readNode(DataInput in) throws IOException, ClassNotFoundException {
        Node result = new Node();
        result.readFrom(in);
        return result;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        nodeName = in.readUTF();
        dataNode = in.readBoolean();
        nodeId = in.readUTF();
        address = TransportAddressSerializers.addressFromStream(in);
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(nodeName);
        out.writeBoolean(dataNode);
        out.writeUTF(nodeId);
        TransportAddressSerializers.addressToStream(out, address);
    }

    @Override public boolean equals(Object obj) {
        // Identity of a node is its id only; name/address/data flag are ignored.
        return obj instanceof Node && nodeId.equals(((Node) obj).nodeId);
    }

    @Override public int hashCode() {
        return nodeId.hashCode();
    }

    @Override public String toString() {
        StringBuilder summary = new StringBuilder();
        if (nodeName.length() > 0) {
            summary.append('[').append(nodeName).append(']');
        }
        if (nodeId != null) {
            summary.append('[').append(nodeId).append(']');
        }
        if (dataNode) {
            summary.append("[data]");
        }
        if (address != null) {
            summary.append('[').append(address).append(']');
        }
        return summary.toString();
    }
}
package org.elasticsearch.cluster.node;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.util.Nullable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.google.common.collect.Lists.*;
import static com.google.common.collect.Maps.*;

/**
 * An immutable snapshot of cluster membership: all known nodes, the subset that holds
 * data, and the ids of the master node and of the local node. Instances are only
 * created through {@link Builder}; mutation methods return new snapshots.
 *
 * @author kimchy (Shay Banon)
 */
public class Nodes implements Iterable<Node> {

    public static Nodes EMPTY_NODES = newNodesBuilder().build();

    // All nodes, keyed by node id.
    private final ImmutableMap<String, Node> nodes;

    // Only the nodes that hold data (dataNode() == true), keyed by node id.
    private final ImmutableMap<String, Node> dataNodes;

    private final String masterNodeId;

    private final String localNodeId;

    private Nodes(ImmutableMap<String, Node> nodes, ImmutableMap<String, Node> dataNodes, String masterNodeId, String localNodeId) {
        this.nodes = nodes;
        this.dataNodes = dataNodes;
        this.masterNodeId = masterNodeId;
        this.localNodeId = localNodeId;
    }

    @Override public UnmodifiableIterator<Node> iterator() {
        return nodes.values().iterator();
    }

    /**
     * Returns <tt>true</tt> if the local node is the master node.
     */
    public boolean localNodeMaster() {
        return localNodeId.equals(masterNodeId);
    }

    /** The number of nodes in the cluster. */
    public int size() {
        return nodes.size();
    }

    /** All nodes, keyed by node id. */
    public ImmutableMap<String, Node> nodes() {
        return this.nodes;
    }

    /** Only the data-holding nodes, keyed by node id. */
    public ImmutableMap<String, Node> dataNodes() {
        return this.dataNodes;
    }

    /** The node with the given id, or <tt>null</tt> if unknown. */
    public Node get(String nodeId) {
        return nodes.get(nodeId);
    }

    public boolean nodeExists(String nodeId) {
        return nodes.containsKey(nodeId);
    }

    public String masterNodeId() {
        return this.masterNodeId;
    }

    public String localNodeId() {
        return this.localNodeId;
    }

    public Node localNode() {
        return nodes.get(localNodeId);
    }

    public Node masterNode() {
        return nodes.get(masterNodeId);
    }

    /**
     * Returns a new snapshot that keeps only the nodes whose ids appear in
     * <tt>newNodes</tt>, with the given master node id.
     */
    public Nodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
        Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
        for (Node node : this) {
            if (newNodes.contains(node.id())) {
                builder.put(node);
            }
        }
        return builder.build();
    }

    /** Returns a new snapshot with the given node added. */
    public Nodes newNode(Node node) {
        return new Builder().putAll(this).put(node).build();
    }

    /**
     * Returns the changes comparing this nodes to the provided nodes.
     * <tt>other</tt> is the previous membership view: nodes present there but not here
     * are reported as removed, nodes present here but not there as added.
     */
    public Delta delta(Nodes other) {
        List<Node> removed = newArrayList();
        List<Node> added = newArrayList();
        for (Node node : other) {
            if (!this.nodeExists(node.id())) {
                removed.add(node);
            }
        }
        for (Node node : this) {
            if (!other.nodeExists(node.id())) {
                added.add(node);
            }
        }
        Node previousMasterNode = null;
        Node newMasterNode = null;
        // Master change is reported only when this view has a master and it differs
        // from (or did not exist in) the previous view.
        if (masterNodeId != null) {
            if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
                previousMasterNode = other.masterNode();
                newMasterNode = masterNode();
            }
        }
        return new Delta(previousMasterNode, newMasterNode, localNodeId, ImmutableList.copyOf(removed), ImmutableList.copyOf(added));
    }

    /** Multi-line human readable dump of the membership, flagging local and master. */
    public String prettyPrint() {
        StringBuilder sb = new StringBuilder();
        sb.append("Nodes: \n");
        for (Node node : this) {
            sb.append("    ").append(node);
            if (node == localNode()) {
                sb.append(", local");
            }
            if (node == masterNode()) {
                sb.append(", master");
            }
            sb.append("\n");
        }
        return sb.toString();
    }

    /** A delta that reports no changes at all. */
    public Delta emptyDelta() {
        return new Delta(null, null, localNodeId, Node.EMPTY_LIST, Node.EMPTY_LIST);
    }

    /**
     * The difference between two membership snapshots: nodes added, nodes removed,
     * and an optional master change.
     */
    public static class Delta {

        private final String localNodeId;
        private final Node previousMasterNode;
        private final Node newMasterNode;
        private final ImmutableList<Node> removed;
        private final ImmutableList<Node> added;


        public Delta(String localNodeId, ImmutableList<Node> removed, ImmutableList<Node> added) {
            this(null, null, localNodeId, removed, added);
        }

        public Delta(@Nullable Node previousMasterNode, @Nullable Node newMasterNode, String localNodeId, ImmutableList<Node> removed, ImmutableList<Node> added) {
            this.previousMasterNode = previousMasterNode;
            this.newMasterNode = newMasterNode;
            this.localNodeId = localNodeId;
            this.removed = removed;
            this.added = added;
        }

        public boolean hasChanges() {
            return masterNodeChanged() || !removed.isEmpty() || !added.isEmpty();
        }

        public boolean masterNodeChanged() {
            return newMasterNode != null;
        }

        public Node previousMasterNode() {
            return previousMasterNode;
        }

        public Node newMasterNode() {
            return newMasterNode;
        }

        public boolean removed() {
            return !removed.isEmpty();
        }

        public ImmutableList<Node> removedNodes() {
            return removed;
        }

        public boolean added() {
            return !added.isEmpty();
        }

        public ImmutableList<Node> addedNodes() {
            return added;
        }

        /** One-line human readable summary of the delta, suppressing self-joins. */
        public String shortSummary() {
            StringBuilder sb = new StringBuilder();
            if (!removed() && masterNodeChanged()) {
                if (newMasterNode.id().equals(localNodeId)) {
                    // we are the master, no nodes we removed, we are actually the first master
                    sb.append("New Master ").append(newMasterNode());
                } else {
                    // we are not the master, so we just got this event. No nodes were removed, so its not a *new* master
                    sb.append("Detected Master ").append(newMasterNode());
                }
            } else {
                if (masterNodeChanged()) {
                    sb.append("Master {New ").append(newMasterNode());
                    if (previousMasterNode() != null) {
                        sb.append(", Previous ").append(previousMasterNode());
                    }
                    sb.append("}");
                }
                if (removed()) {
                    if (masterNodeChanged()) {
                        sb.append(", ");
                    }
                    sb.append("Removed {");
                    for (Node node : removedNodes()) {
                        sb.append(node).append(',');
                    }
                    sb.append("}");
                }
            }
            if (added()) {
                // don't print if there is one added, and it is us
                if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
                    if (removed() || masterNodeChanged()) {
                        sb.append(", ");
                    }
                    sb.append("Added {");
                    for (Node node : addedNodes()) {
                        if (!node.id().equals(localNodeId)) {
                            // don't print ourself
                            sb.append(node).append(',');
                        }
                    }
                    sb.append("}");
                }
            }
            return sb.toString();
        }
    }

    public static Builder newNodesBuilder() {
        return new Builder();
    }

    /** Mutable builder for {@link Nodes}; also hosts the wire (de)serialization helpers. */
    public static class Builder {

        private Map<String, Node> nodes = newHashMap();

        private String masterNodeId;

        private String localNodeId;

        public Builder putAll(Nodes nodes) {
            this.masterNodeId = nodes.masterNodeId();
            this.localNodeId = nodes.localNodeId();
            for (Node node : nodes) {
                put(node);
            }
            return this;
        }

        public Builder put(Node node) {
            nodes.put(node.id(), node);
            return this;
        }

        public Builder putAll(Iterable<Node> nodes) {
            for (Node node : nodes) {
                put(node);
            }
            return this;
        }

        public Builder remove(String nodeId) {
            nodes.remove(nodeId);
            return this;
        }

        public Builder masterNodeId(String masterNodeId) {
            this.masterNodeId = masterNodeId;
            return this;
        }

        public Builder localNodeId(String localNodeId) {
            this.localNodeId = localNodeId;
            return this;
        }

        public Nodes build() {
            // Derive the data-node sub-map once at build time so lookups are O(1) later.
            ImmutableMap.Builder<String, Node> dataNodesBuilder = ImmutableMap.builder();
            for (Map.Entry<String, Node> nodeEntry : nodes.entrySet()) {
                if (nodeEntry.getValue().dataNode()) {
                    dataNodesBuilder.put(nodeEntry.getKey(), nodeEntry.getValue());
                }
            }
            return new Nodes(ImmutableMap.copyOf(nodes), dataNodesBuilder.build(), masterNodeId, localNodeId);
        }

        // NOTE(review): writeUTF will throw a NullPointerException if masterNodeId is
        // null (e.g. no elected master) — confirm a master id is always set before
        // a Nodes instance is serialized.
        public static void writeTo(Nodes nodes, DataOutput out) throws IOException {
            out.writeUTF(nodes.masterNodeId);
            out.writeInt(nodes.size());
            for (Node node : nodes) {
                node.writeTo(out);
            }
        }

        public static Nodes readFrom(DataInput in, @Nullable Node localNode) throws IOException, ClassNotFoundException {
            Builder builder = new Builder();
            builder.masterNodeId(in.readUTF());
            if (localNode != null) {
                builder.localNodeId(localNode.id());
            }
            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                Node node = Node.readNode(in);
                if (localNode != null && node.id().equals(localNode.id())) {
                    // reuse the same instance of our address and local node id for faster equality
                    node = localNode;
                }
                builder.put(node);
            }
            return builder.build();
        }
    }
}
package org.elasticsearch.cluster.routing;

import org.elasticsearch.index.shard.ShardId;

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

/**
 * A {@link ShardsIterator} that chains several underlying iterators, draining them one
 * after another and presenting them as a single flat iteration of {@link ShardRouting}s.
 *
 * <p>Not thread safe: the iteration state (index of the child currently being drained
 * and its live iterator) is held in instance fields.
 *
 * @author kimchy (Shay Banon)
 */
public class CompoundShardsIterator implements ShardsIterator, Iterator<ShardRouting> {

    // Position of the child iterator currently being drained.
    private int index = 0;

    private final List<ShardsIterator> iterators;

    // Live iterator over the current child; lazily (re)created in hasNext().
    private Iterator<ShardRouting> current;

    public CompoundShardsIterator(List<ShardsIterator> iterators) {
        this.iterators = iterators;
    }

    @Override public ShardsIterator reset() {
        // Reset every child, then restart the compound iteration from the first one.
        for (ShardsIterator it : iterators) {
            it.reset();
        }
        index = 0;
        current = null;
        return this;
    }

    @Override public int size() {
        // Total size is the sum of all children's sizes.
        int size = 0;
        for (ShardsIterator it : iterators) {
            size += it.size();
        }
        return size;
    }

    @Override public boolean hasNext() {
        if (index == iterators.size()) {
            return false;
        }
        if (current == null) {
            current = iterators.get(index).iterator();
        }
        // Advance past exhausted children until one has elements or we run out.
        while (!current.hasNext()) {
            if (++index == iterators.size()) {
                return false;
            }
            current = iterators.get(index).iterator();
        }
        return true;
    }

    @Override public ShardRouting next() {
        // hasNext() also positions `current` on the next non-empty child.
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        return current.next();
    }

    @Override public void remove() {
        throw new UnsupportedOperationException();
    }

    @Override public ShardId shardId() {
        // Shard id of the child currently being iterated (last child once exhausted).
        return currentShardsIterator().shardId();
    }

    @Override public Iterator<ShardRouting> iterator() {
        return this;
    }

    /**
     * The child iterator the compound iteration currently points at.
     *
     * @throws NoSuchElementException when the compound iterator has no children at all
     */
    private ShardsIterator currentShardsIterator() throws NoSuchElementException {
        if (iterators.size() == 0) {
            throw new NoSuchElementException();
        }
        if (index == iterators.size()) {
            // Fully exhausted: report the last child.
            return iterators.get(index - 1);
        }
        return iterators.get(index);

    }
}
package org.elasticsearch.cluster.routing;

import org.elasticsearch.util.IdentityHashSet;

import java.util.Iterator;
import java.util.Set;

/**
 * A mutable group of {@link ShardsIterator}s, held in an identity-based set. Supports
 * iterating over the group members and summing the number of shards across them all.
 *
 * @author kimchy (Shay Banon)
 */
public class GroupShardsIterator implements Iterable<ShardsIterator> {

    private final Set<ShardsIterator> iterators;

    public GroupShardsIterator() {
        this(new IdentityHashSet<ShardsIterator>());
    }

    public GroupShardsIterator(Set<ShardsIterator> iterators) {
        this.iterators = iterators;
    }

    /**
     * Adds a single shards iterator to the group.
     */
    public void add(ShardsIterator shardsIterator) {
        iterators.add(shardsIterator);
    }

    /**
     * Adds every shards iterator produced by the given iterable.
     */
    public void add(Iterable<ShardsIterator> shardsIterator) {
        for (ShardsIterator member : shardsIterator) {
            add(member);
        }
    }

    /**
     * The total number of shards across all iterators in the group.
     */
    public int totalSize() {
        int total = 0;
        for (ShardsIterator member : iterators) {
            total += member.size();
        }
        return total;
    }

    /**
     * The number of iterators in the group.
     */
    public int size() {
        return iterators.size();
    }

    /**
     * Direct access to the backing identity set.
     */
    public Set<ShardsIterator> iterators() {
        return iterators;
    }

    @Override public Iterator<ShardsIterator> iterator() {
        return iterators.iterator();
    }
}
package org.elasticsearch.cluster.routing;

/**
 * Thrown when a {@link ShardRouting} is found in a state that is illegal for the
 * operation being performed. The offending shard routing is carried along for
 * diagnostics, and its short summary is prepended to the exception message.
 *
 * @author kimchy (Shay Banon)
 */
public class IllegalShardRoutingStateException extends RoutingException {

    private final ShardRouting shard;

    public IllegalShardRoutingStateException(ShardRouting shard, String message) {
        this(shard, message, null);
    }

    public IllegalShardRoutingStateException(ShardRouting shard, String message, Throwable cause) {
        super(shard.shortSummary() + ": " + message, cause);
        this.shard = shard;
    }

    /**
     * The shard routing that was in an illegal state.
     */
    public ShardRouting shard() {
        return shard;
    }
}
package org.elasticsearch.cluster.routing;

import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.Serializable;

/**
 * The canonical {@link ShardRouting} implementation: which shard of which index is
 * assigned to which node, whether it is the primary copy, and its routing state.
 *
 * <p>Supports two wire formats: the full form ({@link #writeTo}/{@link #readFrom})
 * which includes the index name and shard id, and the "thin" form
 * ({@link #writeToThin}/{@link #readFromThin}) which omits them for callers that
 * already know the enclosing index/shard context.
 *
 * @author kimchy (Shay Banon)
 */
public class ImmutableShardRouting implements Streamable, Serializable, ShardRouting {

    protected String index;

    protected int shardId;

    // Node the shard is currently allocated to; null when unassigned.
    protected String currentNodeId;

    // Target node while relocating; null otherwise.
    protected String relocatingNodeId;

    protected boolean primary;

    protected ShardRoutingState state;

    // Lazily built, cached ShardId; transient so it is rebuilt after deserialization.
    private transient ShardId shardIdentifier;

    public ImmutableShardRouting() {
    }

    public ImmutableShardRouting(ShardRouting copy) {
        this(copy.index(), copy.id(), copy.currentNodeId(), copy.primary(), copy.state());
        this.relocatingNodeId = copy.relocatingNodeId();
    }

    public ImmutableShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state) {
        this.index = index;
        this.shardId = shardId;
        this.currentNodeId = currentNodeId;
        this.primary = primary;
        this.state = state;
    }

    public ImmutableShardRouting(String index, int shardId, String currentNodeId,
                                 String relocatingNodeId, boolean primary, ShardRoutingState state) {
        this(index, shardId, currentNodeId, primary, state);
        this.relocatingNodeId = relocatingNodeId;
    }

    @Override public String index() {
        return this.index;
    }

    @Override public int id() {
        return this.shardId;
    }

    @Override public boolean unassigned() {
        return state == ShardRoutingState.UNASSIGNED;
    }

    @Override public boolean initializing() {
        return state == ShardRoutingState.INITIALIZING;
    }

    @Override public boolean active() {
        // A shard is usable (active) when started or while it relocates.
        return started() || relocating();
    }

    @Override public boolean started() {
        return state == ShardRoutingState.STARTED;
    }

    @Override public boolean relocating() {
        return state == ShardRoutingState.RELOCATING;
    }

    @Override public boolean assignedToNode() {
        return currentNodeId != null;
    }

    @Override public String currentNodeId() {
        return this.currentNodeId;
    }

    @Override public String relocatingNodeId() {
        return this.relocatingNodeId;
    }

    @Override public boolean primary() {
        return this.primary;
    }

    @Override public ShardRoutingState state() {
        return this.state;
    }

    @Override public ShardId shardId() {
        // Cache the ShardId on first use; benign race — duplicates are equal.
        if (shardIdentifier != null) {
            return shardIdentifier;
        }
        shardIdentifier = new ShardId(index, shardId);
        return shardIdentifier;
    }

    /** Reads an entry serialized in the full wire format. */
    public static ImmutableShardRouting readShardRoutingEntry(DataInput in) throws IOException, ClassNotFoundException {
        ImmutableShardRouting entry = new ImmutableShardRouting();
        entry.readFrom(in);
        return entry;
    }

    /** Reads an entry serialized in the thin wire format, with index/shard supplied by the caller. */
    public static ImmutableShardRouting readShardRoutingEntry(DataInput in, String index, int shardId) throws IOException, ClassNotFoundException {
        ImmutableShardRouting entry = new ImmutableShardRouting();
        entry.readFrom(in, index, shardId);
        return entry;
    }

    public void readFrom(DataInput in, String index, int shardId) throws IOException, ClassNotFoundException {
        this.index = index;
        this.shardId = shardId;
        readFromThin(in);
    }

    // NOTE(review): does not null out currentNodeId/relocatingNodeId when the presence
    // flags are false — assumes it is only called on a freshly constructed instance.
    @Override public void readFromThin(DataInput in) throws IOException {
        if (in.readBoolean()) {
            currentNodeId = in.readUTF();
        }

        if (in.readBoolean()) {
            relocatingNodeId = in.readUTF();
        }

        primary = in.readBoolean();
        state = ShardRoutingState.fromValue(in.readByte());
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        readFrom(in, in.readUTF(), in.readInt());
    }

    /**
     * Does not write index name and shard id
     */
    public void writeToThin(DataOutput out) throws IOException {
        // Node ids are nullable, so each is prefixed with a presence flag.
        if (currentNodeId != null) {
            out.writeBoolean(true);
            out.writeUTF(currentNodeId);
        } else {
            out.writeBoolean(false);
        }

        if (relocatingNodeId != null) {
            out.writeBoolean(true);
            out.writeUTF(relocatingNodeId);
        } else {
            out.writeBoolean(false);
        }

        out.writeBoolean(primary);
        out.writeByte(state.value());
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(index);
        out.writeInt(shardId);
        writeToThin(out);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        ImmutableShardRouting that = (ImmutableShardRouting) o;

        if (primary != that.primary) return false;
        if (shardId != that.shardId) return false;
        if (currentNodeId != null ? !currentNodeId.equals(that.currentNodeId) : that.currentNodeId != null)
            return false;
        if (index != null ? !index.equals(that.index) : that.index != null) return false;
        if (relocatingNodeId != null ? !relocatingNodeId.equals(that.relocatingNodeId) : that.relocatingNodeId != null)
            return false;
        if (state != that.state) return false;

        return true;
    }

    @Override
    public int hashCode() {
        int result = index != null ? index.hashCode() : 0;
        result = 31 * result + shardId;
        result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0);
        result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
        result = 31 * result + (primary ? 1 : 0);
        result = 31 * result + (state != null ? state.hashCode() : 0);
        return result;
    }

    @Override public String toString() {
        return shortSummary();
    }

    // Compact one-line form, e.g. "[idx][0], Node[n1], [P], S[STARTED]";
    // "[P]" marks the primary copy, "[B]" a backup (replica) copy.
    @Override public String shortSummary() {
        StringBuilder sb = new StringBuilder();
        sb.append('[').append(index).append(']').append('[').append(shardId).append(']');
        sb.append(", Node[").append(currentNodeId).append("], ");
        if (relocatingNodeId != null) {
            sb.append("Relocating [").append(relocatingNodeId).append("], ");
        }
        if (primary) {
            sb.append("[P]");
        } else {
            sb.append("[B]");
        }
        sb.append(", S[").append(state).append("]");
        return sb.toString();
    }

}
package org.elasticsearch.cluster.routing;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.util.IdentityHashSet;
import org.elasticsearch.util.concurrent.Immutable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * The routing table of a single index: a map from shard id to the
 * {@link IndexShardRoutingTable} holding all copies (primary and replicas) of that
 * shard. Instances are immutable; use {@link Builder} to construct or modify.
 *
 * @author kimchy (Shay Banon)
 */
@Immutable
public class IndexRoutingTable implements Iterable<IndexShardRoutingTable> {

    private final String index;

    // note, we assume that when the index routing is created, ShardRoutings are created for all possible number of
    // shards with state set to UNASSIGNED
    private final ImmutableMap<Integer, IndexShardRoutingTable> shards;

    IndexRoutingTable(String index, Map<Integer, IndexShardRoutingTable> shards) {
        this.index = index;
        this.shards = ImmutableMap.copyOf(shards);
    }

    /** The name of the index this routing table describes. */
    public String index() {
        return this.index;
    }

    @Override public UnmodifiableIterator<IndexShardRoutingTable> iterator() {
        return shards.values().iterator();
    }

    /** All shard routing tables, keyed by shard id. */
    public ImmutableMap<Integer, IndexShardRoutingTable> shards() {
        return shards;
    }

    /** The routing table of the given shard, or <tt>null</tt> if the id is unknown. */
    public IndexShardRoutingTable shard(int shardId) {
        return shards.get(shardId);
    }

    /** Groups the index's shards into one {@link ShardsIterator} per shard. */
    public GroupShardsIterator groupByShardsIt() {
        IdentityHashSet<ShardsIterator> set = new IdentityHashSet<ShardsIterator>();
        for (IndexShardRoutingTable indexShard : this) {
            set.add(indexShard.shardsIt());
        }
        return new GroupShardsIterator(set);
    }

    // Currently a no-op; subclasses or future versions may add invariant checks.
    public void validate() throws RoutingValidationException {
    }

    /** Builder for {@link IndexRoutingTable}; also hosts the wire (de)serialization helpers. */
    public static class Builder {

        private final String index;

        private final Map<Integer, IndexShardRoutingTable> shards = new HashMap<Integer, IndexShardRoutingTable>();

        public Builder(String index) {
            this.index = index;
        }

        public static IndexRoutingTable readFrom(DataInput in) throws IOException, ClassNotFoundException {
            String index = in.readUTF();
            Builder builder = new Builder(index);

            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                // Shard entries are written "thin" (without the index name) — see writeTo.
                builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index));
            }

            return builder.build();
        }

        public static void writeTo(IndexRoutingTable index, DataOutput out) throws IOException {
            out.writeUTF(index.index());
            out.writeInt(index.shards.size());
            for (IndexShardRoutingTable indexShard : index) {
                IndexShardRoutingTable.Builder.writeToThin(indexShard, out);
            }
        }

        /**
         * Initializes a new empty index: one UNASSIGNED routing entry per copy
         * (primary plus replicas) of every shard.
         */
        public Builder initializeEmpty(IndexMetaData indexMetaData) {
            for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
                for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
                    // The first copy (i == 0) of each shard is the primary.
                    addShard(shardId, null, i == 0, ShardRoutingState.UNASSIGNED);
                }
            }
            return this;
        }

        public Builder addIndexShard(IndexShardRoutingTable indexShard) {
            shards.put(indexShard.shardId().id(), indexShard);
            return this;
        }

        public Builder addShard(ShardRouting shard) {
            return internalAddShard(new ImmutableShardRouting(shard));
        }

        public Builder addShard(int shardId, String nodeId, boolean primary, ShardRoutingState state) {
            ImmutableShardRouting shard = new ImmutableShardRouting(index, shardId, nodeId, primary, state);
            return internalAddShard(shard);
        }

        private Builder internalAddShard(ImmutableShardRouting shard) {
            // Create the per-shard table on first sight, otherwise extend the existing one.
            IndexShardRoutingTable indexShard = shards.get(shard.id());
            if (indexShard == null) {
                indexShard = new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard).build();
            } else {
                indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(shard).build();
            }
            shards.put(indexShard.shardId().id(), indexShard);
            return this;
        }

        public IndexRoutingTable build() throws RoutingValidationException {
            IndexRoutingTable indexRoutingTable = new IndexRoutingTable(index, ImmutableMap.copyOf(shards));
            indexRoutingTable.validate();
            return indexRoutingTable;
        }
    }


    /** Multi-line human readable dump of the index routing table. */
    public String prettyPrint() {
        StringBuilder sb = new StringBuilder("-- Index[" + index + "]\n");
        for (IndexShardRoutingTable indexShard : this) {
            sb.append("----ShardId[").append(indexShard.shardId()).append("]\n");
            for (ShardRouting shard : indexShard) {
                sb.append("--------").append(shard.shortSummary()).append("\n");
            }
        }
        return sb.toString();
    }


}
package org.elasticsearch.cluster.routing;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.concurrent.ThreadLocalRandom;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicInteger;

import static com.google.common.collect.Lists.*;

/**
 * The routing table of a single shard: the immutable list of {@link ShardRouting}
 * entries (one primary plus its backups) for one shard id, with helpers to iterate
 * over them sequentially or starting at a rotating (round robin) position.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexShardRoutingTable implements Iterable<ShardRouting> {

    private final ShardId shardId;

    private final ImmutableList<ShardRouting> shards;

    // round robin counter for shardsRandomIt(), seeded randomly so that different
    // instances do not all start iterating from the same shard copy
    private final AtomicInteger counter;

    IndexShardRoutingTable(ShardId shardId, ImmutableList<ShardRouting> shards) {
        this.shardId = shardId;
        this.shards = shards;
        // NOTE(review): assumes shards is non-empty — nextInt(0) would throw; confirm callers guarantee this
        this.counter = new AtomicInteger(ThreadLocalRandom.current().nextInt(shards.size()));
    }

    public ShardId shardId() {
        return shardId;
    }

    @Override public UnmodifiableIterator<ShardRouting> iterator() {
        return shards.iterator();
    }

    /**
     * The number of shard routing entries (primary + backups).
     */
    public int size() {
        return shards.size();
    }

    public ImmutableList<ShardRouting> shards() {
        return shards;
    }

    /**
     * An iterator over the shard copies, always starting at the first entry.
     */
    public ShardsIterator shardsIt() {
        return new IndexShardsIterator(0);
    }

    /**
     * An iterator over the shard copies, starting at a round-robin-advanced position
     * so repeated calls spread load across copies.
     */
    public ShardsIterator shardsRandomIt() {
        return new IndexShardsIterator(nextCounter());
    }

    /**
     * The primary shard routing entry, or <tt>null</tt> if no primary exists in the table.
     */
    public ShardRouting primaryShard() {
        for (ShardRouting shardRouting : this) {
            if (shardRouting.primary()) {
                return shardRouting;
            }
        }
        return null;
    }

    /**
     * All non-primary (backup) shard routing entries.
     */
    public List<ShardRouting> backupsShards() {
        List<ShardRouting> backupShards = newArrayListWithExpectedSize(2);
        for (ShardRouting shardRouting : this) {
            if (!shardRouting.primary()) {
                backupShards.add(shardRouting);
            }
        }
        return backupShards;
    }

    int nextCounter() {
        return counter.getAndIncrement();
    }

    // maps an arbitrary (possibly negative after counter wrap) index onto a valid shard position
    ShardRouting shardModulo(int shardId) {
        return shards.get((Math.abs(shardId) % size()));
    }

    /**
     * The class can be used from different threads, though not designed to be used concurrently
     * from different threads.
     */
    private class IndexShardsIterator implements ShardsIterator, Iterator<ShardRouting> {

        private final int origIndex;

        private volatile int index;

        private volatile int counter = 0;

        private IndexShardsIterator(int index) {
            this.origIndex = index;
            this.index = index;
        }

        @Override public Iterator<ShardRouting> iterator() {
            return this;
        }

        @Override public ShardsIterator reset() {
            counter = 0;
            index = origIndex;
            return this;
        }

        @Override public boolean hasNext() {
            return counter != size();
        }

        @Override public ShardRouting next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            counter++;
            return shardModulo(index++);
        }

        @Override public void remove() {
            throw new UnsupportedOperationException();
        }

        @Override public int size() {
            return IndexShardRoutingTable.this.size();
        }

        @Override public ShardId shardId() {
            return IndexShardRoutingTable.this.shardId();
        }
    }

    public static class Builder {

        private ShardId shardId;

        private final List<ShardRouting> shards;

        public Builder(IndexShardRoutingTable indexShard) {
            this.shardId = indexShard.shardId;
            this.shards = newArrayList(indexShard.shards);
        }

        public Builder(ShardId shardId) {
            this.shardId = shardId;
            this.shards = newArrayList();
        }

        public Builder addShard(ImmutableShardRouting shardEntry) {
            for (ShardRouting shard : shards) {
                // don't add two that map to the same node id
                // we rely on the fact that a node does not have primary and backup of the same shard
                if (shard.assignedToNode() && shardEntry.assignedToNode()
                        && shard.currentNodeId().equals(shardEntry.currentNodeId())) {
                    return this;
                }
            }
            shards.add(shardEntry);
            return this;
        }

        public IndexShardRoutingTable build() {
            return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shards));
        }

        /**
         * Reads a shard routing table (including its index name) from the stream.
         */
        public static IndexShardRoutingTable readFrom(DataInput in) throws IOException, ClassNotFoundException {
            String index = in.readUTF();
            return readFromThin(in, index);
        }

        /**
         * Reads a shard routing table from the stream, with the index name supplied
         * externally (the "thin" form does not serialize it per shard).
         */
        public static IndexShardRoutingTable readFromThin(DataInput in, String index) throws IOException, ClassNotFoundException {
            int iShardId = in.readInt();
            ShardId shardId = new ShardId(index, iShardId);
            Builder builder = new Builder(shardId);

            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                ImmutableShardRouting shard = ImmutableShardRouting.readShardRoutingEntry(in, index, iShardId);
                builder.addShard(shard);
            }

            return builder.build();
        }

        public static void writeTo(IndexShardRoutingTable indexShard, DataOutput out) throws IOException {
            out.writeUTF(indexShard.shardId().index().name());
            writeToThin(indexShard, out);
        }

        public static void writeToThin(IndexShardRoutingTable indexShard, DataOutput out) throws IOException {
            out.writeInt(indexShard.shardId.id());
            out.writeInt(indexShard.shards.size());
            for (ShardRouting entry : indexShard) {
                entry.writeToThin(out);
            }
        }
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing; + +/** + * @author kimchy (Shay Banon) + */ +public class MutableShardRouting extends ImmutableShardRouting { + + public MutableShardRouting() { + } + + public MutableShardRouting(ShardRouting copy) { + super(copy); + } + + public MutableShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state) { + super(index, shardId, currentNodeId, primary, state); + } + + public MutableShardRouting(String index, int shardId, String currentNodeId, + String relocatingNodeId, boolean primary, ShardRoutingState state) { + super(index, shardId, currentNodeId, relocatingNodeId, primary, state); + } + + public void assignToNode(String nodeId) { + if (currentNodeId == null) { + assert state == ShardRoutingState.UNASSIGNED; + + state = ShardRoutingState.INITIALIZING; + currentNodeId = nodeId; + relocatingNodeId = null; + } else if (state == ShardRoutingState.STARTED) { + state = ShardRoutingState.RELOCATING; + relocatingNodeId = nodeId; + } else if (state == ShardRoutingState.RELOCATING) { + assert nodeId.equals(relocatingNodeId); + } + } + + public void relocate(String relocatingNodeId) { + assert state == ShardRoutingState.STARTED; + state = ShardRoutingState.RELOCATING; + this.relocatingNodeId = relocatingNodeId; + } + + public void cancelRelocation() { + assert state == ShardRoutingState.RELOCATING; + assert assignedToNode(); + assert relocatingNodeId != null; + + state = ShardRoutingState.STARTED; + relocatingNodeId = null; + } + + public void deassignNode() { + assert 
state != ShardRoutingState.UNASSIGNED; + + state = ShardRoutingState.UNASSIGNED; + this.currentNodeId = null; + this.relocatingNodeId = null; + } + + public void moveToStarted() { + assert state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING; + relocatingNodeId = null; + state = ShardRoutingState.STARTED; + } + + public void moveToPrimary() { + if (primary) { + throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary"); + } + primary = true; + } + + public void moveToBackup() { + if (!primary) { + throw new IllegalShardRoutingStateException(this, "Already primary, can't move to backup"); + } + primary = false; + } +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java new file mode 100644 index 00000000000..dbe837f703c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.cluster.routing;

import org.elasticsearch.index.shard.ShardId;

import java.util.Iterator;
import java.util.List;

/**
 * A {@link ShardsIterator} backed by a plain list of {@link ShardRouting} entries,
 * iterating them in list order. {@link #reset()} restarts iteration from the beginning.
 *
 * @author kimchy (Shay Banon)
 */
public class PlainShardsIterator implements ShardsIterator {

    private final ShardId shardId;

    private final List<ShardRouting> shards;

    // current position; replaced (not rewound) on reset()
    private Iterator<ShardRouting> iterator;

    public PlainShardsIterator(ShardId shardId, List<ShardRouting> shards) {
        this.shardId = shardId;
        this.shards = shards;
        this.iterator = shards.iterator();
    }

    @Override public ShardsIterator reset() {
        this.iterator = shards.iterator();
        return this;
    }

    @Override public int size() {
        return shards.size();
    }

    @Override public ShardId shardId() {
        return this.shardId;
    }

    @Override public Iterator<ShardRouting> iterator() {
        return this;
    }

    @Override public boolean hasNext() {
        return iterator.hasNext();
    }

    @Override public ShardRouting next() {
        return iterator.next();
    }

    @Override public void remove() {
        throw new UnsupportedOperationException();
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing; + +/** + * @author kimchy (Shay Banon) + */ +public final class RoutingBuilders { + + private RoutingBuilders() { + + } + + public static RoutingTable.Builder routingTable() { + return new RoutingTable.Builder(); + } + + public static IndexRoutingTable.Builder indexRoutingTable(String index) { + return new IndexRoutingTable.Builder(index); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/RoutingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/RoutingException.java new file mode 100644 index 00000000000..69c7c7d3016 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/RoutingException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
package org.elasticsearch.cluster.routing;

import org.elasticsearch.ElasticSearchException;

/**
 * A base exception for failures in cluster shard routing operations.
 *
 * @author kimchy (Shay Banon)
 */
public class RoutingException extends ElasticSearchException {

    public RoutingException(String message) {
        super(message);
    }

    public RoutingException(String message, Throwable cause) {
        super(message, cause);
    }
}
package org.elasticsearch.cluster.routing;

import org.elasticsearch.cluster.metadata.MetaData;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import static com.google.common.collect.Lists.*;

/**
 * The (mutable) set of shard routings currently assigned to a single node, used while
 * computing a new routing table. Provides queries by shard state and allocation checks.
 *
 * @author kimchy (Shay Banon)
 */
public class RoutingNode implements Iterable<MutableShardRouting> {

    private final String nodeId;

    private final List<MutableShardRouting> shards;

    public RoutingNode(String nodeId) {
        this(nodeId, new ArrayList<MutableShardRouting>());
    }

    public RoutingNode(String nodeId, List<MutableShardRouting> shards) {
        this.nodeId = nodeId;
        this.shards = shards;
    }

    @Override public Iterator<MutableShardRouting> iterator() {
        return shards.iterator();
    }

    public String nodeId() {
        return this.nodeId;
    }

    public List<MutableShardRouting> shards() {
        return this.shards;
    }

    /**
     * Adds the shard to this node and assigns it here (see
     * {@link MutableShardRouting#assignToNode(String)} for the state transition).
     */
    public void add(MutableShardRouting shard) {
        shards.add(shard);
        shard.assignToNode(nodeId);
    }

    /**
     * Removes all shard routings with the given shard id from this node.
     */
    public void removeByShardId(int shardId) {
        for (Iterator<MutableShardRouting> it = shards.iterator(); it.hasNext();) {
            MutableShardRouting shard = it.next();
            if (shard.id() == shardId) {
                it.remove();
            }
        }
    }

    /**
     * The number of shards on this node that are in any of the given states.
     */
    public int numberOfShardsWithState(ShardRoutingState... states) {
        int count = 0;
        for (MutableShardRouting shardEntry : this) {
            for (ShardRoutingState state : states) {
                if (shardEntry.state() == state) {
                    count++;
                }
            }
        }
        return count;
    }

    /**
     * The shards on this node that are in any of the given states.
     */
    public List<MutableShardRouting> shardsWithState(ShardRoutingState... states) {
        List<MutableShardRouting> shards = newArrayList();
        for (MutableShardRouting shardEntry : this) {
            for (ShardRoutingState state : states) {
                if (shardEntry.state() == state) {
                    shards.add(shardEntry);
                }
            }
        }
        return shards;
    }

    /**
     * The number of shards on this node that are NOT in the given state.
     */
    public int numberOfShardsNotWithState(ShardRoutingState state) {
        int count = 0;
        for (MutableShardRouting shardEntry : this) {
            if (shardEntry.state() != state) {
                count++;
            }
        }
        return count;
    }

    /**
     * The number of shards on this node that will not be eventually relocated.
     */
    public int numberOfOwningShards() {
        int count = 0;
        for (MutableShardRouting shardEntry : this) {
            if (shardEntry.state() != ShardRoutingState.RELOCATING) {
                count++;
            }
        }

        return count;
    }

    /**
     * Whether this node is allowed to receive any additional shard, based on the
     * cluster-wide max-shards-per-node setting.
     */
    public boolean canAllocate(MetaData metaData, RoutingTable routingTable) {
        return shards().size() < metaData.maxNumberOfShardsPerNode();
    }

    /**
     * Whether the requested shard can be placed on this node.
     */
    public boolean canAllocate(ShardRouting requested) {
        for (MutableShardRouting current : shards) {
            // we do not allow for two shards of the same shard id to exists on the same node
            if (current.shardId().equals(requested.shardId())) {
                return false;
            }
        }
        return true;
    }

    public boolean canAllocate(MutableShardRouting requested) {
        // same check as the ShardRouting overload; delegate to keep a single implementation
        return canAllocate((ShardRouting) requested);
    }

    public String prettyPrint() {
        StringBuilder sb = new StringBuilder();
        sb.append("-----NodeId[").append(nodeId).append("]\n");
        for (MutableShardRouting entry : shards) {
            sb.append("--------").append(entry.shortSummary()).append('\n');
        }
        return sb.toString();
    }
}
package org.elasticsearch.cluster.routing;

import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.util.concurrent.NotThreadSafe;

import java.util.*;

import static com.google.common.collect.Lists.*;
import static com.google.common.collect.Maps.*;

/**
 * A mutable, node-centric view of a {@link RoutingTable}: shards grouped by the node
 * they are assigned to, plus the list of unassigned shards. Built once from an
 * (immutable) routing table and then mutated by the shard allocation strategies.
 *
 * @author kimchy (Shay Banon)
 */
@NotThreadSafe
public class RoutingNodes implements Iterable<RoutingNode> {

    private final MetaData metaData;

    private final RoutingTable routingTable;

    private final Map<String, RoutingNode> nodesToShards = newHashMap();

    private final List<MutableShardRouting> unassigned = newArrayList();

    public RoutingNodes(MetaData metaData, RoutingTable routingTable) {
        this.metaData = metaData;
        this.routingTable = routingTable;
        Map<String, List<MutableShardRouting>> nodesToShards = newHashMap();
        for (IndexRoutingTable indexRoutingTable : routingTable.indicesRouting().values()) {
            for (IndexShardRoutingTable indexShard : indexRoutingTable) {
                for (ShardRouting shard : indexShard) {
                    if (shard.assignedToNode()) {
                        List<MutableShardRouting> entries = nodesToShards.get(shard.currentNodeId());
                        if (entries == null) {
                            entries = newArrayList();
                            nodesToShards.put(shard.currentNodeId(), entries);
                        }
                        entries.add(new MutableShardRouting(shard));
                        if (shard.relocating()) {
                            entries = nodesToShards.get(shard.relocatingNodeId());
                            if (entries == null) {
                                entries = newArrayList();
                                nodesToShards.put(shard.relocatingNodeId(), entries);
                            }
                            // add the counterpart shard with relocatingNodeId reflecting the source from which
                            // it's relocating from.
                            entries.add(new MutableShardRouting(shard.index(), shard.id(), shard.relocatingNodeId(),
                                    shard.currentNodeId(), shard.primary(), ShardRoutingState.INITIALIZING));
                        }
                    } else {
                        unassigned.add(new MutableShardRouting(shard));
                    }
                }
            }
        }
        for (Map.Entry<String, List<MutableShardRouting>> entry : nodesToShards.entrySet()) {
            String nodeId = entry.getKey();
            this.nodesToShards.put(nodeId, new RoutingNode(nodeId, entry.getValue()));
        }
    }

    @Override public Iterator<RoutingNode> iterator() {
        return nodesToShards.values().iterator();
    }

    public RoutingTable routingTable() {
        return routingTable;
    }

    public MetaData metaData() {
        return this.metaData;
    }

    /**
     * The average number of shards a node should hold (integer division).
     * NOTE(review): divides by the number of nodes that currently hold shards — confirm
     * callers never invoke this when no node has any shard assigned.
     */
    public int requiredAverageNumberOfShardsPerNode() {
        return metaData.totalNumberOfShards() / nodesToShards.size();
    }

    public boolean hasUnassigned() {
        return !unassigned.isEmpty();
    }

    public List<MutableShardRouting> unassigned() {
        return this.unassigned;
    }

    public Map<String, RoutingNode> nodesToShards() {
        return nodesToShards;
    }

    public RoutingNode node(String nodeId) {
        return nodesToShards.get(nodeId);
    }

    /**
     * The total number of shards (across all nodes) in the given state.
     */
    public int numberOfShardsOfType(ShardRoutingState state) {
        int count = 0;
        for (RoutingNode routingNode : this) {
            count += routingNode.numberOfShardsWithState(state);
        }
        return count;
    }

    /**
     * All shards (across all nodes) in the given state.
     */
    public List<MutableShardRouting> shardsOfType(ShardRoutingState state) {
        List<MutableShardRouting> shards = newArrayList();
        for (RoutingNode routingNode : this) {
            shards.addAll(routingNode.shardsWithState(state));
        }
        return shards;
    }

    /**
     * The nodes sorted by ascending number of shards assigned to them.
     */
    public List<RoutingNode> sortedNodesLeastToHigh() {
        return nodesToShardsSorted(new Comparator<RoutingNode>() {
            @Override public int compare(RoutingNode o1, RoutingNode o2) {
                return Integer.compare(o1.shards().size(), o2.shards().size());
            }
        });
    }

    /**
     * The nodes sorted by the given comparator; unsorted if the comparator is <tt>null</tt>.
     */
    public List<RoutingNode> nodesToShardsSorted(Comparator<RoutingNode> comparator) {
        List<RoutingNode> nodes = new ArrayList<RoutingNode>(nodesToShards.values());
        if (comparator != null) {
            Collections.sort(nodes, comparator);
        }
        return nodes;
    }

    public String prettyPrint() {
        StringBuilder sb = new StringBuilder("Routing Nodes:\n");
        for (RoutingNode routingNode : this) {
            sb.append(routingNode.prettyPrint());
        }
        sb.append("---- Unassigned\n");
        for (MutableShardRouting shardEntry : unassigned) {
            sb.append("--------").append(shardEntry.shortSummary()).append('\n');
        }
        return sb.toString();
    }
}
package org.elasticsearch.cluster.routing;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.routing.strategy.ShardsRoutingStrategy;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.component.Lifecycle;
import org.elasticsearch.util.component.LifecycleComponent;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.Future;

import static org.elasticsearch.cluster.ClusterState.*;
import static org.elasticsearch.util.TimeValue.*;

/**
 * Listens to cluster state changes and, when this node is the master, periodically
 * reroutes the routing table (via {@link ShardsRoutingStrategy}) whenever it has been
 * marked dirty by a routing-relevant cluster event.
 *
 * @author kimchy (Shay Banon)
 */
public class RoutingService extends AbstractComponent implements ClusterStateListener, LifecycleComponent<RoutingService> {

    private final Lifecycle lifecycle = new Lifecycle();

    private final ThreadPool threadPool;

    private final ClusterService clusterService;

    private final ShardsRoutingStrategy shardsRoutingStrategy;

    // interval between scheduled reroute attempts ("schedule" component setting)
    private final TimeValue schedule;

    // set when a cluster event requires a reroute; cleared when the updater runs
    private volatile boolean routingTableDirty = false;

    // non-null only while this node is master and the periodic updater is scheduled
    private volatile Future<?> scheduledRoutingTableFuture;

    @Inject public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, ShardsRoutingStrategy shardsRoutingStrategy) {
        super(settings);
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.shardsRoutingStrategy = shardsRoutingStrategy;
        this.schedule = componentSettings.getAsTime("schedule", timeValueSeconds(10));
    }

    @Override public Lifecycle.State lifecycleState() {
        return this.lifecycle.state();
    }

    @Override public RoutingService start() throws ElasticSearchException {
        if (!lifecycle.moveToStarted()) {
            return this;
        }
        clusterService.add(this);
        return this;
    }

    @Override public RoutingService stop() throws ElasticSearchException {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        if (scheduledRoutingTableFuture != null) {
            scheduledRoutingTableFuture.cancel(true);
        }
        clusterService.remove(this);
        return this;
    }

    public void close() {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }
    }

    @Override public void clusterChanged(ClusterChangedEvent event) {
        if (event.source().equals(RoutingTableUpdater.CLUSTER_UPDATE_TASK_SOURCE)) {
            // that's us, ignore this event
            return;
        }
        if (event.state().nodes().localNodeMaster()) {
            // we are master, schedule the routing table updater
            if (scheduledRoutingTableFuture == null) {
                scheduledRoutingTableFuture = threadPool.scheduleWithFixedDelay(new RoutingTableUpdater(), schedule);
            }
            if (event.nodesRemoved()) {
                // if nodes were removed, we don't want to wait for the scheduled task
                // since we want to get primary election as fast as possible
                routingTableDirty = true;
                threadPool.execute(new RoutingTableUpdater());
            } else {
                if (event.routingTableChanged() || event.nodesAdded()) {
                    routingTableDirty = true;
                }
            }
        } else {
            // no longer master: stop the periodic updater
            if (scheduledRoutingTableFuture != null) {
                scheduledRoutingTableFuture.cancel(true);
                scheduledRoutingTableFuture = null;
            }
        }
    }

    /**
     * Submits a cluster state update task that recomputes the routing table when it
     * has been marked dirty.
     */
    private class RoutingTableUpdater implements Runnable {

        private static final String CLUSTER_UPDATE_TASK_SOURCE = "routing-table-updater";

        @Override public void run() {
            try {
                if (!routingTableDirty) {
                    return;
                }
                if (lifecycle.stopped()) {
                    return;
                }
                routingTableDirty = false;
                clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, new ClusterStateUpdateTask() {
                    @Override public ClusterState execute(ClusterState currentState) {
                        RoutingTable newRoutingTable = shardsRoutingStrategy.reroute(currentState);
                        return newClusterStateBuilder().state(currentState).routingTable(newRoutingTable).build();
                    }
                });
            } catch (Exception e) {
                // best effort: the next scheduled run will retry
                logger.warn("Failed to reroute routing table", e);
            }
        }
    }
}
a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java new file mode 100644 index 00000000000..cec71a68760 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.cluster.routing;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.index.Index;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.util.concurrent.Immutable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Maps.*;

/**
 * An immutable snapshot of the cluster-wide shard routing: a map from index name to that
 * index's {@link IndexRoutingTable}. Instances are built via {@link Builder} and never mutated.
 *
 * @author kimchy (Shay Banon)
 */
@Immutable
public class RoutingTable implements Iterable<IndexRoutingTable> {

    /** Shared empty table, used as the initial routing state. */
    public static final RoutingTable EMPTY_ROUTING_TABLE = newRoutingTableBuilder().build();

    // index name to IndexRoutingTable map
    private final ImmutableMap<String, IndexRoutingTable> indicesRouting;

    RoutingTable(Map<String, IndexRoutingTable> indicesRouting) {
        this.indicesRouting = ImmutableMap.copyOf(indicesRouting);
    }

    @Override public UnmodifiableIterator<IndexRoutingTable> iterator() {
        return indicesRouting.values().iterator();
    }

    public boolean hasIndex(String index) {
        return indicesRouting.containsKey(index);
    }

    /** Returns the routing table of the given index, or {@code null} if the index is unknown. */
    public IndexRoutingTable index(String index) {
        return indicesRouting.get(index);
    }

    public Map<String, IndexRoutingTable> indicesRouting() {
        return indicesRouting;
    }

    /** Builds a mutable, node-centric view of this routing table. */
    public RoutingNodes routingNodes(MetaData metaData) {
        return new RoutingNodes(metaData, this);
    }

    /**
     * All shard routing entries of the given indices (all indices when none are given).
     *
     * @throws IndexMissingException if one of the named indices is not in this table
     */
    public List<ShardRouting> allShards(String... indices) throws IndexMissingException {
        List<ShardRouting> shards = Lists.newArrayList();
        if (indices == null || indices.length == 0) {
            indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
        }
        for (String index : indices) {
            IndexRoutingTable indexRoutingTable = index(index);
            if (indexRoutingTable == null) {
                throw new IndexMissingException(new Index(index));
            }
            for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                for (ShardRouting shardRouting : indexShardRoutingTable) {
                    shards.add(shardRouting);
                }
            }
        }
        return shards;
    }

    public static Builder newRoutingTableBuilder() {
        return new Builder();
    }

    public static class Builder {

        private final Map<String, IndexRoutingTable> indicesRouting = newHashMap();

        public Builder add(IndexRoutingTable indexRoutingTable) {
            indexRoutingTable.validate();
            indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
            return this;
        }

        public Builder add(IndexRoutingTable.Builder indexRoutingTableBuilder) {
            add(indexRoutingTableBuilder.build());
            return this;
        }

        /**
         * Rebuilds the per-index routing from the (possibly mutated) node-centric view,
         * including the still-unassigned shards.
         */
        public Builder updateNodes(RoutingNodes routingNodes) {
            Map<String, IndexRoutingTable.Builder> indexRoutingTableBuilders = newHashMap();
            for (RoutingNode routingNode : routingNodes) {
                for (MutableShardRouting shardRoutingEntry : routingNode) {
                    // every relocating shard has a double entry, ignore the target one.
                    if (shardRoutingEntry.state() == ShardRoutingState.INITIALIZING && shardRoutingEntry.relocatingNodeId() != null)
                        continue;

                    String index = shardRoutingEntry.index();
                    IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
                    if (indexBuilder == null) {
                        indexBuilder = new IndexRoutingTable.Builder(index);
                        indexRoutingTableBuilders.put(index, indexBuilder);
                    }
                    indexBuilder.addShard(new ImmutableShardRouting(shardRoutingEntry));
                }
            }
            for (MutableShardRouting shardRoutingEntry : routingNodes.unassigned()) {
                String index = shardRoutingEntry.index();
                IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
                if (indexBuilder == null) {
                    indexBuilder = new IndexRoutingTable.Builder(index);
                    indexRoutingTableBuilders.put(index, indexBuilder);
                }
                indexBuilder.addShard(new ImmutableShardRouting(shardRoutingEntry));
            }
            for (IndexRoutingTable.Builder indexBuilder : indexRoutingTableBuilders.values()) {
                add(indexBuilder);
            }
            return this;
        }

        public RoutingTable build() {
            return new RoutingTable(indicesRouting);
        }

        public static RoutingTable readFrom(DataInput in) throws IOException, ClassNotFoundException {
            Builder builder = new Builder();
            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
                builder.add(index);
            }
            return builder.build();
        }

        public static void writeTo(RoutingTable table, DataOutput out) throws IOException {
            out.writeInt(table.indicesRouting.size());
            for (IndexRoutingTable index : table.indicesRouting.values()) {
                IndexRoutingTable.Builder.writeTo(index, out);
            }
        }
    }

    public String prettyPrint() {
        StringBuilder sb = new StringBuilder("Routing Table:\n");
        for (Map.Entry<String, IndexRoutingTable> entry : indicesRouting.entrySet()) {
            sb.append(entry.getValue().prettyPrint()).append('\n');
        }
        return sb.toString();
    }
}
package org.elasticsearch.cluster.routing;

/**
 * Thrown when a routing table (or one of its index routing tables) fails validation,
 * e.g. during {@code IndexRoutingTable.validate()}.
 *
 * @author kimchy (Shay Banon)
 */
public class RoutingValidationException extends RoutingException {

    public RoutingValidationException(String message) {
        super(message);
    }

    public RoutingValidationException(String message, Throwable cause) {
        super(message, cause);
    }
}
package org.elasticsearch.cluster.routing;

import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.Serializable;

/**
 * A single shard's routing entry: which index shard it is, which node (if any) it is
 * allocated to, its relocation target, and its lifecycle {@link ShardRoutingState}.
 *
 * @author kimchy (Shay Banon)
 */
public interface ShardRouting extends Streamable, Serializable {

    /** The name of the index this shard belongs to. */
    String index();

    /** The shard id within the index. */
    int id();

    // NOTE(review): the four state predicates below presumably mirror
    // ShardRoutingState.UNASSIGNED/INITIALIZING/STARTED/RELOCATING — confirm in implementations.
    boolean unassigned();

    boolean initializing();

    boolean started();

    boolean relocating();

    /**
     * Relocating or started.
     */
    boolean active();

    /** {@code true} when the shard is allocated to some node. */
    boolean assignedToNode();

    /** The id of the node this shard is currently allocated to, or {@code null} when unassigned. */
    String currentNodeId();

    /** The id of the node this shard is relocating from, or {@code null} when not relocating. */
    String relocatingNodeId();

    /** {@code true} for the primary copy, {@code false} for a backup/replica. */
    boolean primary();

    ShardRoutingState state();

    ShardId shardId();

    /** A short, human-readable summary of this routing entry. */
    String shortSummary();

    /**
     * Does not write index name and shard id
     */
    void writeToThin(DataOutput out) throws IOException;

    void readFromThin(DataInput in) throws ClassNotFoundException, IOException;
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.ElasticSearchIllegalStateException; + +/** + * @author kimchy (Shay Banon) + */ +public enum ShardRoutingState { + UNASSIGNED((byte) 1), INITIALIZING((byte) 2), STARTED((byte) 3), RELOCATING((byte) 4); + + private byte value; + + ShardRoutingState(byte value) { + this.value = value; + } + + public byte value() { + return this.value; + } + + public static ShardRoutingState fromValue(byte value) { + switch (value) { + case 1: + return UNASSIGNED; + case 2: + return INITIALIZING; + case 3: + return STARTED; + case 4: + return RELOCATING; + default: + throw new ElasticSearchIllegalStateException("No should routing state mapped for [" + value + "]"); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java new file mode 100644 index 00000000000..9a372a2854d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.elasticsearch.cluster.routing;

import org.elasticsearch.index.shard.ShardId;

import java.util.Iterator;

/**
 * An iterator over the routing entries of a single shard (primary and backups).
 * It is both an {@link Iterable} and its own {@link Iterator}; call {@link #reset()}
 * before re-iterating.
 *
 * @author kimchy (Shay Banon)
 */
public interface ShardsIterator extends Iterable<ShardRouting>, Iterator<ShardRouting> {

    /**
     * Resets the iterator.
     */
    ShardsIterator reset();

    /** The number of shard routing entries this iterator covers. */
    int size();

    /** The id of the shard this iterator is for. */
    ShardId shardId();
}
package org.elasticsearch.cluster.routing.strategy;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.Node;
import org.elasticsearch.cluster.routing.*;

import java.util.Iterator;
import java.util.List;
import java.util.Set;

import static com.google.common.collect.Sets.*;
import static org.elasticsearch.cluster.routing.ShardRoutingState.*;

/**
 * Default {@link ShardsRoutingStrategy}. A reroute pass: (1) de-associates shards from dead
 * nodes, (2) registers newly joined nodes, (3) allocates unassigned shards round-robin over
 * the least-loaded nodes, (4) promotes an assigned backup when a primary is unassigned, and
 * (5) rebalances started shards from the most-loaded node toward the least-loaded one.
 * All helpers mutate the given {@link RoutingNodes} view and report whether anything changed;
 * callers get back the SAME routing table instance when nothing changed.
 *
 * @author kimchy (Shay Banon)
 */
public class DefaultShardsRoutingStrategy implements ShardsRoutingStrategy {

    @Override public RoutingTable applyStartedShards(ClusterState clusterState, Iterable<? extends ShardRouting> startedShardEntries) {
        RoutingNodes routingNodes = clusterState.routingNodes();
        if (!applyStartedShards(routingNodes, startedShardEntries)) {
            // same instance signals "no change" to the caller
            return clusterState.routingTable();
        }
        return new RoutingTable.Builder().updateNodes(routingNodes).build();
    }

    @Override public RoutingTable applyFailedShards(ClusterState clusterState, Iterable<? extends ShardRouting> failedShardEntries) {
        RoutingNodes routingNodes = clusterState.routingNodes();
        if (!applyFailedShards(routingNodes, failedShardEntries)) {
            return clusterState.routingTable();
        }
        return new RoutingTable.Builder().updateNodes(routingNodes).build();
    }

    @Override public RoutingTable reroute(ClusterState clusterState) {
        RoutingNodes routingNodes = clusterState.routingNodes();

        Iterable<Node> dataNodes = clusterState.nodes().dataNodes().values();

        boolean changed = false;
        // first, clear from the shards any node id they used to belong to that is now dead
        changed |= deassociateDeadNodes(routingNodes, dataNodes);

        // create a sorted list of from nodes with least number of shards to the maximum ones
        applyNewNodes(routingNodes, dataNodes);

        // now allocate all the unassigned to available nodes
        if (routingNodes.hasUnassigned()) {
            changed |= allocateUnassigned(routingNodes);
        }

        // elect new primaries (backups that should become primaries)
        changed |= electPrimaries(routingNodes);

        // rebalance
        changed |= rebalance(routingNodes);

        if (!changed) {
            return clusterState.routingTable();
        }

        return new RoutingTable.Builder().updateNodes(routingNodes).build();
    }

    /**
     * Moves STARTED shards from over-average nodes to under-average nodes (two-pointer sweep
     * over the nodes sorted by shard count), repeating while any relocation was performed.
     */
    private boolean rebalance(RoutingNodes routingNodes) {
        boolean changed = false;
        List<RoutingNode> sortedNodesLeastToHigh = routingNodes.sortedNodesLeastToHigh();
        if (sortedNodesLeastToHigh.isEmpty()) {
            return false;
        }
        int lowIndex = 0;
        int highIndex = sortedNodesLeastToHigh.size() - 1;
        boolean relocationPerformed;
        do {
            relocationPerformed = false;
            while (lowIndex != highIndex) {
                RoutingNode lowRoutingNode = sortedNodesLeastToHigh.get(lowIndex);
                RoutingNode highRoutingNode = sortedNodesLeastToHigh.get(highIndex);
                int averageNumOfShards = routingNodes.requiredAverageNumberOfShardsPerNode();

                // only active shards can be removed so must count only active ones.
                if (highRoutingNode.numberOfOwningShards() <= averageNumOfShards) {
                    highIndex--;
                    continue;
                }

                if (lowRoutingNode.shards().size() >= averageNumOfShards) {
                    lowIndex++;
                    continue;
                }

                boolean relocated = false;
                List<MutableShardRouting> activeShards = highRoutingNode.shardsWithState(STARTED);
                for (MutableShardRouting activeShard : activeShards) {
                    if (lowRoutingNode.canAllocate(routingNodes.metaData(), routingNodes.routingTable()) && lowRoutingNode.canAllocate(activeShard)) {
                        changed = true;
                        // target gets an INITIALIZING copy pointing back at the source node
                        lowRoutingNode.add(new MutableShardRouting(activeShard.index(), activeShard.id(),
                                lowRoutingNode.nodeId(), activeShard.currentNodeId(),
                                activeShard.primary(), INITIALIZING));

                        activeShard.relocate(lowRoutingNode.nodeId());
                        relocated = true;
                        relocationPerformed = true;
                        break;
                    }
                }

                if (!relocated) {
                    highIndex--;
                }
            }
        } while (relocationPerformed);
        return changed;
    }

    /**
     * For every unassigned primary, finds an assigned backup of the same shard and swaps
     * the primary flag between the two entries.
     */
    private boolean electPrimaries(RoutingNodes routingNodes) {
        boolean changed = false;
        for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
            if (shardEntry.primary() && !shardEntry.assignedToNode()) {
                boolean elected = false;
                // primary and not assigned, go over and find a backup that is assigned
                for (RoutingNode routingNode : routingNodes.nodesToShards().values()) {

                    for (MutableShardRouting shardEntry2 : routingNode.shards()) {
                        if (shardEntry.shardId().equals(shardEntry2.shardId())) {
                            assert shardEntry2.assignedToNode();
                            assert !shardEntry2.primary();

                            changed = true;
                            shardEntry.moveToBackup();
                            shardEntry2.moveToPrimary();
                            elected = true;
                            break;
                        }
                    }

                    if (elected) {
                        break;
                    }
                }
            }
        }
        return changed;
    }

    /**
     * Allocates unassigned shards. First pass: round-robin over the least-loaded nodes,
     * respecting the per-node average. Second pass: places whatever is left (above the
     * average) on any node that can take it.
     */
    private boolean allocateUnassigned(RoutingNodes routingNodes) {
        boolean changed = false;
        List<RoutingNode> nodes = routingNodes.sortedNodesLeastToHigh();

        Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
        int lastNode = 0;
        while (unassignedIterator.hasNext()) {
            MutableShardRouting shard = unassignedIterator.next();
            for (int i = 0; i < nodes.size(); i++) {
                RoutingNode node = nodes.get(lastNode);
                // round-robin cursor over the candidate nodes
                lastNode++;
                if (lastNode == nodes.size())
                    lastNode = 0;

                if (node.canAllocate(routingNodes.metaData(), routingNodes.routingTable()) && node.canAllocate(shard)) {
                    int numberOfShardsToAllocate = routingNodes.requiredAverageNumberOfShardsPerNode() - node.shards().size();
                    if (numberOfShardsToAllocate == 0) {
                        // node already at the average; try the next one
                        continue;
                    }

                    changed = true;
                    node.add(shard);
                    unassignedIterator.remove();
                    break;
                }
            }
        }

        // allocate all the unassigned shards above the average per node.
        for (Iterator<MutableShardRouting> it = routingNodes.unassigned().iterator(); it.hasNext();) {
            MutableShardRouting shardRoutingEntry = it.next();
            // go over the nodes and try and allocate the remaining ones
            for (RoutingNode routingNode : routingNodes.nodesToShards().values()) {
                if (routingNode.canAllocate(routingNodes.metaData(), routingNodes.routingTable()) && routingNode.canAllocate(shardRoutingEntry)) {
                    changed = true;
                    routingNode.add(shardRoutingEntry);
                    it.remove();
                    break;
                }
            }
        }
        return changed;
    }

    /**
     * Applies the new nodes to the routing nodes and returns them (just the
     * new nodes);
     *
     * @param liveNodes currently live nodes.
     */
    private void applyNewNodes(RoutingNodes routingNodes, Iterable<Node> liveNodes) {
        for (Node node : liveNodes) {
            if (!routingNodes.nodesToShards().containsKey(node.id())) {
                RoutingNode routingNode = new RoutingNode(node.id());
                routingNodes.nodesToShards().put(node.id(), routingNode);
            }
        }
    }

    /**
     * Removes dead nodes from the routing view: their shards become unassigned (except
     * relocation-target copies, which are simply dropped), and in-flight relocations whose
     * counterpart node died are cancelled.
     */
    private boolean deassociateDeadNodes(RoutingNodes routingNodes, Iterable<Node> liveNodes) {
        boolean changed = false;
        Set<String> liveNodeIds = newHashSet();
        for (Node liveNode : liveNodes) {
            liveNodeIds.add(liveNode.id());
        }
        Set<String> nodeIdsToRemove = newHashSet();
        for (RoutingNode routingNode : routingNodes) {
            for (Iterator<MutableShardRouting> shardsIterator = routingNode.shards().iterator(); shardsIterator.hasNext();) {
                MutableShardRouting shardRoutingEntry = shardsIterator.next();
                if (shardRoutingEntry.assignedToNode()) {
                    // we store the relocation state here since when we call de-assign node
                    // later on, we will lose this state
                    boolean relocating = shardRoutingEntry.relocating();
                    String relocatingNodeId = shardRoutingEntry.relocatingNodeId();
                    // is this the destination shard that we are relocating an existing shard to?
                    // we know this since it has a relocating node id (the node we relocate from) and our state is INITIALIZING (and not RELOCATING)
                    boolean isRelocationDestinationShard = relocatingNodeId != null && shardRoutingEntry.initializing();

                    boolean currentNodeIsDead = false;
                    if (!liveNodeIds.contains(shardRoutingEntry.currentNodeId())) {
                        changed = true;
                        nodeIdsToRemove.add(shardRoutingEntry.currentNodeId());

                        if (!isRelocationDestinationShard) {
                            routingNodes.unassigned().add(shardRoutingEntry);
                        }

                        shardRoutingEntry.deassignNode();
                        currentNodeIsDead = true;
                        shardsIterator.remove();
                    }

                    // move source shard back to active state and cancel relocation mode.
                    if (relocating && !liveNodeIds.contains(relocatingNodeId)) {
                        nodeIdsToRemove.add(relocatingNodeId);
                        if (!currentNodeIsDead) {
                            changed = true;
                            shardRoutingEntry.cancelRelocation();
                        }
                    }

                    if (isRelocationDestinationShard && !liveNodeIds.contains(relocatingNodeId)) {
                        changed = true;
                        shardsIterator.remove();
                    }
                }
            }
        }
        for (String nodeIdToRemove : nodeIdsToRemove) {
            routingNodes.nodesToShards().remove(nodeIdToRemove);
        }
        return changed;
    }

    /**
     * Marks the given shards as STARTED on their current node and, for relocations,
     * removes the now-obsolete RELOCATING entry from the source node.
     */
    private boolean applyStartedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> startedShardEntries) {
        boolean dirty = false;
        // apply shards might be called several times with the same shard, ignore it
        for (ShardRouting startedShard : startedShardEntries) {
            assert startedShard.state() == INITIALIZING;

            // retrieve the relocating node id before calling moveToStarted().
            String relocatingNodeId = null;

            RoutingNode currentRoutingNode = routingNodes.nodesToShards().get(startedShard.currentNodeId());
            if (currentRoutingNode != null) {
                for (MutableShardRouting shard : currentRoutingNode) {
                    if (shard.shardId().equals(startedShard.shardId())) {
                        relocatingNodeId = shard.relocatingNodeId();
                        if (!shard.started()) {
                            dirty = true;
                            shard.moveToStarted();
                        }
                        break;
                    }
                }
            }

            // startedShard is the current state of the shard (post relocation for example)
            // this means that after relocation, the state will be started and the currentNodeId will be
            // the node we relocated to

            if (relocatingNodeId == null)
                continue;

            RoutingNode sourceRoutingNode = routingNodes.nodesToShards().get(relocatingNodeId);
            if (sourceRoutingNode != null) {
                Iterator<MutableShardRouting> shardsIter = sourceRoutingNode.iterator();
                while (shardsIter.hasNext()) {
                    MutableShardRouting shard = shardsIter.next();
                    if (shard.shardId().equals(startedShard.shardId())) {
                        if (shard.relocating()) {
                            dirty = true;
                            shardsIter.remove();
                            break;
                        }
                    }
                }
            }
        }
        return dirty;
    }

    /**
     * Handles failed shards: drops the failed copy (or cancels its relocation) and,
     * when not a relocation, tries to allocate the shard on another node, falling back
     * to the unassigned list.
     */
    private boolean applyFailedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> failedShardEntries) {
        boolean dirty = false;
        // apply shards might be called several times with the same shard, ignore it
        for (ShardRouting failedShard : failedShardEntries) {

            boolean shardDirty = false;
            boolean inRelocation = failedShard.relocatingNodeId() != null;
            if (inRelocation) {
                // remove the relocation-destination copy from the current (target) node
                RoutingNode routingNode = routingNodes.nodesToShards().get(failedShard.currentNodeId());
                Iterator<MutableShardRouting> shards = routingNode.iterator();
                while (shards.hasNext()) {
                    MutableShardRouting shard = shards.next();
                    if (shard.shardId().equals(failedShard.shardId())) {
                        shardDirty = true;
                        shard.deassignNode();
                        shards.remove();
                        break;
                    }
                }
            }

            String nodeId = inRelocation ? failedShard.relocatingNodeId() : failedShard.currentNodeId();
            RoutingNode currentRoutingNode = routingNodes.nodesToShards().get(nodeId);

            Iterator<MutableShardRouting> shards = currentRoutingNode.iterator();
            while (shards.hasNext()) {
                MutableShardRouting shard = shards.next();
                if (shard.shardId().equals(failedShard.shardId())) {
                    shardDirty = true;
                    if (!inRelocation) {
                        shard.deassignNode();
                        shards.remove();
                    } else {
                        assert shard.state() == ShardRoutingState.RELOCATING;
                        shard.cancelRelocation();
                    }
                    break;
                }
            }

            if (!shardDirty) {
                continue;
            } else {
                dirty = true;
            }

            // if in relocation no need to find a new target, just cancel the relocation.
            if (inRelocation) {
                continue;
            }

            // not in relocation so find a new target.

            boolean allocated = false;
            List<RoutingNode> sortedNodesLeastToHigh = routingNodes.sortedNodesLeastToHigh();
            for (RoutingNode target : sortedNodesLeastToHigh) {
                if (target.canAllocate(failedShard) &&
                        target.canAllocate(routingNodes.metaData(), routingNodes.routingTable()) &&
                        !target.nodeId().equals(failedShard.currentNodeId())) {

                    target.add(new MutableShardRouting(failedShard.index(), failedShard.id(),
                            target.nodeId(), failedShard.relocatingNodeId(),
                            failedShard.primary(), INITIALIZING));
                    allocated = true;
                    break;
                }
            }
            if (!allocated) {
                // we did not manage to allocate it, put it in the unassigned
                routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(),
                        null, failedShard.primary(), ShardRoutingState.UNASSIGNED));
            }
        }
        return dirty;
    }
}
Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.strategy; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; + +/** + * @author kimchy (Shay Banon) + */ +public interface ShardsRoutingStrategy { + + /** + * Applies the started shards. Note, shards can be called several times within this method. + * + *

If the same instance of the routing table is returned, then no change has been made. + */ + RoutingTable applyStartedShards(ClusterState clusterState, Iterable startedShardEntries); + + /** + * Applies the failed shards. Note, shards can be called several times within this method. + * + *

If the same instance of the routing table is returned, then no change has been made. + */ + RoutingTable applyFailedShards(ClusterState clusterState, Iterable failedShardEntries); + + /** + * Reroutes the routing table based on the live nodes. + * + *

If the same instance of the routing table is returned, then no change has been made. + */ + RoutingTable reroute(ClusterState clusterState); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/Discovery.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/Discovery.java new file mode 100644 index 00000000000..c60d7b018cc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.util.component.LifecycleComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface Discovery extends LifecycleComponent { + + void addListener(InitialStateDiscoveryListener listener); + + void removeListener(InitialStateDiscoveryListener listener); + + String nodeDescription(); + + boolean firstMaster(); + + void publish(ClusterState clusterState); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/DiscoveryException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/DiscoveryException.java new file mode 100644 index 00000000000..90b7ef5a820 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/DiscoveryException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.discovery;

import org.elasticsearch.ElasticSearchException;

/**
 * Generic failure raised by the discovery layer.
 *
 * @author kimchy (Shay Banon)
 */
public class DiscoveryException extends ElasticSearchException {

    public DiscoveryException(String message) {
        super(message);
    }

    public DiscoveryException(String message, Throwable cause) {
        super(message, cause);
    }
}
+ */ + +package org.elasticsearch.discovery; + +import com.google.inject.AbstractModule; +import com.google.inject.Module; +import org.elasticsearch.util.Classes; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.util.guice.ModulesFactory.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DiscoveryModule extends AbstractModule { + + private final Settings settings; + + public DiscoveryModule(Settings settings) { + this.settings = settings; + } + + @Override + protected void configure() { + Class defaultDiscoveryModule = null; + try { + Classes.getDefaultClassLoader().loadClass("org.elasticsearch.discovery.jgroups.JgroupsDiscovery"); + defaultDiscoveryModule = (Class) Classes.getDefaultClassLoader().loadClass("org.elasticsearch.discovery.jgroups.JgroupsDiscoveryModule"); + } catch (ClassNotFoundException e) { + // TODO default to the local one + } + + Class moduleClass = settings.getAsClass("discovery.type", defaultDiscoveryModule, "org.elasticsearch.discovery.", "DiscoveryModule"); + createModule(moduleClass, settings).configure(binder()); + + bind(DiscoveryService.class).asEagerSingleton(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/DiscoveryService.java new file mode 100644 index 00000000000..e6f3f4737d6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author kimchy (Shay Banon) + */ +public class DiscoveryService extends AbstractComponent implements LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final TimeValue initialStateTimeout; + + private final Discovery discovery; + + @Inject public DiscoveryService(Settings settings, Discovery discovery) { + super(settings); + this.discovery = discovery; + this.initialStateTimeout = componentSettings.getAsTime("initialStateTimeout", TimeValue.timeValueSeconds(30)); + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + @Override public DiscoveryService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + final CountDownLatch latch = new CountDownLatch(1); + InitialStateDiscoveryListener listener = new InitialStateDiscoveryListener() { + @Override public void initialStateProcessed() { + latch.countDown(); + } + }; + discovery.addListener(listener); + try { + discovery.start(); + try { + 
logger.trace("Waiting for {} for the initial state to be set by the discovery", initialStateTimeout); + if (latch.await(initialStateTimeout.millis(), TimeUnit.MILLISECONDS)) { + logger.trace("Initial state set from discovery"); + } else { + logger.warn("Waited for {} and no initial state was set by the discovery", initialStateTimeout); + } + } catch (InterruptedException e) { + // ignore + } + } finally { + discovery.removeListener(listener); + } + logger.info(discovery.nodeDescription()); + return this; + } + + @Override public DiscoveryService stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + discovery.stop(); + return this; + } + + @Override public void close() throws ElasticSearchException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + discovery.close(); + } + + public String nodeDescription() { + return discovery.nodeDescription(); + } + + public boolean firstMaster() { + return discovery.firstMaster(); + } + + public void publish(ClusterState clusterState) { + if (!lifecycle.started()) { + return; + } + discovery.publish(clusterState); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java new file mode 100644 index 00000000000..1e441a6e06c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery; + +/** + * A listener that should be called by the {@link org.elasticsearch.discovery.Discovery} component + * when the first valid initial cluster state has been submitted and processed by the cluster service. + * + *

Note, this listener should be registered with the discovery service before it has started. + * + * @author kimchy (Shay Banon) + */ +public interface InitialStateDiscoveryListener { + + void initialStateProcessed(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsClientDiscovery.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsClientDiscovery.java new file mode 100644 index 00000000000..ac04c2280a8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsClientDiscovery.java @@ -0,0 +1,292 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.jgroups; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryException; +import org.elasticsearch.discovery.InitialStateDiscoveryListener; +import org.elasticsearch.env.Environment; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.io.HostResolver; +import org.elasticsearch.util.settings.Settings; +import org.jgroups.*; + +import java.io.IOException; +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.URL; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.collect.Maps.*; +import static org.elasticsearch.cluster.ClusterState.*; +import static org.elasticsearch.cluster.node.Nodes.*; + +/** + * A simplified discovery implementation based on JGroups that only works in client mode. 
+ * + * @author kimchy (Shay Banon) + */ +public class JgroupsClientDiscovery extends AbstractComponent implements Discovery, Receiver { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final ClusterName clusterName; + + private final ThreadPool threadPool; + + private final ClusterService clusterService; + + private final Channel channel; + + private volatile ScheduledFuture reconnectFuture; + + private final AtomicBoolean initialStateSent = new AtomicBoolean(); + + private final CopyOnWriteArrayList initialStateListeners = new CopyOnWriteArrayList(); + + private final Node localNode = new Node("#client#", null); // dummy local node + + @Inject public JgroupsClientDiscovery(Settings settings, Environment environment, ClusterName clusterName, ClusterService clusterService, ThreadPool threadPool) { + super(settings); + this.clusterName = clusterName; + this.clusterService = clusterService; + this.threadPool = threadPool; + + String config = componentSettings.get("config", "udp"); + String actualConfig = config; + if (!config.endsWith(".xml")) { + actualConfig = "jgroups/" + config + ".xml"; + } + URL configUrl = environment.resolveConfig(actualConfig); + logger.debug("Using configuration [{}]", configUrl); + + Map sysPropsSet = newHashMap(); + try { + // prepare system properties to configure jgroups based on the settings + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith("discovery.jgroups")) { + String jgroupsKey = entry.getKey().substring("discovery.".length()); + if (System.getProperty(jgroupsKey) == null) { + sysPropsSet.put(jgroupsKey, entry.getValue()); + System.setProperty(jgroupsKey, entry.getValue()); + } + } + } + + if (System.getProperty("jgroups.bind_addr") == null) { + // automatically set the bind address based on ElasticSearch default bindings... 
+ try { + InetAddress bindAddress = HostResolver.resultBindHostAddress(null, settings, HostResolver.LOCAL_IP); + if ((bindAddress instanceof Inet4Address && HostResolver.isIPv4()) || (bindAddress instanceof Inet6Address && !HostResolver.isIPv4())) { + sysPropsSet.put("jgroups.bind_addr", bindAddress.getHostAddress()); + System.setProperty("jgroups.bind_addr", bindAddress.getHostAddress()); + } + } catch (IOException e) { + // ignore this + } + } + + channel = new JChannel(configUrl); + } catch (ChannelException e) { + throw new DiscoveryException("Failed to create jgroups channel with config [" + configUrl + "]", e); + } finally { + for (String keyToRemove : sysPropsSet.keySet()) { + System.getProperties().remove(keyToRemove); + } + } + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + @Override public Discovery start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + channel.setReceiver(this); + try { + channel.connect(clusterName.value()); + } catch (ChannelException e) { + throw new DiscoveryException("Failed to connect to cluster [" + clusterName.value() + "]", e); + } + connectTillMasterIfNeeded(); + sendInitialStateEventIfNeeded(); + return this; + } + + @Override public Discovery stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + if (reconnectFuture != null) { + reconnectFuture.cancel(true); + reconnectFuture = null; + } + if (channel.isConnected()) { + channel.disconnect(); + } + return this; + } + + @Override public void close() throws ElasticSearchException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + if (channel.isOpen()) { + channel.close(); + } + } + + @Override public void addListener(InitialStateDiscoveryListener listener) { + initialStateListeners.add(listener); + } + + @Override public void removeListener(InitialStateDiscoveryListener listener) { + 
initialStateListeners.remove(listener); + } + + @Override public void receive(Message msg) { + if (msg.getSrc().equals(channel.getAddress())) { + return; // my own message, ignore. + } + if (msg.getSrc().equals(channel.getView().getCreator())) { + try { + byte[] buffer = msg.getBuffer(); + final ClusterState origClusterState = ClusterState.Builder.fromBytes(buffer, settings, localNode); + // remove the dummy local node + final ClusterState clusterState = newClusterStateBuilder().state(origClusterState) + .nodes(newNodesBuilder().putAll(origClusterState.nodes()).remove(localNode.id())).build(); + System.err.println("Nodes: " + clusterState.nodes().prettyPrint()); + clusterService.submitStateUpdateTask("jgroups-disco-receive(from master)", new ProcessedClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + return clusterState; + } + + @Override public void clusterStateProcessed(ClusterState clusterState) { + sendInitialStateEventIfNeeded(); + } + }); + } catch (Exception e) { + logger.error("Received corrupted cluster state.", e); + } + } + } + + @Override public void viewAccepted(View newView) { + // we became master, reconnect + if (channel.getAddress().equals(newView.getCreator())) { + try { + channel.disconnect(); + } catch (Exception e) { + // ignore + } + if (!lifecycle.started()) { + return; + } + connectTillMasterIfNeeded(); + } + } + + private void sendInitialStateEventIfNeeded() { + if (initialStateSent.compareAndSet(false, true)) { + for (InitialStateDiscoveryListener listener : initialStateListeners) { + listener.initialStateProcessed(); + } + } + } + + @Override public String nodeDescription() { + return "clientNode"; + } + + @Override public void publish(ClusterState clusterState) { + throw new ElasticSearchIllegalStateException("When in client mode, cluster state should not be published"); + } + + @Override public boolean firstMaster() { + return false; + } + + @Override public byte[] getState() { + return new 
byte[0]; + } + + @Override public void setState(byte[] state) { + } + + @Override public void suspect(Address suspectedMember) { + } + + @Override public void block() { + logger.warn("Blocked..."); + } + + private void connectTillMasterIfNeeded() { + Runnable command = new Runnable() { + @Override public void run() { + try { + channel.connect(clusterName.value()); + if (isMaster()) { + logger.debug("Act as master, reconnecting..."); + channel.disconnect(); + reconnectFuture = threadPool.schedule(this, 3, TimeUnit.SECONDS); + } else { + logger.debug("Reconnected not as master"); + reconnectFuture = null; + } + } catch (Exception e) { + logger.warn("Failed to connect to cluster", e); + } + } + }; + + if (channel.isConnected()) { + if (!isMaster()) { + logger.debug("Connected not as master"); + return; + } + channel.disconnect(); + } + reconnectFuture = threadPool.schedule(command, 3, TimeUnit.SECONDS); + } + + private boolean isMaster() { + return channel.getAddress().equals(channel.getView().getCreator()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsCustomLogFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsCustomLogFactory.java new file mode 100644 index 00000000000..a0b0efd52c8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsCustomLogFactory.java @@ -0,0 +1,136 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.jgroups; + +import org.jgroups.logging.CustomLogFactory; +import org.jgroups.logging.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * @author kimchy (Shay Banon) + */ +public class JgroupsCustomLogFactory implements CustomLogFactory { + + @Override public Log getLog(Class clazz) { + return getLog(clazz.getName()); + } + + @Override public Log getLog(String category) { + return new Slf4jLog(LoggerFactory.getLogger(category.replace("org.jgroups.", "jgroups.").replace(".protocols.", "."))); + } + + private static class Slf4jLog implements Log { + + private final Logger logger; + + private Slf4jLog(Logger logger) { + this.logger = logger; + } + + @Override public boolean isFatalEnabled() { + return logger.isErrorEnabled(); + } + + @Override public boolean isErrorEnabled() { + return logger.isErrorEnabled(); + } + + @Override public boolean isWarnEnabled() { + return logger.isWarnEnabled(); + } + + @Override public boolean isInfoEnabled() { + return logger.isInfoEnabled(); + } + + @Override public boolean isDebugEnabled() { + return logger.isDebugEnabled(); + } + + @Override public boolean isTraceEnabled() { + return logger.isTraceEnabled(); + } + + @Override public void debug(String msg) { + logger.debug(msg); + } + + @Override public void debug(String msg, Throwable throwable) { + logger.debug(msg, throwable); + } + + @Override public void error(String msg) { + logger.error(msg); + } + + @Override public void error(String msg, Throwable throwable) { + logger.error(msg, throwable); + 
} + + @Override public void fatal(String msg) { + logger.error(msg); + } + + @Override public void fatal(String msg, Throwable throwable) { + logger.error(msg, throwable); + } + + @Override public void info(String msg) { + logger.info(msg); + } + + @Override public void info(String msg, Throwable throwable) { + logger.info(msg, throwable); + } + + @Override public void trace(Object msg) { + logger.trace(msg.toString()); + } + + @Override public void trace(Object msg, Throwable throwable) { + logger.trace(msg.toString(), throwable); + } + + @Override public void trace(String msg) { + logger.trace(msg); + } + + @Override public void trace(String msg, Throwable throwable) { + logger.trace(msg, throwable); + } + + @Override public void warn(String msg) { + logger.warn(msg); + } + + @Override public void warn(String msg, Throwable throwable) { + logger.warn(msg, throwable); + } + + @Override public void setLevel(String level) { + // ignore + } + + @Override public String getLevel() { + return null; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsDiscovery.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsDiscovery.java new file mode 100644 index 00000000000..4d1129a72c6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsDiscovery.java @@ -0,0 +1,388 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.jgroups; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryException; +import org.elasticsearch.discovery.InitialStateDiscoveryListener; +import org.elasticsearch.env.Environment; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.io.ByteArrayDataInputStream; +import org.elasticsearch.util.io.ByteArrayDataOutputStream; +import org.elasticsearch.util.io.HostResolver; +import org.elasticsearch.util.settings.Settings; +import org.jgroups.*; + +import java.io.IOException; +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.URL; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.collect.Maps.*; +import static com.google.common.collect.Sets.*; +import static org.elasticsearch.cluster.ClusterState.*; + +/** + * @author kimchy (Shay Banon) + */ +public class JgroupsDiscovery extends AbstractComponent implements Discovery, 
Receiver { + + static { + System.setProperty("jgroups.logging.log_factory_class", JgroupsCustomLogFactory.class.getName()); + } + + private final Lifecycle lifecycle = new Lifecycle(); + + private final ClusterName clusterName; + + private final ThreadPool threadPool; + + private final TransportService transportService; + + private final ClusterService clusterService; + + private final Channel channel; + + private volatile boolean addressSet = false; + + private Node localNode; + + private volatile boolean firstMaster = false; + + private final AtomicBoolean initialStateSent = new AtomicBoolean(); + + private final CopyOnWriteArrayList initialStateListeners = new CopyOnWriteArrayList(); + + @Inject public JgroupsDiscovery(Settings settings, Environment environment, ClusterName clusterName, + ThreadPool threadPool, TransportService transportService, ClusterService clusterService) { + super(settings); + this.clusterName = clusterName; + this.threadPool = threadPool; + this.transportService = transportService; + this.clusterService = clusterService; + + String config = componentSettings.get("config", "udp"); + String actualConfig = config; + if (!config.endsWith(".xml")) { + actualConfig = "jgroups/" + config + ".xml"; + } + URL configUrl = environment.resolveConfig(actualConfig); + logger.debug("Using configuration [{}]", configUrl); + + Map sysPropsSet = newHashMap(); + try { + // prepare system properties to configure jgroups based on the settings + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith("discovery.jgroups")) { + String jgroupsKey = entry.getKey().substring("discovery.".length()); + if (System.getProperty(jgroupsKey) == null) { + sysPropsSet.put(jgroupsKey, entry.getValue()); + System.setProperty(jgroupsKey, entry.getValue()); + } + } + } + + if (System.getProperty("jgroups.bind_addr") == null) { + // automatically set the bind address based on ElasticSearch default bindings... 
+ try { + InetAddress bindAddress = HostResolver.resultBindHostAddress(null, settings, HostResolver.LOCAL_IP); + if ((bindAddress instanceof Inet4Address && HostResolver.isIPv4()) || (bindAddress instanceof Inet6Address && !HostResolver.isIPv4())) { + sysPropsSet.put("jgroups.bind_addr", bindAddress.getHostAddress()); + System.setProperty("jgroups.bind_addr", bindAddress.getHostAddress()); + } + } catch (IOException e) { + // ignore this + } + } + + channel = new JChannel(configUrl); + } catch (ChannelException e) { + throw new DiscoveryException("Failed to create jgroups channel with config [" + configUrl + "]", e); + } finally { + for (String keyToRemove : sysPropsSet.keySet()) { + System.getProperties().remove(keyToRemove); + } + } + } + + @Override public void addListener(InitialStateDiscoveryListener listener) { + initialStateListeners.add(listener); + } + + @Override public void removeListener(InitialStateDiscoveryListener listener) { + initialStateListeners.remove(listener); + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + @Override public Discovery start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + try { + channel.connect(clusterName.value()); + channel.setReceiver(this); + logger.debug("Connected to cluster [{}], address [{}]", channel.getClusterName(), channel.getAddress()); + this.localNode = new Node(settings.get("name"), settings.getAsBoolean("node.data", true), channel.getAddress().toString(), transportService.boundAddress().publishAddress()); + + if (isMaster()) { + clusterService.submitStateUpdateTask("jgroups-disco-initialconnect(master)", new ProcessedClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + Nodes.Builder builder = new Nodes.Builder() + .localNodeId(localNode.id()) + .masterNodeId(localNode.id()) + // put our local node + .put(localNode); + return 
newClusterStateBuilder().state(currentState).nodes(builder).build(); + } + + @Override public void clusterStateProcessed(ClusterState clusterState) { + sendInitialStateEventIfNeeded(); + } + }); + firstMaster = true; + addressSet = true; + } else { + clusterService.submitStateUpdateTask("jgroups-disco-initialconnect", new ClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + Nodes.Builder builder = new Nodes.Builder() + .localNodeId(localNode.id()) + .put(localNode); + return newClusterStateBuilder().state(currentState).nodes(builder).build(); + } + }); + try { + channel.send(new Message(channel.getView().getCreator(), channel.getAddress(), nodeMessagePayload())); + addressSet = true; + logger.debug("Sent address [{}] to master [{}]", transportService.boundAddress().publishAddress(), channel.getView().getCreator()); + } catch (Exception e) { + logger.warn("Can't send address to master [" + channel.getView().getCreator() + "] will try again later...", e); + } + } + } catch (ChannelException e) { + throw new DiscoveryException("Can't connect to group [" + clusterName + "]", e); + } + return this; + } + + @Override public Discovery stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + initialStateSent.set(false); + if (channel.isConnected()) { + channel.disconnect(); + } + return this; + } + + @Override public void close() throws DiscoveryException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + if (channel.isOpen()) { + channel.close(); + } + } + + public String nodeDescription() { + return channel.getClusterName() + "/" + channel.getAddress(); + } + + @Override public boolean firstMaster() { + return firstMaster; + } + + @Override public void publish(ClusterState clusterState) { + if (!isMaster()) { + throw new ElasticSearchIllegalStateException("Shouldn't publish state when not master"); + } + try { + channel.send(new Message(null, 
null, ClusterState.Builder.toBytes(clusterState))); + } catch (Exception e) { + logger.error("Failed to send cluster state to nodes", e); + } + } + + @Override public void receive(Message msg) { + if (msg.getSrc().equals(channel.getAddress())) { + return; // my own message, ignore. + } + + // message from the master, the cluster state has changed. + if (msg.getSrc().equals(channel.getView().getCreator())) { + try { + byte[] buffer = msg.getBuffer(); + final ClusterState clusterState = ClusterState.Builder.fromBytes(buffer, settings, localNode); + // ignore cluster state messages that do not include "me", not in the game yet... + if (clusterState.nodes().localNode() != null) { + clusterService.submitStateUpdateTask("jgroups-disco-receive(from master)", new ProcessedClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + return clusterState; + } + + @Override public void clusterStateProcessed(ClusterState clusterState) { + sendInitialStateEventIfNeeded(); + } + }); + } + } catch (Exception e) { + logger.error("Received corrupted cluster state.", e); + } + + return; + } + + // direct message from a member that indicate his state has changed. 
+ if (isMaster()) { + try { + ByteArrayDataInputStream is = new ByteArrayDataInputStream(msg.getBuffer()); + final Node newNode = Node.readNode(is); + is.close(); + clusterService.submitStateUpdateTask("jgroups-disco-receive(from node[" + newNode + "])", new ClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { + if (currentState.nodes().nodeExists(newNode.id())) { + // no change, the node already exists in the cluster + logger.warn("Received an address [{}] for an existing node [{}]", newNode.address(), newNode); + return currentState; + } + return newClusterStateBuilder().state(currentState).nodes(currentState.nodes().newNode(newNode)).build(); + } + }); + } catch (Exception e) { + logger.warn("Can't read address from cluster member, message [" + msg.getClass().getName() + "/" + msg + "]", e); + } + + return; + } + + logger.error("A message between two members that neither of them is the master is not allowed."); + } + + private boolean isMaster() { + return channel.getAddress().equals(channel.getView().getCreator()); + } + + @Override public byte[] getState() { + return new byte[0]; + } + + @Override public void setState(byte[] state) { + } + + @Override public void viewAccepted(final View newView) { + if (!addressSet) { + try { + channel.send(new Message(newView.getCreator(), channel.getAddress(), nodeMessagePayload())); + logger.debug("Sent address [{}] to master [{}]", localNode.address(), newView.getCreator()); + addressSet = true; + } catch (Exception e) { + logger.warn("Can't send address to master [" + newView.getCreator() + "] will try again later...", e); + } + } + // I am the master + if (channel.getAddress().equals(newView.getCreator())) { + final Set newMembers = newHashSet(); + for (Address address : newView.getMembers()) { + newMembers.add(address.toString()); + } + + clusterService.submitStateUpdateTask("jgroups-disco-view", new ClusterStateUpdateTask() { + @Override public ClusterState 
execute(ClusterState currentState) { + Nodes newNodes = currentState.nodes().removeDeadMembers(newMembers, newView.getCreator().toString()); + Nodes.Delta delta = newNodes.delta(currentState.nodes()); + if (delta.added()) { + logger.warn("No new nodes should be created when a new discovery view is accepted"); + } + // we want to send a new cluster state any how on view change (that's why its commented) + // for cases where we have client node joining (and it needs the cluster state) +// if (!delta.removed()) { +// // no nodes were removed, return the current state +// return currentState; +// } + return newClusterStateBuilder().state(currentState).nodes(newNodes).build(); + } + }); + } else { + // check whether I have been removed due to temporary disconnect + final String me = channel.getAddress().toString(); + boolean foundMe = false; + for (Node node : clusterService.state().nodes()) { + if (node.id().equals(me)) { + foundMe = true; + break; + } + } + + if (!foundMe) { + logger.warn("Disconnected from cluster, resending address [{}] to master [{}]", localNode.address(), newView.getCreator()); + try { + channel.send(new Message(newView.getCreator(), channel.getAddress(), nodeMessagePayload())); + addressSet = true; + } catch (Exception e) { + addressSet = false; + logger.warn("Can't send address to master [" + newView.getCreator() + "] will try again later...", e); + } + } + } + } + + private byte[] nodeMessagePayload() throws IOException { + ByteArrayDataOutputStream os = new ByteArrayDataOutputStream(); + localNode.writeTo(os); + os.close(); + return os.unsafeByteArray(); + } + + private void sendInitialStateEventIfNeeded() { + if (initialStateSent.compareAndSet(false, true)) { + for (InitialStateDiscoveryListener listener : initialStateListeners) { + listener.initialStateProcessed(); + } + } + } + + + @Override public void suspect(Address suspectedMember) { + } + + @Override public void block() { + logger.warn("Blocked..."); + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsDiscoveryModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsDiscoveryModule.java new file mode 100644 index 00000000000..4f89acec7e9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/discovery/jgroups/JgroupsDiscoveryModule.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.jgroups; + +import com.google.inject.AbstractModule; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class JgroupsDiscoveryModule extends AbstractModule { + + private final Settings settings; + + public JgroupsDiscoveryModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + if (settings.getAsBoolean("discovery.client", false)) { + bind(Discovery.class).to(JgroupsClientDiscovery.class).asEagerSingleton(); + } else { + bind(Discovery.class).to(JgroupsDiscovery.class).asEagerSingleton(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/env/Environment.java b/modules/elasticsearch/src/main/java/org/elasticsearch/env/Environment.java new file mode 100644 index 00000000000..a36ae22f762 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/env/Environment.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.env;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.util.Classes;
import org.elasticsearch.util.settings.Settings;

import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;

import static org.elasticsearch.util.Strings.*;
import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*;

/**
 * Resolves the on-disk layout of a node (home, work, per-cluster work, config
 * and logs directories) from the {@code path.*} settings, and locates
 * configuration resources on the file system or on the classpath.
 *
 * @author kimchy (Shay Banon)
 */
public class Environment {

    private final File homeFile;

    private final File workFile;

    private final File workWithClusterFile;

    private final File configFile;

    private final File logsFile;

    /** Builds an environment from empty settings (all defaults). */
    public Environment() {
        this(EMPTY_SETTINGS);
    }

    /**
     * Builds the directory layout from the given settings. The home and work
     * directories are created eagerly; the logs directory is only resolved.
     */
    public Environment(Settings settings) {
        homeFile = pathOrDefault(settings.get("path.home"), new File("."));
        homeFile.mkdirs();

        configFile = pathOrDefault(settings.get("path.conf"), new File(homeFile, "config"));

        workFile = pathOrDefault(settings.get("path.work"), new File(homeFile, "work"));
        workFile.mkdirs();

        // a per-cluster sub directory under work, so several clusters can share a work dir
        workWithClusterFile = new File(workFile, ClusterName.clusterNameFromSettings(settings).value());
        workWithClusterFile.mkdirs();

        logsFile = pathOrDefault(settings.get("path.logs"), new File(workFile, "logs"));
    }

    // explicit setting wins (cleaned), otherwise fall back to the default location
    private static File pathOrDefault(String path, File defaultFile) {
        return path != null ? new File(cleanPath(path)) : defaultFile;
    }

    public File homeFile() {
        return homeFile;
    }

    public File workFile() {
        return workFile;
    }

    public File workWithClusterFile() {
        return workWithClusterFile;
    }

    public File logsFile() {
        return logsFile;
    }

    /**
     * Resolves a configuration resource, trying in order: a direct file system
     * path, a path relative to the config directory, the classpath, and the
     * classpath under a {@code config/} prefix.
     *
     * @throws FailedToResolveConfigException if none of the locations match
     */
    public URL resolveConfig(String path) throws FailedToResolveConfigException {
        // 1. a direct path on the file system
        File fsFile = new File(path);
        if (fsFile.exists()) {
            return toUrl(fsFile);
        }
        // strip a leading slash so the classpath / config-relative lookups work
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        // 2. relative to the config directory
        File configRelative = new File(configFile, path);
        if (configRelative.exists()) {
            return toUrl(configRelative);
        }
        // 3. directly on the classpath
        URL resource = Classes.getDefaultClassLoader().getResource(path);
        if (resource != null) {
            return resource;
        }
        // 4. on the classpath, under a config/ prefix
        if (!path.startsWith("config/")) {
            resource = Classes.getDefaultClassLoader().getResource("config/" + path);
            if (resource != null) {
                return resource;
            }
        }
        throw new FailedToResolveConfigException("Failed to resolve config path [" + path + "], tried file path [" + fsFile + "], path file [" + configRelative + "], and classpath");
    }

    // converts an existing file to a URL, wrapping the (unlikely) malformed URL case
    private static URL toUrl(File file) throws FailedToResolveConfigException {
        try {
            return file.toURI().toURL();
        } catch (MalformedURLException e) {
            throw new FailedToResolveConfigException("Failed to resolve path [" + file + "]", e);
        }
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import com.google.inject.AbstractModule; + +/** + * @author kimchy (Shay Banon) + */ +public class EnvironmentModule extends AbstractModule { + + private final Environment environment; + + public EnvironmentModule(Environment environment) { + this.environment = environment; + } + + @Override protected void configure() { + bind(Environment.class).toInstance(environment); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java new file mode 100644 index 00000000000..898bf1b476a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class FailedToResolveConfigException extends ElasticSearchException { + + public FailedToResolveConfigException(String msg) { + super(msg); + } + + public FailedToResolveConfigException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/Gateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/Gateway.java new file mode 100644 index 00000000000..a9697b1c477 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import com.google.inject.Module; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.util.component.LifecycleComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface Gateway extends LifecycleComponent { + + void write(MetaData metaData) throws GatewayException; + + MetaData read() throws GatewayException; + + Class suggestIndexGateway(); + + void reset(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayException.java new file mode 100644 index 00000000000..8a7a35ce255 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class GatewayException extends ElasticSearchException { + + public GatewayException(String msg) { + super(msg); + } + + public GatewayException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayModule.java new file mode 100644 index 00000000000..ee149e2f393 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayModule.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import com.google.inject.AbstractModule; +import org.elasticsearch.gateway.none.NoneGatewayModule; +import org.elasticsearch.util.guice.ModulesFactory; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class GatewayModule extends AbstractModule { + + private final Settings settings; + + public GatewayModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + ModulesFactory.createModule(settings.getAsClass("gateway.type", NoneGatewayModule.class, "org.elasticsearch.gateway.", "GatewayModule"), settings).configure(binder()); + bind(GatewayService.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayService.java new file mode 100644 index 00000000000..7834db1583b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.gateway;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.component.Lifecycle;
import org.elasticsearch.util.component.LifecycleComponent;
import org.elasticsearch.util.concurrent.DynamicExecutors;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.cluster.ClusterState.*;
import static org.elasticsearch.cluster.metadata.MetaData.*;
import static org.elasticsearch.util.TimeValue.*;

/**
 * Bridges the cluster state and the {@link Gateway}: on the first elected
 * master it reads persisted meta data back from the gateway and recreates the
 * indices; on subsequent meta data changes (while this node is master) it
 * writes the new meta data to the gateway. All gateway I/O runs on a dedicated
 * single-threaded executor so cluster state processing is never blocked.
 *
 * @author kimchy (Shay Banon)
 */
public class GatewayService extends AbstractComponent implements ClusterStateListener, LifecycleComponent {

    private final Lifecycle lifecycle = new Lifecycle();

    private final Gateway gateway;

    private final ThreadPool threadPool;

    // created on start(), torn down on stop(); volatile since it is read from cluster state callbacks
    private volatile ExecutorService executor;

    private final ClusterService clusterService;

    private final MetaDataService metaDataService;

    // ensures the gateway is read at most once, on the first-master event
    private final AtomicBoolean firstMasterRead = new AtomicBoolean();

    @Inject public GatewayService(Settings settings, Gateway gateway, ClusterService clusterService,
                                  ThreadPool threadPool, MetaDataService metaDataService) {
        super(settings);
        this.gateway = gateway;
        this.clusterService = clusterService;
        this.threadPool = threadPool;
        this.metaDataService = metaDataService;
    }

    @Override public Lifecycle.State lifecycleState() {
        return lifecycle.state();
    }

    /**
     * Starts the underlying gateway, creates the single-threaded gateway
     * executor, and registers for cluster state changes. Idempotent: a no-op
     * if the lifecycle cannot move to started.
     */
    @Override public GatewayService start() throws ElasticSearchException {
        if (!lifecycle.moveToStarted()) {
            return this;
        }
        gateway.start();
        this.executor = Executors.newSingleThreadExecutor(DynamicExecutors.daemonThreadFactory(settings, "gateway"));
        clusterService.add(this);
        return this;
    }

    /**
     * Unregisters from cluster state changes, drains the gateway executor
     * (waiting up to 10 seconds), then stops the gateway.
     */
    @Override public GatewayService stop() throws ElasticSearchException {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        clusterService.remove(this);
        executor.shutdown();
        try {
            executor.awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
        gateway.stop();
        return this;
    }

    /** Stops the service if needed and closes the gateway. */
    public void close() {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }
        gateway.close();
    }

    /**
     * Only the master interacts with the gateway: the very first master reads
     * the persisted state back (once), every later change is written out.
     */
    @Override public void clusterChanged(final ClusterChangedEvent event) {
        if (lifecycle.started() && event.localNodeMaster()) {
            if (event.firstMaster() && firstMasterRead.compareAndSet(false, true)) {
                readFromGateway();
            } else {
                writeToGateway(event);
            }
        }
    }

    // asynchronously persists the meta data of the given event, if it changed
    private void writeToGateway(final ClusterChangedEvent event) {
        if (!event.metaDataChanged()) {
            return;
        }
        executor.execute(new Runnable() {
            @Override public void run() {
                logger.debug("Writing to gateway");
                try {
                    gateway.write(event.state().metaData());
                    // TODO, we need to remember that we failed, maybe add a retry scheduler?
                } catch (Exception e) {
                    logger.error("Failed to write to gateway", e);
                }
            }
        });
    }

    // asynchronously reads the persisted meta data and recreates each index
    private void readFromGateway() {
        // we are the first master, go ahead and read and create indices
        logger.debug("First master in the cluster, reading state from gateway");
        executor.execute(new Runnable() {
            @Override public void run() {
                MetaData metaData;
                try {
                    metaData = gateway.read();
                } catch (Exception e) {
                    logger.error("Failed to read from gateway", e);
                    return;
                }
                if (metaData == null) {
                    logger.debug("No state read from gateway");
                    return;
                }
                final MetaData fMetaData = metaData;
                clusterService.submitStateUpdateTask("gateway (recovered meta-data)", new ClusterStateUpdateTask() {
                    @Override public ClusterState execute(ClusterState currentState) {
                        MetaData.Builder metaDataBuilder = newMetaDataBuilder()
                                .metaData(currentState.metaData()).maxNumberOfShardsPerNode(fMetaData.maxNumberOfShardsPerNode());
                        // go over the meta data and create indices, we don't really need to copy over
                        // the meta data per index, since we create the index and it will be added automatically
                        for (final IndexMetaData indexMetaData : fMetaData) {
                            // index creation happens off the cluster update thread, on the shared thread pool
                            threadPool.execute(new Runnable() {
                                @Override public void run() {
                                    try {
                                        metaDataService.createIndex(indexMetaData.index(), indexMetaData.settings(), timeValueMillis(10));
                                    } catch (Exception e) {
                                        logger.error("Failed to create index [" + indexMetaData.index() + "]", e);
                                    }
                                }
                            });
                        }
                        return newClusterStateBuilder().state(currentState).metaData(metaDataBuilder).build();
                    }
                });
            }
        });
    }
}
package org.elasticsearch.gateway.fs;

import com.google.inject.Inject;
import com.google.inject.Module;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.Gateway;
import org.elasticsearch.gateway.GatewayException;
import org.elasticsearch.index.gateway.fs.FsIndexGatewayModule;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.component.Lifecycle;
import org.elasticsearch.util.io.FastDataOutputStream;
import org.elasticsearch.util.io.FileSystemUtils;
import org.elasticsearch.util.settings.Settings;

import java.io.*;

/**
 * A {@link Gateway} that persists the cluster meta data on the local file
 * system. Each write creates a new generation file ({@code metadata-N}) and
 * deletes the older generations; reads load the latest generation.
 *
 * @author kimchy (Shay Banon)
 */
public class FsGateway extends AbstractComponent implements Gateway {

    private final Lifecycle lifecycle = new Lifecycle();

    private final Environment environment;

    private final ClusterName clusterName;

    // optional custom location ("gateway.fs.location" component setting), may be null
    private final String location;

    private final File gatewayHome;

    // latest persisted generation number, -1 when no state exists
    private volatile int currentIndex;

    @Inject public FsGateway(Settings settings, Environment environment, ClusterName clusterName) throws IOException {
        super(settings);
        this.clusterName = clusterName;
        this.environment = environment;

        this.location = componentSettings.get("location");

        this.gatewayHome = createGatewayHome(location, environment, clusterName);
        this.currentIndex = findLatestIndex(gatewayHome);
    }

    @Override public Lifecycle.State lifecycleState() {
        return lifecycle.state();
    }

    @Override public Gateway start() throws ElasticSearchException {
        if (!lifecycle.moveToStarted()) {
            return this;
        }
        return this;
    }

    @Override public Gateway stop() throws ElasticSearchException {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        return this;
    }

    @Override public void close() throws ElasticSearchException {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }
    }

    public File gatewayHome() {
        return gatewayHome;
    }

    /**
     * Scans the gateway home for {@code metadata-N} files and returns the
     * highest generation found, or -1 when there is none.
     */
    private static int findLatestIndex(File gatewayHome) {
        File[] files = gatewayHome.listFiles(new FilenameFilter() {
            @Override public boolean accept(File dir, String name) {
                return name.startsWith("metadata-");
            }
        });
        if (files == null) {
            // listFiles returns null when the directory can't be listed
            // (missing directory or I/O error); treat it as "no state"
            return -1;
        }

        int index = -1;
        for (File file : files) {
            String name = file.getName();
            int fileIndex;
            try {
                fileIndex = Integer.parseInt(name.substring(name.indexOf('-') + 1));
            } catch (NumberFormatException e) {
                // a stray file that merely shares the "metadata-" prefix, skip it
                continue;
            }
            if (fileIndex >= index) {
                index = fileIndex;
            }
        }

        return index;
    }

    /**
     * Resolves (and creates, best effort) the directory this gateway writes
     * to: either the configured location suffixed with the cluster name, or
     * a "gateway" directory under the per-cluster work directory.
     */
    private static File createGatewayHome(String location, Environment environment, ClusterName clusterName) {
        File f;
        if (location != null) {
            // if its a custom location, append the cluster name to it just so we have unique
            // in case two clusters point to the same location
            f = new File(new File(location), clusterName.value());
        } else {
            // work already includes the cluster name
            f = new File(environment.workWithClusterFile(), "gateway");
        }
        if (f.exists() && f.isDirectory()) {
            return f;
        }
        // best effort: retry the creation a few times before giving up
        for (int i = 0; i < 5; i++) {
            if (f.mkdirs()) {
                break;
            }
        }

        return f;
    }

    /**
     * Writes the meta data as a new generation file, fsyncs it, then deletes
     * the older generations. The output stream is closed in a finally block so
     * the file descriptor is released even when serialization fails.
     *
     * @throws GatewayException if the file cannot be created or written
     */
    @Override public void write(MetaData metaData) throws GatewayException {
        try {
            final File file = new File(gatewayHome, "metadata-" + (currentIndex + 1));
            for (int i = 0; i < 5; i++) {
                if (file.createNewFile())
                    break;
            }
            if (!file.exists()) {
                throw new GatewayException("Failed to create new file [" + file + "]");
            }

            FileOutputStream fileStream = new FileOutputStream(file);
            try {
                FastDataOutputStream outStream = new FastDataOutputStream(fileStream);
                MetaData.Builder.writeTo(metaData, outStream);
                outStream.close();
            } finally {
                // double close is a no-op; this guards against a leak when
                // serialization (or the wrapper's close) throws above
                try {
                    fileStream.close();
                } catch (IOException e) {
                    // best effort cleanup, primary error (if any) already propagating
                }
            }

            FileSystemUtils.syncFile(file);

            currentIndex++;

            // delete old generation files
            File[] oldFiles = gatewayHome.listFiles(new FilenameFilter() {
                @Override public boolean accept(File dir, String name) {
                    return name.startsWith("metadata-") && !name.equals(file.getName());
                }
            });

            if (oldFiles != null) {
                for (File oldFile : oldFiles) {
                    oldFile.delete();
                }
            }

        } catch (IOException e) {
            throw new GatewayException("can't write new metadata file into the gateway", e);
        }
    }

    /**
     * Reads the latest generation file, or returns {@code null} when nothing
     * has been persisted yet. The input stream is closed in a finally block.
     *
     * @throws GatewayException if the current file is missing or unreadable
     */
    @Override public MetaData read() throws GatewayException {
        try {
            if (currentIndex == -1)
                return null;

            File file = new File(gatewayHome, "metadata-" + currentIndex);
            if (!file.exists()) {
                throw new GatewayException("can't find current metadata file");
            }

            DataInputStream inStream = new DataInputStream(new FileInputStream(file));
            try {
                return MetaData.Builder.readFrom(inStream, settings);
            } finally {
                inStream.close();
            }

        } catch (GatewayException e) {
            throw e;
        } catch (Exception e) {
            throw new GatewayException("can't read metadata file from the gateway", e);
        }
    }

    @Override public Class suggestIndexGateway() {
        return FsIndexGatewayModule.class;
    }

    /** Deletes all persisted state and resets the generation counter. */
    @Override public void reset() {
        FileSystemUtils.deleteRecursively(gatewayHome, false);
        currentIndex = -1;
    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java new file mode 100644 index 00000000000..2cd475ad778 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway.fs; + +import com.google.inject.AbstractModule; +import org.elasticsearch.gateway.Gateway; + +/** + * @author kimchy (Shay Banon) + */ +public class FsGatewayModule extends AbstractModule { + + @Override protected void configure() { + bind(Gateway.class).to(FsGateway.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/none/NoneGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/none/NoneGateway.java new file mode 100644 index 00000000000..f258f682954 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/none/NoneGateway.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway.none; + +import com.google.inject.Module; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.gateway.Gateway; +import org.elasticsearch.gateway.GatewayException; +import org.elasticsearch.index.gateway.none.NoneIndexGatewayModule; +import org.elasticsearch.util.component.Lifecycle; + +/** + * @author kimchy (Shay Banon) + */ +public class NoneGateway implements Gateway { + + private final Lifecycle lifecycle = new Lifecycle(); + + @Override public Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public Gateway start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + return this; + } + + @Override public Gateway stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + return this; + } + + @Override public void close() throws ElasticSearchException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + @Override public void write(MetaData metaData) throws GatewayException { + + } + + @Override public MetaData read() throws GatewayException { + return null; + } + + @Override public Class suggestIndexGateway() { + return NoneIndexGatewayModule.class; + } + + @Override public void reset() { + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java new file mode 100644 index 00000000000..3b54e17b82b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway.none; + +import com.google.inject.AbstractModule; +import org.elasticsearch.gateway.Gateway; + +/** + * @author kimchy (Shay Banon) + */ +public class NoneGatewayModule extends AbstractModule { + + @Override protected void configure() { + bind(Gateway.class).to(NoneGateway.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/AbstractHttpResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/AbstractHttpResponse.java new file mode 100644 index 00000000000..2963b74d9d8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/AbstractHttpResponse.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractHttpResponse implements HttpResponse { + + @Override public byte[] prefixContent() { + return null; + } + + @Override public int prefixContentLength() { + return -1; + } + + @Override public byte[] suffixContent() { + return null; + } + + @Override public int suffixContentLength() { + return -1; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/BaseHttpServerHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/BaseHttpServerHandler.java new file mode 100644 index 00000000000..d700b861a72 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/BaseHttpServerHandler.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import org.elasticsearch.client.Client; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class BaseHttpServerHandler extends AbstractComponent implements HttpServerHandler { + + protected final Client client; + + protected BaseHttpServerHandler(Settings settings, Client client) { + super(settings); + this.client = client; + } + + @Override public boolean spawn() { + return true; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/BindHttpException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/BindHttpException.java new file mode 100644 index 00000000000..c15c4772dbe --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/BindHttpException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +/** + * @author kimchy (Shay Banon) + */ +public class BindHttpException extends HttpException { + + public BindHttpException(String message) { + super(message); + } + + public BindHttpException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpChannel.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpChannel.java new file mode 100644 index 00000000000..40d87a688c9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpChannel.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http; + +/** + * @author kimchy (Shay Banon) + */ +public interface HttpChannel { + void sendResponse(HttpResponse response); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpException.java new file mode 100644 index 00000000000..265da0bc0aa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpException extends ElasticSearchException { + + public HttpException(String message) { + super(message); + } + + public HttpException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpRequest.java new file mode 100644 index 00000000000..7437eb36ead --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpRequest.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * Read side of an HTTP exchange: an incoming request as seen by server
 * handlers — method, URI, headers, cookie, body, and decoded query/body
 * parameters.
 *
 * NOTE(review): the extracted text showed raw {@code Set}/{@code List} and an
 * invalid {@code Map>} — the generic type arguments were stripped during
 * extraction and are restored here ({@code Map<String, List<String>>} is the
 * only well-formed reading).
 *
 * @author kimchy (Shay Banon)
 */
public interface HttpRequest {

    /** The subset of HTTP methods this server handles. */
    enum Method {
        GET, POST, PUT, DELETE
    }

    /** The request's HTTP method. */
    Method method();

    /** The raw request URI. */
    String uri();

    /** {@code true} when the request carries a body. */
    boolean hasContent();

    /** The request body decoded to a string. */
    String contentAsString();

    /** Names of all headers present on the request. */
    Set<String> headerNames();

    /** First value of the named header, or {@code null} if absent. */
    String header(String name);

    /** All values of the named header. */
    List<String> headers(String name);

    /** The request's cookie string. */
    String cookie();

    /** First value of the named parameter, or {@code null} if absent. */
    String param(String key);

    /** All values of the named parameter. */
    List<String> params(String key);

    /** All parameters, each name mapped to its list of values. */
    Map<String, List<String>> params();
}
This interim response is used to inform the client that the + * initial part of the request has been received and has not yet been rejected by the server. The client + * SHOULD continue by sending the remainder of the request or, if the request has already been completed, + * ignore this response. The server MUST send a final response after the request has been completed. + */ + CONTINUE(100), + /** + * The server understands and is willing to comply with the client's request, via the Upgrade message header field + * (section 14.42), for a change in the application protocol being used on this connection. The server will + * switch protocols to those defined by the response's Upgrade header field immediately after the empty line + * which terminates the 101 response. + */ + SWITCHING_PROTOCOLS(101), + /** + * The request has succeeded. The information returned with the response is dependent on the method + * used in the request, for example: + *

    + *
  • GET: an entity corresponding to the requested resource is sent in the response;
  • + *
  • HEAD: the entity-header fields corresponding to the requested resource are sent in the response without any message-body;
  • + *
  • POST: an entity describing or containing the result of the action;
  • + *
  • TRACE: an entity containing the request message as received by the end server.
  • + *
+ */ + OK(200), + /** + * The request has been fulfilled and resulted in a new resource being created. The newly created resource can + * be referenced by the URI(s) returned in the entity of the response, with the most specific URI for the + * resource given by a Location header field. The response SHOULD include an entity containing a list of resource + * characteristics and location(s) from which the user or user agent can choose the one most appropriate. The + * entity format is specified by the media type given in the Content-Type header field. The origin server MUST + * create the resource before returning the 201 status code. If the action cannot be carried out immediately, the + * server SHOULD respond with 202 (Accepted) response instead. + * + *

A 201 response MAY contain an ETag response header field indicating the current value of the entity tag + * for the requested variant just created, see section 14.19. + */ + CREATED(201), + /** + * The request has been accepted for processing, but the processing has not been completed. The request might + * or might not eventually be acted upon, as it might be disallowed when processing actually takes place. There + * is no facility for re-sending a status code from an asynchronous operation such as this. + * + *

The 202 response is intentionally non-committal. Its purpose is to allow a server to accept a request for + * some other process (perhaps a batch-oriented process that is only run once per day) without requiring that + * the user agent's connection to the server persist until the process is completed. The entity returned with + * this response SHOULD include an indication of the request's current status and either a pointer to a status + * monitor or some estimate of when the user can expect the request to be fulfilled. + */ + ACCEPTED(202), + /** + * The returned meta information in the entity-header is not the definitive set as available from the origin + * server, but is gathered from a local or a third-party copy. The set presented MAY be a subset or super set + * of the original version. For example, including local annotation information about the resource might + * result in a super set of the meta information known by the origin server. Use of this response code + * is not required and is only appropriate when the response would otherwise be 200 (OK). + */ + NON_AUTHORITATIVE_INFORMATION(203), + /** + * The server has fulfilled the request but does not need to return an entity-body, and might want to return + * updated meta information. The response MAY include new or updated meta information in the form of + * entity-headers, which if present SHOULD be associated with the requested variant. + * + *

If the client is a user agent, it SHOULD NOT change its document view from that which caused the request + * to be sent. This response is primarily intended to allow input for actions to take place without causing a + * change to the user agent's active document view, although any new or updated meta information SHOULD be + * applied to the document currently in the user agent's active view. + * + *

The 204 response MUST NOT include a message-body, and thus is always terminated by the first empty + * line after the header fields. + */ + NO_CONTENT(204), + /** + * The server has fulfilled the request and the user agent SHOULD reset the document view which caused the + * request to be sent. This response is primarily intended to allow input for actions to take place via user + * input, followed by a clearing of the form in which the input is given so that the user can easily initiate + * another input action. The response MUST NOT include an entity. + */ + RESET_CONTENT(205), + /** + * The server has fulfilled the partial GET request for the resource. The request MUST have included a Range + * header field (section 14.35) indicating the desired range, and MAY have included an If-Range header + * field (section 14.27) to make the request conditional. + * + *

The response MUST include the following header fields: + *

    + *
  • Either a Content-Range header field (section 14.16) indicating the range included with this response, + * or a multipart/byteranges Content-Type including Content-Range fields for each part. If a Content-Length + * header field is present in the response, its value MUST match the actual number of OCTETs transmitted in + * the message-body.
  • + *
  • Date
  • + *
  • ETag and/or Content-Location, if the header would have been sent in a 200 response to the same request
  • + *
  • Expires, Cache-Control, and/or Vary, if the field-value might differ from that sent in any previous + * response for the same variant
  • + *
+ * + *

If the 206 response is the result of an If-Range request that used a strong cache validator + * (see section 13.3.3), the response SHOULD NOT include other entity-headers. If the response is the result + * of an If-Range request that used a weak validator, the response MUST NOT include other entity-headers; + * this prevents inconsistencies between cached entity-bodies and updated headers. Otherwise, the response MUST + * include all of the entity-headers that would have been returned with a 200 (OK) response to the same request. + * + *

A cache MUST NOT combine a 206 response with other previously cached content if the ETag or Last-Modified + * headers do not match exactly, see 13.5.4. + * + *

A cache that does not support the Range and Content-Range headers MUST NOT cache 206 (Partial) responses. + */ + PARTIAL_CONTENT(206), + /** + * The 207 (Multi-Status) status code provides status for multiple independent operations (see Section 13 for + * more information). + * + *

A Multi-Status response conveys information about multiple resources in situations where multiple status + * codes might be appropriate. The default Multi-Status response body is a text/xml or application/xml HTTP + * entity with a 'multistatus' root element. Further elements contain 200, 300, 400, and 500 series status codes + * generated during the method invocation. 100 series status codes SHOULD NOT be recorded in a 'response' + * XML element. + * + *

Although '207' is used as the overall response status code, the recipient needs to consult the contents + * of the multistatus response body for further information about the success or failure of the method execution. + * The response MAY be used in success, partial success and also in failure situations. + * + *

The 'multistatus' root element holds zero or more 'response' elements in any order, each with + * information about an individual resource. Each 'response' element MUST have an 'href' element + * to identify the resource. + */ + MULTI_STATUS(207), + /** + * The requested resource corresponds to any one of a set of representations, each with its own specific + * location, and agent-driven negotiation information (section 12) is being provided so that the user (or user + * agent) can select a preferred representation and redirect its request to that location. + * + *

Unless it was a HEAD request, the response SHOULD include an entity containing a list of resource + * characteristics and location(s) from which the user or user agent can choose the one most appropriate. + * The entity format is specified by the media type given in the Content-Type header field. Depending upon the + * format and the capabilities of the user agent, selection of the most appropriate choice MAY be performed + * automatically. However, this specification does not define any standard for such automatic selection. + * + *

If the server has a preferred choice of representation, it SHOULD include the specific URI for that + * representation in the Location field; user agents MAY use the Location field value for automatic redirection. + * This response is cacheable unless indicated otherwise. + */ + MULTIPLE_CHOICES(300), + /** + * The requested resource has been assigned a new permanent URI and any future references to this resource + * SHOULD use one of the returned URIs. Clients with link editing capabilities ought to automatically re-link + * references to the Request-URI to one or more of the new references returned by the server, where possible. + * This response is cacheable unless indicated otherwise. + * + *

The new permanent URI SHOULD be given by the Location field in the response. Unless the request method + * was HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s). + * + *

If the 301 status code is received in response to a request other than GET or HEAD, the user agent + * MUST NOT automatically redirect the request unless it can be confirmed by the user, since this might change + * the conditions under which the request was issued. + */ + MOVED_PERMANENTLY(301), + /** + * The requested resource resides temporarily under a different URI. Since the redirection might be altered on + * occasion, the client SHOULD continue to use the Request-URI for future requests. This response is only + * cacheable if indicated by a Cache-Control or Expires header field. + * + *

The temporary URI SHOULD be given by the Location field in the response. Unless the request method was + * HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s). + * + *

If the 302 status code is received in response to a request other than GET or HEAD, the user agent + * MUST NOT automatically redirect the request unless it can be confirmed by the user, since this might change + * the conditions under which the request was issued. + */ + FOUND(302), + /** + * The response to the request can be found under a different URI and SHOULD be retrieved using a GET method on + * that resource. This method exists primarily to allow the output of a POST-activated script to redirect the + * user agent to a selected resource. The new URI is not a substitute reference for the originally requested + * resource. The 303 response MUST NOT be cached, but the response to the second (redirected) request might be + * cacheable. + * + *

The different URI SHOULD be given by the Location field in the response. Unless the request method was + * HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s). + */ + SEE_OTHER(303), + /** + * If the client has performed a conditional GET request and access is allowed, but the document has not been + * modified, the server SHOULD respond with this status code. The 304 response MUST NOT contain a message-body, + * and thus is always terminated by the first empty line after the header fields. + * + *

The response MUST include the following header fields: + *

    + *
  • Date, unless its omission is required by section 14.18.1 + * If a clockless origin server obeys these rules, and proxies and clients add their own Date to any + * response received without one (as already specified by [RFC 2068], section 14.19), caches will operate + * correctly. + *
  • + *
  • ETag and/or Content-Location, if the header would have been sent in a 200 response to the same request
  • + *
  • Expires, Cache-Control, and/or Vary, if the field-value might differ from that sent in any previous + * response for the same variant
  • + *
+ * + *

If the conditional GET used a strong cache validator (see section 13.3.3), the response SHOULD NOT include + * other entity-headers. Otherwise (i.e., the conditional GET used a weak validator), the response MUST NOT + * include other entity-headers; this prevents inconsistencies between cached entity-bodies and updated headers. + * + *

If a 304 response indicates an entity not currently cached, then the cache MUST disregard the response + * and repeat the request without the conditional. + * + *

If a cache uses a received 304 response to update a cache entry, the cache MUST update the entry to + * reflect any new field values given in the response. + */ + NOT_MODIFIED(304), + /** + * The requested resource MUST be accessed through the proxy given by the Location field. The Location field + * gives the URI of the proxy. The recipient is expected to repeat this single request via the proxy. + * 305 responses MUST only be generated by origin servers. + */ + USE_PROXY(305), + /** + * The requested resource resides temporarily under a different URI. Since the redirection MAY be altered on + * occasion, the client SHOULD continue to use the Request-URI for future requests. This response is only + * cacheable if indicated by a Cache-Control or Expires header field. + * + *

The temporary URI SHOULD be given by the Location field in the response. Unless the request method was + * HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s) , + * since many pre-HTTP/1.1 user agents do not understand the 307 status. Therefore, the note SHOULD contain + * the information necessary for a user to repeat the original request on the new URI. + * + *

If the 307 status code is received in response to a request other than GET or HEAD, the user agent MUST NOT + * automatically redirect the request unless it can be confirmed by the user, since this might change the + * conditions under which the request was issued. + */ + TEMPORARY_REDIRECT(307), + /** + * The request could not be understood by the server due to malformed syntax. The client SHOULD NOT repeat the + * request without modifications. + */ + BAD_REQUEST(400), + /** + * The request requires user authentication. The response MUST include a WWW-Authenticate header field + * (section 14.47) containing a challenge applicable to the requested resource. The client MAY repeat the request + * with a suitable Authorization header field (section 14.8). If the request already included Authorization + * credentials, then the 401 response indicates that authorization has been refused for those credentials. + * If the 401 response contains the same challenge as the prior response, and the user agent has already attempted + * authentication at least once, then the user SHOULD be presented the entity that was given in the response, + * since that entity might include relevant diagnostic information. HTTP access authentication is explained in + * "HTTP Authentication: Basic and Digest Access Authentication" [43]. + */ + UNAUTHORIZED(401), + /** + * This code is reserved for future use. + */ + PAYMENT_REQUIRED(402), + /** + * The server understood the request, but is refusing to fulfill it. Authorization will not help and the request + * SHOULD NOT be repeated. If the request method was not HEAD and the server wishes to make public why the + * request has not been fulfilled, it SHOULD describe the reason for the refusal in the entity. If the server + * does not wish to make this information available to the client, the status code 404 (Not Found) can be used + * instead. + */ + FORBIDDEN(403), + /** + * The server has not found anything matching the Request-URI. 
No indication is given of whether the condition + * is temporary or permanent. The 410 (Gone) status code SHOULD be used if the server knows, through some + * internally configurable mechanism, that an old resource is permanently unavailable and has no forwarding + * address. This status code is commonly used when the server does not wish to reveal exactly why the request + * has been refused, or when no other response is applicable. + */ + NOT_FOUND(404), + /** + * The method specified in the Request-Line is not allowed for the resource identified by the Request-URI. + * The response MUST include an Allow header containing a list of valid methods for the requested resource. + */ + METHOD_NOT_ALLOWED(405), + /** + * The resource identified by the request is only capable of generating response entities which have content + * characteristics not acceptable according to the accept headers sent in the request. + * + *

Unless it was a HEAD request, the response SHOULD include an entity containing a list of available entity + * characteristics and location(s) from which the user or user agent can choose the one most appropriate. + * The entity format is specified by the media type given in the Content-Type header field. Depending upon the + * format and the capabilities of the user agent, selection of the most appropriate choice MAY be performed + * automatically. However, this specification does not define any standard for such automatic selection. + * + *

Note: HTTP/1.1 servers are allowed to return responses which are not acceptable according to the accept + * headers sent in the request. In some cases, this may even be preferable to sending a 406 response. User + * agents are encouraged to inspect the headers of an incoming response to determine if it is acceptable. + * + *

If the response could be unacceptable, a user agent SHOULD temporarily stop receipt of more data and query + * the user for a decision on further actions. + */ + NOT_ACCEPTABLE(406), + /** + * This code is similar to 401 (Unauthorized), but indicates that the client must first authenticate itself with + * the proxy. The proxy MUST return a Proxy-Authenticate header field (section 14.33) containing a challenge + * applicable to the proxy for the requested resource. The client MAY repeat the request with a suitable + * Proxy-Authorization header field (section 14.34). HTTP access authentication is explained in + * "HTTP Authentication: Basic and Digest Access Authentication" [43]. + */ + PROXY_AUTHENTICATION(407), + /** + * The client did not produce a request within the time that the server was prepared to wait. The client MAY + * repeat the request without modifications at any later time. + */ + REQUEST_TIMEOUT(408), + /** + * The request could not be completed due to a conflict with the current state of the resource. This code is + * only allowed in situations where it is expected that the user might be able to resolve the conflict and + * resubmit the request. The response body SHOULD include enough information for the user to recognize the + * source of the conflict. Ideally, the response entity would include enough information for the user or user + * agent to fix the problem; however, that might not be possible and is not required. + * + *

Conflicts are most likely to occur in response to a PUT request. For example, if versioning were being + * used and the entity being PUT included changes to a resource which conflict with those made by an earlier + * (third-party) request, the server might use the 409 response to indicate that it can't complete the request. + * In this case, the response entity would likely contain a list of the differences between the two versions in + * a format defined by the response Content-Type. + */ + CONFLICT(409), + /** + * The requested resource is no longer available at the server and no forwarding address is known. This condition + * is expected to be considered permanent. Clients with link editing capabilities SHOULD delete references to + * the Request-URI after user approval. If the server does not know, or has no facility to determine, whether or + * not the condition is permanent, the status code 404 (Not Found) SHOULD be used instead. This response is + * cacheable unless indicated otherwise. + * + *

The 410 response is primarily intended to assist the task of web maintenance by notifying the recipient + * that the resource is intentionally unavailable and that the server owners desire that remote links to that + * resource be removed. Such an event is common for limited-time, promotional services and for resources belonging + * to individuals no longer working at the server's site. It is not necessary to mark all permanently unavailable + * resources as "gone" or to keep the mark for any length of time -- that is left to the discretion of the server + * owner. + */ + GONE(410), + /** + * The server refuses to accept the request without a defined Content-Length. The client MAY repeat the request + * if it adds a valid Content-Length header field containing the length of the message-body in the request message. + */ + LENGTH_REQUIRED(411), + /** + * The precondition given in one or more of the request-header fields evaluated to false when it was tested on + * the server. This response code allows the client to place preconditions on the current resource metainformation + * (header field data) and thus prevent the requested method from being applied to a resource other than the one + * intended. + */ + PRECONDITION_FAILED(412), + /** + * The server is refusing to process a request because the request entity is larger than the server is willing + * or able to process. The server MAY close the connection to prevent the client from continuing the request. + * + *

If the condition is temporary, the server SHOULD include a Retry-After header field to indicate that it + * is temporary and after what time the client MAY try again. + */ + REQUEST_ENTITY_TOO_LARGE(413), + /** + * The server is refusing to service the request because the Request-URI is longer than the server is willing + * to interpret. This rare condition is only likely to occur when a client has improperly converted a POST + * request to a GET request with long query information, when the client has descended into a URI "black hole" + * of redirection (e.g., a redirected URI prefix that points to a suffix of itself), or when the server is + * under attack by a client attempting to exploit security holes present in some servers using fixed-length + * buffers for reading or manipulating the Request-URI. + */ + REQUEST_URI_TOO_LONG(414), + /** + * The server is refusing to service the request because the entity of the request is in a format not supported + * by the requested resource for the requested method. + */ + UNSUPPORTED_MEDIA_TYPE(415), + /** + * A server SHOULD return a response with this status code if a request included a Range request-header field + * (section 14.35), and none of the range-specifier values in this field overlap the current extent of the + * selected resource, and the request did not include an If-Range request-header field. (For byte-ranges, this + * means that the first-byte-pos of all of the byte-range-spec values were greater than the current length of + * the selected resource.) + * + *

When this status code is returned for a byte-range request, the response SHOULD include a Content-Range + * entity-header field specifying the current length of the selected resource (see section 14.16). This + * response MUST NOT use the multipart/byteranges content-type. + */ + REQUESTED_RANGE_NOT_SATISFIED(416), + /** + * The expectation given in an Expect request-header field (see section 14.20) could not be met by this server, + * or, if the server is a proxy, the server has unambiguous evidence that the request could not be met by the + * next-hop server. + */ + EXPECTATION_FAILED(417), + /** + * The 422 (Unprocessable Entity) status code means the server understands the content type of the request + * entity (hence a 415(Unsupported Media Type) status code is inappropriate), and the syntax of the request + * entity is correct (thus a 400 (Bad Request) status code is inappropriate) but was unable to process the + * contained instructions. For example, this error condition may occur if an XML request body contains + * well-formed (i.e., syntactically correct), but semantically erroneous, XML instructions. + */ + UNPROCESSABLE_ENTITY(422), + /** + * The 423 (Locked) status code means the source or destination resource of a method is locked. This response + * SHOULD contain an appropriate precondition or postcondition code, such as 'lock-token-submitted' or + * 'no-conflicting-lock'. + */ + LOCKED(423), + /** + * The 424 (Failed Dependency) status code means that the method could not be performed on the resource because + * the requested action depended on another action and that action failed. For example, if a command in a + * PROPPATCH method fails, then, at minimum, the rest of the commands will also fail with 424 (Failed Dependency). + */ + FAILED_DEPENDENCY(424), + /** + * The server encountered an unexpected condition which prevented it from fulfilling the request. 
+ */ + INTERNAL_SERVER_ERROR(500), + /** + * The server does not support the functionality required to fulfill the request. This is the appropriate + * response when the server does not recognize the request method and is not capable of supporting it for any + * resource. + */ + NOT_IMPLEMENTED(501), + /** + * The server, while acting as a gateway or proxy, received an invalid response from the upstream server it + * accessed in attempting to fulfill the request. + */ + BAD_GATEWAY(502), + /** + * The server is currently unable to handle the request due to a temporary overloading or maintenance of the + * server. The implication is that this is a temporary condition which will be alleviated after some delay. + * If known, the length of the delay MAY be indicated in a Retry-After header. If no Retry-After is given, + * the client SHOULD handle the response as it would for a 500 response. + */ + SERVICE_UNAVAILABLE(503), + /** + * The server, while acting as a gateway or proxy, did not receive a timely response from the upstream server + * specified by the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server (e.g. DNS) it needed to access + * in attempting to complete the request. + */ + GATEWAY_TIMEOUT(504), + /** + * The server does not support, or refuses to support, the HTTP protocol version that was used in the request + * message. The server is indicating that it is unable or unwilling to complete the request using the same major + * version as the client, as described in section 3.1, other than with this error message. The response SHOULD + * contain an entity describing why that version is not supported and what other protocols are supported by + * that server. + */ + HTTP_VERSION_NOT_SUPPORTED(505), + /** + * The 507 (Insufficient Storage) status code means the method could not be performed on the resource because + * the server is unable to store the representation needed to successfully complete the request. 
This condition + * is considered to be temporary. If the request that received this status code was the result of a user action, + * the request MUST NOT be repeated until it is requested by a separate user action. + */ + INSUFFICIENT_STORAGE(506); + + + private int status; + + Status(int status) { + this.status = status; + } + + public int getStatus() { + return status; + } + } + + /** + * Can the content byte[] be used only with this thread (false), or by any thread (true). + */ + boolean contentThreadSafe(); + + String contentType(); + + /** + * Returns the actual content. Note, use {@link #contentLength()} in order to know the + * content length of the byte array. + */ + byte[] content(); + + /** + * The content length. + */ + int contentLength(); + + byte[] prefixContent(); + + int prefixContentLength(); + + byte[] suffixContent(); + + int suffixContentLength(); + + Status status(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpServer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpServer.java new file mode 100644 index 00000000000..4402a9fb85a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpServer.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpServer extends AbstractComponent implements LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final HttpServerTransport transport; + + private final ThreadPool threadPool; + + private final PathTrie getHandlers; + private final PathTrie postHandlers; + private final PathTrie putHandlers; + private final PathTrie deleteHandlers; + + @Inject public HttpServer(Settings settings, HttpServerTransport transport, ThreadPool threadPool) { + super(settings); + this.transport = transport; + this.threadPool = threadPool; + + getHandlers = new PathTrie(); + postHandlers = new PathTrie(); + putHandlers = new PathTrie(); + deleteHandlers = new PathTrie(); + + transport.httpServerAdapter(new HttpServerAdapter() { + @Override public void dispatchRequest(HttpRequest request, HttpChannel channel) { + internalDispatchRequest(request, channel); + } + }); + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + public void registerHandler(HttpRequest.Method method, String path, HttpServerHandler handler) { + if (method == HttpRequest.Method.GET) { + getHandlers.insert(path, handler); + } else if (method == HttpRequest.Method.POST) { + postHandlers.insert(path, handler); + } else if (method == HttpRequest.Method.PUT) { + putHandlers.insert(path, handler); 
+ } else if (method == HttpRequest.Method.DELETE) { + deleteHandlers.insert(path, handler); + } + } + + public HttpServer start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + transport.start(); + if (logger.isInfoEnabled()) { + logger.info("{}", transport.boundAddress()); + } + return this; + } + + public HttpServer stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + transport.stop(); + return this; + } + + public void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + transport.close(); + } + + private void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) { + final HttpServerHandler httpHandler = getHandler(request); + if (httpHandler != null) { + if (httpHandler.spawn()) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + httpHandler.handleRequest(request, channel); + } catch (Exception e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1); + } + } + } + }); + } else { + try { + httpHandler.handleRequest(request, channel); + } catch (Exception e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1); + } + } + } + } else { + channel.sendResponse(new StringHttpResponse(BAD_REQUEST, "No handler found for uri [" + request.uri() + "] and method [" + request.method() + "]")); + } + } + + private HttpServerHandler getHandler(HttpRequest request) { + String path = getPath(request); + HttpRequest.Method method = request.method(); + if (method == HttpRequest.Method.GET) { + return getHandlers.retrieve(path, request.params()); + } else if (method == HttpRequest.Method.POST) { + return 
postHandlers.retrieve(path, request.params()); + } else if (method == HttpRequest.Method.PUT) { + return putHandlers.retrieve(path, request.params()); + } else if (method == HttpRequest.Method.DELETE) { + return deleteHandlers.retrieve(path, request.params()); + } else { + return null; + } + } + + private String getPath(HttpRequest request) { + String uri = request.uri(); + int questionMarkIndex = uri.indexOf('?'); + if (questionMarkIndex == -1) { + return uri; + } + return uri.substring(0, questionMarkIndex); + } + + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpServerAdapter.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpServerAdapter.java new file mode 100644 index 00000000000..59fd54d931f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/HttpServerAdapter.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * Bridge between an {@link HttpServerTransport} and the {@link HttpServer}:
 * the transport hands each decoded request, together with the channel to
 * answer on, to the adapter installed via
 * {@link HttpServerTransport#httpServerAdapter(HttpServerAdapter)}.
 *
 * @author kimchy (Shay Banon)
 */
public interface HttpServerAdapter {

    /**
     * Dispatches a single incoming request; the response is sent back over
     * the given channel.
     */
    void dispatchRequest(HttpRequest request, HttpChannel channel);
}
/**
 * A handler for http requests, registered with the {@link HttpServer} under a
 * method and path pattern.
 *
 * @author kimchy (Shay Banon)
 */
public interface HttpServerHandler {

    /**
     * Handles the request, eventually sending a response over the channel.
     */
    void handleRequest(HttpRequest request, HttpChannel channel);

    /**
     * Should the handler be executed on a thread-pool thread ({@code true}),
     * or directly on the calling (transport) thread ({@code false}).
     */
    boolean spawn();
}
/**
 * Guice module wiring the http layer: binds {@link HttpServer}, the configured
 * (or default netty based) {@link HttpServerTransport} module, and the http
 * action handlers.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpServerModule extends AbstractModule {

    private final Settings settings;

    public HttpServerModule(Settings settings) {
        this.settings = settings;
    }

    @SuppressWarnings({"unchecked"}) @Override protected void configure() {
        bind(HttpServer.class).asEagerSingleton();

        // default to the netty transport module if the netty classes are on the classpath
        Class<? extends Module> defaultHttpServerTransportModule = null;
        try {
            // first load is an existence check only; the module class is what gets used
            Classes.getDefaultClassLoader().loadClass("org.elasticsearch.http.netty.NettyHttpServerTransport");
            defaultHttpServerTransportModule = (Class<? extends Module>) Classes.getDefaultClassLoader().loadClass("org.elasticsearch.http.netty.NettyHttpServerTransportModule");
        } catch (ClassNotFoundException e) {
            // no netty one, ok...
            if (settings.get("http.type") == null) {
                // no explicit one is configured, bail: no transport (or actions) get bound
                return;
            }
        }

        // "http.type" may name a custom transport module; falls back to the netty default
        Class<? extends Module> moduleClass = settings.getAsClass("http.type", defaultHttpServerTransportModule, "org.elasticsearch.http.", "HttpServerTransportModule");
        createModule(moduleClass, settings).configure(binder());

        new HttpActionModule().configure(binder());
    }
}
/**
 * A pluggable network transport for the http layer. Implementations accept
 * connections, decode requests, and forward them to the installed
 * {@link HttpServerAdapter}.
 *
 * @author kimchy (Shay Banon)
 */
public interface HttpServerTransport extends LifecycleComponent<HttpServerTransport> {

    /**
     * The address(es) this transport is bound to once started.
     */
    BoundTransportAddress boundAddress();

    /**
     * Installs the adapter that incoming requests are dispatched to.
     */
    void httpServerAdapter(HttpServerAdapter httpServerAdapter);
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonHttpResponse extends Utf8HttpResponse { + + private static ThreadLocal cache = new ThreadLocal() { + @Override protected UnicodeUtil.UTF8Result initialValue() { + return new UnicodeUtil.UTF8Result(); + } + }; + + private static final UnicodeUtil.UTF8Result END_JSONP = new UnicodeUtil.UTF8Result(); + + static { + UnicodeUtil.UTF16toUTF8(");", 0, ");".length(), END_JSONP); + } + + private static ThreadLocal prefixCache = new ThreadLocal() { + @Override protected UnicodeUtil.UTF8Result initialValue() { + return new UnicodeUtil.UTF8Result(); + } + }; + + public JsonHttpResponse(HttpRequest request, Status status) { + super(status, EMPTY, startJsonp(request), endJsonp(request)); + } + + public JsonHttpResponse(HttpRequest request, Status status, JsonBuilder jsonBuilder) throws IOException { + super(status, jsonBuilder.utf8(), startJsonp(request), endJsonp(request)); + } + + public JsonHttpResponse(HttpRequest request, Status status, String source) throws IOException { + super(status, convert(source), startJsonp(request), endJsonp(request)); + } + + @Override public String contentType() { + return "application/json; charset=UTF-8"; + } + + private static UnicodeUtil.UTF8Result convert(String content) { + UnicodeUtil.UTF8Result result = cache.get(); + UnicodeUtil.UTF16toUTF8(content, 0, content.length(), result); + return result; + 
} + + private static UnicodeUtil.UTF8Result startJsonp(HttpRequest request) { + String callback = request.param("callback"); + if (callback == null) { + return null; + } + UnicodeUtil.UTF8Result result = prefixCache.get(); + UnicodeUtil.UTF16toUTF8(callback, 0, callback.length(), result); + result.result[result.length] = '('; + result.length++; + return result; + } + + private static UnicodeUtil.UTF8Result endJsonp(HttpRequest request) { + String callback = request.param("callback"); + if (callback == null) { + return null; + } + return END_JSONP; + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/JsonThrowableHttpResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/JsonThrowableHttpResponse.java new file mode 100644 index 00000000000..72ecc82a7a4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/JsonThrowableHttpResponse.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * A json response rendering a {@link Throwable}: the detailed message under
 * "error", plus a "debug" object carrying the message, cause chain and stack
 * trace elements of each throwable in the chain.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonThrowableHttpResponse extends JsonHttpResponse {

    // per-thread pair of char writer + wrapping PrintWriter, reused across responses
    private static class Holder {
        FastCharArrayWriter writer;
        PrintWriter printWriter;
    }

    private static ThreadLocal<Holder> cache = new ThreadLocal<Holder>() {
        @Override protected Holder initialValue() {
            Holder holder = new Holder();
            holder.writer = new FastCharArrayWriter();
            holder.printWriter = new PrintWriter(holder.writer);
            return holder;
        }
    };

    /**
     * Builds a 500 (Internal Server Error) response for the given throwable.
     */
    public JsonThrowableHttpResponse(HttpRequest request, Throwable t) throws IOException {
        this(request, Status.INTERNAL_SERVER_ERROR, t);
    }

    public JsonThrowableHttpResponse(HttpRequest request, Status status, Throwable t) throws IOException {
        super(request, status, convert(t));
    }

    // renders the throwable and its cause chain into a json builder
    private static JsonBuilder convert(Throwable t) throws IOException {
        Holder holder = cache.get();
        holder.writer.reset();
        // NOTE(review): the stack trace printed into the writer here is never read
        // back below -- looks like dead code; confirm intent before removing
        t.printStackTrace(holder.printWriter);
        JsonBuilder builder = cached().prettyPrint()
                .startObject().field("error", ExceptionsHelper.detailedMessage(t, false, 0));
        builder.startObject("debug");
        boolean first = true;
        while (t != null) {
            if (!first) {
                // NOTE(review): each "cause" object is closed right after being written,
                // so multiple causes become sibling fields with the same name inside
                // "debug" rather than a nested chain -- confirm this is the intended shape
                builder.startObject("cause");
            }
            buildThrowable(t, builder);
            if (!first) {
                builder.endObject();
            }
            t = t.getCause();
            first = false;
        }
        builder.endObject();
        builder.endObject();
        return builder;
    }

    // writes the message and stack trace of a single throwable; every stack
    // element is emitted as a field named "at" inside the current object
    private static void buildThrowable(Throwable t, JsonBuilder builder) throws IOException {
        builder.field("message", t.getMessage());
        for (StackTraceElement stElement : t.getStackTrace()) {
            builder.startObject("at")
                    .field("className", stElement.getClassName())
                    .field("methodName", stElement.getMethodName());
            if (stElement.getFileName() != null) {
                builder.field("fileName", stElement.getFileName());
            }
            if (stElement.getLineNumber() >= 0) {
                builder.field("lineNumber", stElement.getLineNumber());
            }
            builder.endObject();
        }
    }
}
/**
 * A trie of path segments mapping registered path patterns to values of type
 * {@code T}. Patterns are split on a separator (default "/"); a segment of
 * the form "{name}" is a named wildcard that matches any single segment, and
 * on retrieval the matched segment is recorded under "name" in the params map.
 *
 * <p>Insertion is synchronized per node; lookups read an immutable
 * copy-on-write children map, so concurrent retrieval is safe.
 *
 * @author kimchy (Shay Banon)
 */
public class PathTrie<T> {

    private final TrieNode<T> root;
    // compiled separator used to split incoming paths
    private final Pattern pattern;
    // value registered for the empty path
    private T rootValue;

    public PathTrie() {
        this("/", "*");
    }

    public PathTrie(String separator, String wildcard) {
        pattern = Pattern.compile(separator);
        root = new TrieNode<T>(separator, null, null, wildcard);
    }

    public static class TrieNode<T> {
        private transient String key;
        private transient T value;
        private boolean isWildcard;
        private final String wildcard;

        // the name inside "{...}" when this node is a named wildcard, else null
        private transient String namedWildcard;

        // copy-on-write children map: replaced wholesale under synchronization so
        // concurrent readers always see a consistent immutable snapshot
        private ImmutableMap<String, TrieNode<T>> children;

        private final TrieNode<T> parent;

        public TrieNode(String key, T value, TrieNode<T> parent, String wildcard) {
            this.key = key;
            this.wildcard = wildcard;
            this.isWildcard = (key.equals(wildcard));
            this.parent = parent;
            this.value = value;
            this.children = ImmutableMap.of();
            if (isNamedWildcard(key)) {
                namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}'));
            } else {
                namedWildcard = null;
            }
        }

        // re-keys this node as a named wildcard (used when a later pattern names
        // a wildcard that was first inserted anonymously)
        public void updateKeyWithNamedWildcard(String key) {
            this.key = key;
            namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}'));
        }

        public boolean isWildcard() {
            return isWildcard;
        }

        public synchronized void addChild(TrieNode<T> child) {
            children = newMapBuilder(children).put(child.key, child).immutableMap();
        }

        public TrieNode<T> getChild(String key) {
            return children.get(key);
        }

        public synchronized void insert(String[] path, int index, T value) {
            if (index >= path.length)
                return;

            String token = path[index];
            String key = token;
            // named wildcards ("{name}") are stored under the generic wildcard key
            if (isNamedWildcard(token)) {
                key = wildcard;
            }
            TrieNode<T> node = children.get(key);
            if (node == null) {
                // only the last path segment carries the value
                if (index == (path.length - 1)) {
                    node = new TrieNode<T>(token, value, this, wildcard);
                } else {
                    node = new TrieNode<T>(token, null, this, wildcard);
                }
                children = newMapBuilder(children).put(key, node).immutableMap();
            } else {
                if (isNamedWildcard(token)) {
                    node.updateKeyWithNamedWildcard(token);
                }

                // in case the target(last) node already exist but without a value
                // than the value should be updated.
                if (index == (path.length - 1)) {
                    assert (node.value == null || node.value == value);
                    if (node.value == null) {
                        node.value = value;
                    }
                }
            }

            node.insert(path, index + 1, value);
        }

        private boolean isNamedWildcard(String key) {
            return key.indexOf('{') != -1 && key.indexOf('}') != -1;
        }

        private String namedWildcard() {
            return namedWildcard;
        }

        private boolean isNamedWildcard() {
            return namedWildcard != null;
        }

        public T retrieve(String[] path, int index, Map<String, List<String>> params) {
            if (index >= path.length)
                return null;

            String token = path[index];
            TrieNode<T> node = children.get(token);
            boolean usedWildcard = false;
            if (node == null) {
                // exact segment miss: fall back to the wildcard child
                node = children.get(wildcard);
                if (node == null) {
                    return null;
                } else {
                    usedWildcard = true;
                    if (params != null && node.isNamedWildcard()) {
                        put(params, node.namedWildcard(), token);
                    }
                }
            }

            if (index == (path.length - 1)) {
                return node.value;
            }

            // exact child matched this segment but the rest of the path failed:
            // retry the remainder through the wildcard child.
            // NOTE(review): the wildcard param is recorded before the retry, so params
            // may carry an entry even when the retry ultimately returns null -- confirm
            T res = node.retrieve(path, index + 1, params);
            if (res == null && !usedWildcard) {
                node = children.get(wildcard);
                if (node != null) {
                    if (params != null && node.isNamedWildcard()) {
                        put(params, node.namedWildcard(), token);
                    }
                    res = node.retrieve(path, index + 1, params);
                }
            }

            return res;
        }
    }

    public void insert(String path, T value) {
        String[] strings = pattern.split(path);
        if (strings.length == 0) {
            rootValue = value;
            return;
        }
        int index = 0;
        // supports initial delimiter.
        if (strings.length > 0 && strings[0].isEmpty()) {
            index = 1;
        }
        root.insert(strings, index, value);
    }

    public T retrieve(String path) {
        return retrieve(path, null);
    }

    public T retrieve(String path, Map<String, List<String>> params) {
        if (path.length() == 0) {
            return rootValue;
        }
        String[] strings = pattern.split(path);
        if (strings.length == 0) {
            return rootValue;
        }
        int index = 0;
        // supports initial delimiter.
        if (strings.length > 0 && strings[0].isEmpty()) {
            index = 1;
        }
        return root.retrieve(strings, index, params);
    }

    // appends value to the list under key, creating the list lazily
    private static void put(Map<String, List<String>> params, String key, String value) {
        List<String> list = params.get(key);
        if (list == null) {
            list = new ArrayList<String>(1);
            params.put(key, list);
        }
        list.add(value);
    }
}
+ */ + +package org.elasticsearch.http; + +import org.apache.lucene.util.UnicodeUtil; + +/** + * @author kimchy (Shay Banon) + */ +public class StringHttpResponse extends Utf8HttpResponse { + + private static ThreadLocal cache = new ThreadLocal() { + @Override protected UnicodeUtil.UTF8Result initialValue() { + return new UnicodeUtil.UTF8Result(); + } + }; + + public StringHttpResponse(Status status) { + super(status); + } + + public StringHttpResponse(Status status, String content) { + super(status, convert(content)); + } + + private static UnicodeUtil.UTF8Result convert(String content) { + UnicodeUtil.UTF8Result result = cache.get(); + UnicodeUtil.UTF16toUTF8(content, 0, content.length(), result); + return result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/Utf8HttpResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/Utf8HttpResponse.java new file mode 100644 index 00000000000..fbf0ed15742 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/Utf8HttpResponse.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http; + +import org.apache.lucene.util.UnicodeUtil; + +/** + * An http response that is built on top of {@link org.apache.lucene.util.UnicodeUtil.UTF8Result}. + *

+ *

Note, this class assumes that the utf8 result is not thread safe. + * + * @author kimchy (Shay Banon) + */ +public class Utf8HttpResponse extends AbstractHttpResponse implements HttpResponse { + + public static final UnicodeUtil.UTF8Result EMPTY; + + static { + UnicodeUtil.UTF8Result temp = new UnicodeUtil.UTF8Result(); + temp.result = new byte[0]; + temp.length = 0; + EMPTY = temp; + } + + private final Status status; + + private final UnicodeUtil.UTF8Result utf8Result; + + private final UnicodeUtil.UTF8Result prefixUtf8Result; + + private final UnicodeUtil.UTF8Result suffixUtf8Result; + + public Utf8HttpResponse(Status status) { + this(status, EMPTY); + } + + public Utf8HttpResponse(Status status, UnicodeUtil.UTF8Result utf8Result) { + this(status, utf8Result, null, null); + } + + public Utf8HttpResponse(Status status, UnicodeUtil.UTF8Result utf8Result, + UnicodeUtil.UTF8Result prefixUtf8Result, UnicodeUtil.UTF8Result suffixUtf8Result) { + this.status = status; + this.utf8Result = utf8Result; + this.prefixUtf8Result = prefixUtf8Result; + this.suffixUtf8Result = suffixUtf8Result; + } + + @Override public boolean contentThreadSafe() { + return false; + } + + @Override public String contentType() { + return "text/plain; charset=UTF-8"; + } + + @Override public byte[] content() { + return utf8Result.result; + } + + @Override public int contentLength() { + return utf8Result.length; + } + + @Override public Status status() { + return status; + } + + @Override public byte[] prefixContent() { + return prefixUtf8Result != null ? prefixUtf8Result.result : null; + } + + @Override public int prefixContentLength() { + return prefixUtf8Result != null ? prefixUtf8Result.length : -1; + } + + @Override public byte[] suffixContent() { + return suffixUtf8Result != null ? suffixUtf8Result.result : null; + } + + @Override public int suffixContentLength() { + return suffixUtf8Result != null ? 
suffixUtf8Result.length : -1; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/HttpActionModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/HttpActionModule.java new file mode 100644 index 00000000000..96681ae16af --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/HttpActionModule.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action; + +import com.google.inject.AbstractModule; +import org.elasticsearch.http.action.admin.cluster.node.info.HttpNodesInfoAction; +import org.elasticsearch.http.action.admin.cluster.ping.broadcast.HttpBroadcastPingAction; +import org.elasticsearch.http.action.admin.cluster.ping.replication.HttpReplicationPingAction; +import org.elasticsearch.http.action.admin.cluster.ping.single.HttpSinglePingAction; +import org.elasticsearch.http.action.admin.cluster.state.HttpClusterStateAction; +import org.elasticsearch.http.action.admin.indices.create.HttpCreateIndexAction; +import org.elasticsearch.http.action.admin.indices.delete.HttpDeleteIndexAction; +import org.elasticsearch.http.action.admin.indices.flush.HttpFlushAction; +import org.elasticsearch.http.action.admin.indices.gateway.snapshot.HttpGatewaySnapshotAction; +import org.elasticsearch.http.action.admin.indices.mapping.create.HttpCreateMappingAction; +import org.elasticsearch.http.action.admin.indices.refresh.HttpRefreshAction; +import org.elasticsearch.http.action.admin.indices.status.HttpIndicesStatusAction; +import org.elasticsearch.http.action.count.HttpCountAction; +import org.elasticsearch.http.action.delete.HttpDeleteAction; +import org.elasticsearch.http.action.deletebyquery.HttpDeleteByQueryAction; +import org.elasticsearch.http.action.get.HttpGetAction; +import org.elasticsearch.http.action.index.HttpIndexAction; +import org.elasticsearch.http.action.main.HttpMainAction; +import org.elasticsearch.http.action.search.HttpSearchAction; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpActionModule extends AbstractModule { + + @Override protected void configure() { + bind(HttpMainAction.class).asEagerSingleton(); + + bind(HttpNodesInfoAction.class).asEagerSingleton(); + bind(HttpClusterStateAction.class).asEagerSingleton(); + + bind(HttpSinglePingAction.class).asEagerSingleton(); + bind(HttpBroadcastPingAction.class).asEagerSingleton(); + 
bind(HttpReplicationPingAction.class).asEagerSingleton(); + + bind(HttpIndicesStatusAction.class).asEagerSingleton(); + bind(HttpCreateIndexAction.class).asEagerSingleton(); + bind(HttpDeleteIndexAction.class).asEagerSingleton(); + + bind(HttpCreateMappingAction.class).asEagerSingleton(); + + bind(HttpGatewaySnapshotAction.class).asEagerSingleton(); + + bind(HttpRefreshAction.class).asEagerSingleton(); + + bind(HttpFlushAction.class).asEagerSingleton(); + + bind(HttpIndexAction.class).asEagerSingleton(); + + bind(HttpGetAction.class).asEagerSingleton(); + + bind(HttpDeleteAction.class).asEagerSingleton(); + + bind(HttpDeleteByQueryAction.class).asEagerSingleton(); + + bind(HttpCountAction.class).asEagerSingleton(); + + bind(HttpSearchAction.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/cluster/node/info/HttpNodesInfoAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/cluster/node/info/HttpNodesInfoAction.java new file mode 100644 index 00000000000..12ef2985cdb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/cluster/node/info/HttpNodesInfoAction.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.action.admin.cluster.node.info;

import com.google.inject.Inject;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.http.*;
import org.elasticsearch.http.action.support.HttpActions;
import org.elasticsearch.http.action.support.HttpJsonBuilder;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

/**
 * Http handler for cluster nodes info: GET /_cluster/nodes and
 * GET /_cluster/nodes/${nodeId}, rendering one json object per node.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpNodesInfoAction extends BaseHttpServerHandler {

    @Inject public HttpNodesInfoAction(Settings settings, HttpServer httpServer, Client client) {
        super(settings, client);

        httpServer.registerHandler(HttpRequest.Method.GET, "/_cluster/nodes", this);
        httpServer.registerHandler(HttpRequest.Method.GET, "/_cluster/nodes/${nodeId}", this);
    }

    @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) {
        // An absent nodeId param means "all nodes".
        final String[] nodeIds = HttpActions.splitNodes(request.param("nodeId"));
        final NodesInfoRequest infoRequest = new NodesInfoRequest(nodeIds);
        // Response rendering below is cheap, no need for a threaded listener.
        infoRequest.listenerThreaded(false);
        client.admin().cluster().execNodesInfo(infoRequest, new ActionListener<NodesInfoResponse>() {
            @Override public void onResponse(NodesInfoResponse result) {
                try {
                    JsonBuilder builder = HttpJsonBuilder.cached(request);
                    builder.startObject();
                    builder.field("clusterName", result.clusterName().value());
                    // One nested object per node, keyed by node id.
                    for (NodeInfo nodeInfo : result) {
                        builder.startObject(nodeInfo.node().id())
                                .field("name", nodeInfo.node().name())
                                .field("transportAddress", nodeInfo.node().address().toString())
                                .field("dataNode", nodeInfo.node().dataNode())
                                .endObject();
                    }
                    builder.endObject();
                    channel.sendResponse(new JsonHttpResponse(request, HttpResponse.Status.OK, builder));
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new JsonThrowableHttpResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }

    @Override public boolean spawn() {
        // The listener is not threaded and rendering is cheap; run on the calling thread.
        return false;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.action.admin.cluster.ping.broadcast;

import com.google.inject.Inject;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingRequest;
import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingResponse;
import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
import org.elasticsearch.client.Client;
import org.elasticsearch.http.*;
import org.elasticsearch.http.action.support.HttpActions;
import org.elasticsearch.http.action.support.HttpJsonBuilder;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.http.HttpResponse.Status.*;

/**
 * Http handler for the broadcast ping: GET /{index}/_ping/broadcast and
 * GET /_cluster/{index}/_ping/broadcast, reporting per-shard success counts.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpBroadcastPingAction extends BaseHttpServerHandler {

    @Inject public HttpBroadcastPingAction(Settings settings, HttpServer httpService, Client client) {
        super(settings, client);
        httpService.registerHandler(HttpRequest.Method.GET, "/{index}/_ping/broadcast", this);
        httpService.registerHandler(HttpRequest.Method.GET, "/_cluster/{index}/_ping/broadcast", this);
    }

    @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) {
        final BroadcastPingRequest pingRequest = new BroadcastPingRequest(HttpActions.splitIndices(request.param("index")));
        pingRequest.queryHint(request.param("queryHint"));
        BroadcastOperationThreading threading = BroadcastOperationThreading.fromString(request.param("operationThreading"), BroadcastOperationThreading.SINGLE_THREAD);
        if (threading == BroadcastOperationThreading.NO_THREADS) {
            // since we don't spawn, don't allow no_threads, but change it to a single thread
            threading = BroadcastOperationThreading.SINGLE_THREAD;
        }
        pingRequest.operationThreading(threading);
        client.admin().cluster().execPing(pingRequest, new ActionListener<BroadcastPingResponse>() {
            @Override public void onResponse(BroadcastPingResponse result) {
                try {
                    JsonBuilder builder = HttpJsonBuilder.cached(request);
                    builder.startObject()
                            .field("ok", true)
                            .field("totalShards", result.totalShards())
                            .field("successfulShards", result.successfulShards())
                            .field("failedShards", result.failedShards())
                            .endObject();
                    channel.sendResponse(new JsonHttpResponse(request, OK, builder));
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new JsonThrowableHttpResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }

    @Override public boolean spawn() {
        // Work is pushed to shard-level threads via operationThreading; no handler spawn.
        return false;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.action.admin.cluster.ping.replication;

import com.google.inject.Inject;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.ping.replication.IndexReplicationPingResponse;
import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingRequest;
import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingResponse;
import org.elasticsearch.action.admin.cluster.ping.replication.ShardReplicationPingRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.http.*;
import org.elasticsearch.http.action.support.HttpActions;
import org.elasticsearch.http.action.support.HttpJsonBuilder;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.http.HttpResponse.Status.*;

/**
 * Http handler for the replication ping: GET /{index}/_ping/replication and
 * GET /_cluster/{index}/_ping/replication, reporting shard counts per index.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpReplicationPingAction extends BaseHttpServerHandler {

    @Inject public HttpReplicationPingAction(Settings settings, HttpServer httpService, Client client) {
        super(settings, client);
        httpService.registerHandler(HttpRequest.Method.GET, "/{index}/_ping/replication", this);
        httpService.registerHandler(HttpRequest.Method.GET, "/_cluster/{index}/_ping/replication", this);
    }

    @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) {
        final ReplicationPingRequest pingRequest = new ReplicationPingRequest(HttpActions.splitIndices(request.param("index")));
        // Optional ?timeout=… parameter, falling back to the shard-level default.
        pingRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), ShardReplicationPingRequest.DEFAULT_TIMEOUT));
        pingRequest.listenerThreaded(false);
        client.admin().cluster().execPing(pingRequest, new ActionListener<ReplicationPingResponse>() {
            @Override public void onResponse(ReplicationPingResponse response) {
                try {
                    JsonBuilder builder = HttpJsonBuilder.cached(request);
                    builder.startObject();
                    builder.field("ok", true);
                    // One nested object per index, keyed by index name.
                    for (IndexReplicationPingResponse indexResponse : response.indices().values()) {
                        builder.startObject(indexResponse.index())
                                .field("ok", true)
                                .field("totalShards", indexResponse.totalShards())
                                .field("successfulShards", indexResponse.successfulShards())
                                .field("failedShards", indexResponse.failedShards())
                                .endObject();
                    }
                    builder.endObject();
                    channel.sendResponse(new JsonHttpResponse(request, OK, builder));
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new JsonThrowableHttpResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }

    @Override public boolean spawn() {
        // we don't spawn since we fork in index replication based on operation
        return false;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.action.admin.cluster.ping.single;

import com.google.inject.Inject;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.ping.single.SinglePingRequest;
import org.elasticsearch.action.admin.cluster.ping.single.SinglePingResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.http.*;
import org.elasticsearch.http.action.support.HttpJsonBuilder;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.http.HttpResponse.Status.*;

/**
 * Http handler for the single-shard ping: GET /{index}/{type}/{id}/_ping and
 * GET /_cluster/{index}/{type}/{id}/_ping, replying with {"ok":true}.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpSinglePingAction extends BaseHttpServerHandler {

    @Inject public HttpSinglePingAction(Settings settings, HttpServer httpService, Client client) {
        super(settings, client);
        httpService.registerHandler(HttpRequest.Method.GET, "/{index}/{type}/{id}/_ping", this);
        httpService.registerHandler(HttpRequest.Method.GET, "/_cluster/{index}/{type}/{id}/_ping", this);
    }

    @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) {
        final SinglePingRequest pingRequest = new SinglePingRequest(request.param("index"), request.param("type"), request.param("id"));
        // no need to have a threaded listener since we just send back a response
        pingRequest.listenerThreaded(false);
        // if we have a local operation, execute it on a thread since we don't spawn
        pingRequest.threadedOperation(true);
        client.admin().cluster().execPing(pingRequest, new ActionListener<SinglePingResponse>() {
            @Override public void onResponse(SinglePingResponse response) {
                try {
                    JsonBuilder builder = HttpJsonBuilder.cached(request);
                    builder.startObject().field("ok", true).endObject();
                    channel.sendResponse(new JsonHttpResponse(request, OK, builder));
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new JsonThrowableHttpResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }

    @Override public boolean spawn() {
        // Operation is forced onto a thread via threadedOperation(true) instead.
        return false;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.action.admin.cluster.state;

import com.google.inject.Inject;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.http.*;
import org.elasticsearch.http.action.support.HttpJsonBuilder;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;
import java.util.Map;

/**
 * Http handler for GET /_cluster/state. Renders the full cluster state as json
 * in three top-level sections: "metadata" (per-index settings and mappings),
 * "routingTable" (shard routing per index), and "routingNodes" (unassigned
 * shards plus shards per node). The emission order of builder calls below
 * defines the wire format — do not reorder.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpClusterStateAction extends BaseHttpServerHandler {

    @Inject public HttpClusterStateAction(Settings settings, HttpServer httpServer, Client client) {
        super(settings, client);

        httpServer.registerHandler(HttpRequest.Method.GET, "/_cluster/state", this);
    }

    @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) {
        client.admin().cluster().execState(new ClusterStateRequest(), new ActionListener<ClusterStateResponse>() {
            @Override public void onResponse(ClusterStateResponse response) {
                try {
                    ClusterState state = response.state();
                    JsonBuilder builder = HttpJsonBuilder.cached(request);
                    builder.startObject();

                    // meta data: per-index settings and mappings, keyed by index name
                    builder.startObject("metadata");
                    builder.field("maxNumberOfShardsPerNode", state.metaData().maxNumberOfShardsPerNode());
                    builder.startObject("indices");
                    for (IndexMetaData indexMetaData : state.metaData()) {
                        builder.startObject(indexMetaData.index());

                        // settings rendered as {"setting": {"name": ..., "value": ...}} entries
                        builder.startObject("settings");
                        for (Map.Entry<String, String> entry : indexMetaData.settings().getAsMap().entrySet()) {
                            builder.startObject("setting").field("name", entry.getKey()).field("value", entry.getValue()).endObject();
                        }
                        builder.endObject();

                        // mappings rendered the same way, value is the raw mapping source
                        builder.startObject("mappings");
                        for (Map.Entry<String, String> entry : indexMetaData.mappings().entrySet()) {
                            builder.startObject("mapping").field("name", entry.getKey()).field("value", entry.getValue()).endObject();
                        }
                        builder.endObject();

                        builder.endObject();
                    }
                    builder.endObject();
                    builder.endObject();

                    // routing table: for each index, an array of shard routings per shard id
                    builder.startObject("routingTable");
                    builder.startObject("indices");
                    for (IndexRoutingTable indexRoutingTable : state.routingTable()) {
                        builder.startObject(indexRoutingTable.index());
                        builder.startObject("shards");
                        for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                            builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id()));
                            for (ShardRouting shardRouting : indexShardRoutingTable) {
                                jsonShardRouting(builder, shardRouting);
                            }
                            builder.endArray();
                        }
                        builder.endObject();
                        builder.endObject();
                    }
                    builder.endObject();
                    builder.endObject();

                    // routing nodes: unassigned shards, then shards grouped per node id
                    builder.startObject("routingNodes");
                    builder.startArray("unassigned");
                    for (ShardRouting shardRouting : state.routingNodes().unassigned()) {
                        jsonShardRouting(builder, shardRouting);
                    }
                    builder.endArray();
                    builder.startObject("nodes");
                    for (RoutingNode routingNode : state.routingNodes()) {
                        builder.startArray(routingNode.nodeId());
                        for (ShardRouting shardRouting : routingNode) {
                            jsonShardRouting(builder, shardRouting);
                        }
                        builder.endArray();
                    }
                    builder.endObject();
                    builder.endObject();

                    builder.endObject();
                    channel.sendResponse(new JsonHttpResponse(request, HttpResponse.Status.OK, builder));
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            // Renders a single shard routing entry; shared by all three sections above.
            // NOTE(review): relocatingNodeId/nodeId may be null for unassigned shards —
            // presumably the builder renders nulls; confirm against JsonBuilder.field.
            private void jsonShardRouting(JsonBuilder builder, ShardRouting shardRouting) throws IOException {
                builder.startObject()
                        .field("state", shardRouting.state())
                        .field("primary", shardRouting.primary())
                        .field("nodeId", shardRouting.currentNodeId())
                        .field("relocatingNodeId", shardRouting.relocatingNodeId())
                        .field("shardId", shardRouting.shardId().id())
                        .field("index", shardRouting.shardId().index().name())
                        .endObject();
            }

            @Override public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new JsonThrowableHttpResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }

    @Override public boolean spawn() {
        // State is already in memory; rendering runs on the calling thread.
        return false;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.action.admin.indices.create;

import com.google.inject.Inject;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.http.*;
import org.elasticsearch.http.action.support.HttpJsonBuilder;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.util.Strings;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.settings.ImmutableSettings;
import org.elasticsearch.util.settings.Settings;
import org.elasticsearch.util.settings.SettingsException;

import java.io.IOException;

import static org.elasticsearch.ExceptionsHelper.*;
import static org.elasticsearch.http.HttpResponse.Status.*;
import static org.elasticsearch.util.TimeValue.*;

/**
 * Http handler for PUT /{index}. The (optional) request body holds index
 * settings; on a body parse failure a BAD_REQUEST is returned and the request
 * is aborted. Known "bad request" failures (index exists, invalid name) are
 * mapped to BAD_REQUEST with an "error" field; anything else is a throwable
 * response.
 *
 * @author kimchy (Shay Banon)
 */
public class HttpCreateIndexAction extends BaseHttpServerHandler {

    @Inject public HttpCreateIndexAction(Settings settings, HttpServer httpService, Client client) {
        super(settings, client);
        httpService.registerHandler(HttpRequest.Method.PUT, "/{index}", this);
    }

    @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) {
        String bodySettings = request.contentAsString();
        Settings indexSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
        if (Strings.hasText(bodySettings)) {
            try {
                indexSettings = ImmutableSettings.settingsBuilder().loadFromSource(bodySettings).build();
            } catch (Exception e) {
                try {
                    channel.sendResponse(new JsonThrowableHttpResponse(request, BAD_REQUEST, new SettingsException("Failed to parse index settings", e)));
                } catch (IOException e1) {
                    logger.warn("Failed to send response", e1);
                }
                // BUG FIX: previously only the IOException path returned, so after a
                // settings parse failure the index was still created (with empty
                // settings) and a second response could be sent on the same channel.
                // Abort the request once the error response has been attempted.
                return;
            }
        }
        CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index"), indexSettings);
        // Optional ?timeout=… parameter, default 10s.
        createIndexRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), timeValueSeconds(10)));
        client.admin().indices().execCreate(createIndexRequest, new ActionListener<CreateIndexResponse>() {
            @Override public void onResponse(CreateIndexResponse result) {
                try {
                    JsonBuilder builder = HttpJsonBuilder.cached(request);
                    builder.startObject()
                            .field("ok", true)
                            .endObject();
                    channel.sendResponse(new JsonHttpResponse(request, OK, builder));
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override public void onFailure(Throwable e) {
                try {
                    // Unwrap remote/wrapper exceptions to classify the real cause.
                    Throwable t = unwrapCause(e);
                    if (t instanceof IndexAlreadyExistsException || t instanceof InvalidIndexNameException) {
                        channel.sendResponse(new JsonHttpResponse(request, BAD_REQUEST, JsonBuilder.cached().startObject().field("error", t.getMessage()).endObject()));
                    } else {
                        channel.sendResponse(new JsonThrowableHttpResponse(request, e));
                    }
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }
}
-0,0 +1,80 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.admin.indices.delete; + +import com.google.inject.Inject; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.elasticsearch.util.json.JsonBuilder.Cached.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpDeleteIndexAction extends BaseHttpServerHandler { + + @Inject public HttpDeleteIndexAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + 
httpService.registerHandler(HttpRequest.Method.DELETE, "/{index}", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(request.param("index")); + deleteIndexRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), timeValueSeconds(10))); + client.admin().indices().execDelete(deleteIndexRequest, new ActionListener() { + @Override public void onResponse(DeleteIndexResponse result) { + try { + channel.sendResponse(new JsonHttpResponse(request, OK, cached().startObject().field("ok", true).endObject())); + } catch (IOException e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexMissingException) { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject() + .field("ok", true) + .endObject(); + channel.sendResponse(new JsonHttpResponse(request, NOT_FOUND, builder)); + } else { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/flush/HttpFlushAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/flush/HttpFlushAction.java new file mode 100644 index 00000000000..19898a1a49d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/flush/HttpFlushAction.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.admin.indices.flush; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.flush.IndexFlushResponse; +import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpFlushAction extends BaseHttpServerHandler { + + @Inject public HttpFlushAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.POST, "/_flush", this); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/_flush", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + FlushRequest flushRequest = new 
FlushRequest(HttpActions.splitIndices(request.param("index"))); + flushRequest.listenerThreaded(false); + flushRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), ShardReplicationOperationRequest.DEFAULT_TIMEOUT)); + client.admin().indices().execFlush(flushRequest, new ActionListener() { + @Override public void onResponse(FlushResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + builder.field("ok", true); + builder.startObject("indices"); + for (IndexFlushResponse indexFlushResponse : result.indices().values()) { + builder.startObject(indexFlushResponse.index()) + .field("ok", true) + .field("totalShards", indexFlushResponse.totalShards()) + .field("successfulShards", indexFlushResponse.successfulShards()) + .field("failedShards", indexFlushResponse.failedShards()) + .endObject(); + } + builder.endObject(); + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + + @Override public boolean spawn() { + // we don't spawn since we fork in index replication based on operation + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/gateway/snapshot/HttpGatewaySnapshotAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/gateway/snapshot/HttpGatewaySnapshotAction.java new file mode 100644 index 00000000000..772654fdd93 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/gateway/snapshot/HttpGatewaySnapshotAction.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.admin.indices.gateway.snapshot; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest; +import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse; +import org.elasticsearch.action.admin.indices.gateway.snapshot.IndexGatewaySnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.action.support.replication.ShardReplicationOperationRequest.*; +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpGatewaySnapshotAction extends BaseHttpServerHandler { + + @Inject public HttpGatewaySnapshotAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.POST, 
"/_gateway/snapshot", this); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/_gateway/snapshot", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + GatewaySnapshotRequest gatewaySnapshotRequest = new GatewaySnapshotRequest(HttpActions.splitIndices(request.param("index"))); + gatewaySnapshotRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), DEFAULT_TIMEOUT)); + gatewaySnapshotRequest.listenerThreaded(false); + client.admin().indices().execGatewaySnapshot(gatewaySnapshotRequest, new ActionListener() { + @Override public void onResponse(GatewaySnapshotResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + builder.field("ok", true); + builder.startObject("indices"); + for (IndexGatewaySnapshotResponse indexResponse : result.indices().values()) { + builder.startObject(indexResponse.index()) + .field("ok", true) + .field("totalShards", indexResponse.totalShards()) + .field("successfulShards", indexResponse.successfulShards()) + .field("failedShards", indexResponse.failedShards()) + .endObject(); + } + builder.endObject(); + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + // we don't spawn since we fork in index replication based on operation + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/mapping/create/HttpCreateMappingAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/mapping/create/HttpCreateMappingAction.java new file mode 100644 
index 00000000000..e3724d626bf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/mapping/create/HttpCreateMappingAction.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action.admin.indices.mapping.create; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.create.CreateMappingResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.index.mapper.InvalidTypeNameException; +import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.ExceptionsHelper.*; +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpCreateMappingAction extends BaseHttpServerHandler { + + @Inject public HttpCreateMappingAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.PUT, "/{index}/_mapping", this); + httpService.registerHandler(HttpRequest.Method.PUT, "/{index}/{type}/_mapping", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + String[] indices = HttpActions.splitIndices(request.param("index")); + String mappingType = request.param("type"); + String mappingSource = request.contentAsString(); + client.admin().indices().execCreateMapping(new CreateMappingRequest(indices, mappingType, mappingSource), new ActionListener() { + @Override public void onResponse(CreateMappingResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject() + .field("ok", true) + .endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (IOException e) { 
+ onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + Throwable t = unwrapCause(e); + if (t instanceof IndexMissingException || t instanceof InvalidTypeNameException) { + channel.sendResponse(new JsonHttpResponse(request, BAD_REQUEST, JsonBuilder.cached().startObject().field("error", t.getMessage()).endObject())); + } else { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/refresh/HttpRefreshAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/refresh/HttpRefreshAction.java new file mode 100644 index 00000000000..29a3a83eee9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/refresh/HttpRefreshAction.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action.admin.indices.refresh; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.refresh.IndexRefreshResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.action.support.replication.ShardReplicationOperationRequest.*; +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpRefreshAction extends BaseHttpServerHandler { + + @Inject public HttpRefreshAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.POST, "/_refresh", this); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/_refresh", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + RefreshRequest refreshRequest = new RefreshRequest(HttpActions.splitIndices(request.param("index"))); + refreshRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), DEFAULT_TIMEOUT)); + refreshRequest.listenerThreaded(false); + client.admin().indices().execRefresh(refreshRequest, new ActionListener() { + @Override public void onResponse(RefreshResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + builder.field("ok", true); + builder.startObject("indices"); + for (IndexRefreshResponse indexResponse : result.indices().values()) { + 
builder.startObject(indexResponse.index()) + .field("ok", true) + .field("totalShards", indexResponse.totalShards()) + .field("successfulShards", indexResponse.successfulShards()) + .field("failedShards", indexResponse.failedShards()) + .endObject(); + } + builder.endObject(); + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + // we don't spawn since we fork in index replication based on operation + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/status/HttpIndicesStatusAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/status/HttpIndicesStatusAction.java new file mode 100644 index 00000000000..f4f7adc65d5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/admin/indices/status/HttpIndicesStatusAction.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.admin.indices.status; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.status.*; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.http.HttpResponse.Status.*; +import static org.elasticsearch.http.action.support.HttpActions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpIndicesStatusAction extends BaseHttpServerHandler { + + @Inject public HttpIndicesStatusAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.GET, "/_status", this); + httpService.registerHandler(HttpRequest.Method.GET, "/{index}/_status", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + IndicesStatusRequest indicesStatusRequest = new IndicesStatusRequest(splitIndices(request.param("index"))); + indicesStatusRequest.listenerThreaded(false); + client.admin().indices().execStatus(indicesStatusRequest, new ActionListener() { + @Override public void onResponse(IndicesStatusResponse response) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + builder.field("ok", true); + + builder.startObject("indices"); + for (IndexStatus indexStatus : response.indices().values()) { + builder.startObject(indexStatus.index()); + + builder.startObject("settings"); + for (Map.Entry entry : indexStatus.settings().getAsMap().entrySet()) { + builder.startObject("setting").field("name", entry.getKey()).field("value", 
entry.getValue()).endObject(); + } + builder.endObject(); + + builder.field("storeSize", indexStatus.storeSize().toString()); + builder.field("storeSizeInBytes", indexStatus.storeSize().bytes()); + builder.field("estimatedFlushableMemorySize", indexStatus.estimatedFlushableMemorySize().toString()); + builder.field("estimatedFlushableMemorySizeInBytes", indexStatus.estimatedFlushableMemorySize().bytes()); + builder.field("translogOperations", indexStatus.translogOperations()); + builder.startObject("docs"); + builder.field("numDocs", indexStatus.docs().numDocs()); + builder.field("maxDoc", indexStatus.docs().maxDoc()); + builder.field("deletedDocs", indexStatus.docs().deletedDocs()); + builder.endObject(); + + builder.startObject("shards"); + for (IndexShardStatus indexShardStatus : indexStatus) { + builder.startArray(Integer.toString(indexShardStatus.shardId().id())); + for (ShardStatus shardStatus : indexShardStatus) { + builder.startObject(); + + builder.startObject("routing") + .field("state", shardStatus.shardRouting().state()) + .field("primary", shardStatus.shardRouting().primary()) + .field("nodeId", shardStatus.shardRouting().currentNodeId()) + .field("relocatingNodeId", shardStatus.shardRouting().relocatingNodeId()) + .field("shardId", shardStatus.shardRouting().shardId().id()) + .field("index", shardStatus.shardRouting().shardId().index().name()) + .endObject(); + + builder.field("state", shardStatus.state()); + builder.field("storeSize", shardStatus.storeSize().toString()); + builder.field("storeSizeInBytes", shardStatus.storeSize().bytes()); + builder.field("estimatedFlushableMemorySize", shardStatus.estimatedFlushableMemorySize().toString()); + builder.field("estimatedFlushableMemorySizeInBytes", shardStatus.estimatedFlushableMemorySize().bytes()); + builder.field("translogId", shardStatus.translogId()); + builder.field("translogOperations", shardStatus.translogOperations()); + builder.startObject("docs"); + builder.field("numDocs", 
shardStatus.docs().numDocs()); + builder.field("maxDoc", shardStatus.docs().maxDoc()); + builder.field("deletedDocs", shardStatus.docs().deletedDocs()); + builder.endObject(); + + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + builder.endObject(); + } + builder.endObject(); + + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + // we don't spawn since we fork in index replication based on operation + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/count/HttpCountAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/count/HttpCountAction.java new file mode 100644 index 00000000000..5e4cb026f7f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/count/HttpCountAction.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.count; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.count.CountRequest; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.action.count.CountRequest.*; +import static org.elasticsearch.http.HttpResponse.Status.*; +import static org.elasticsearch.http.action.support.HttpActions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpCountAction extends BaseHttpServerHandler { + + @Inject public HttpCountAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/_count", this); + httpService.registerHandler(HttpRequest.Method.GET, "/{index}/_count", this); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/{type}/_count", this); + httpService.registerHandler(HttpRequest.Method.GET, "/{index}/{type}/_count", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + CountRequest countRequest = new CountRequest(HttpActions.splitIndices(request.param("index"))); + // we just send back a response, no need to fork a listener + countRequest.listenerThreaded(false); + try { + BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operationThreading"), BroadcastOperationThreading.SINGLE_THREAD); + if 
(operationThreading == BroadcastOperationThreading.NO_THREADS) { + // since we don't spawn, don't allow no_threads, but change it to a single thread + operationThreading = BroadcastOperationThreading.SINGLE_THREAD; + } + countRequest.operationThreading(operationThreading); + countRequest.querySource(HttpActions.parseQuerySource(request)); + countRequest.queryParserName(request.param("queryParserName")); + countRequest.queryHint(request.param("queryHint")); + countRequest.minScore(HttpActions.paramAsFloat(request.param("minScore"), DEFAULT_MIN_SCORE)); + String typesParam = request.param("type"); + if (typesParam != null) { + countRequest.types(splitTypes(typesParam)); + } + } catch (Exception e) { + try { + channel.sendResponse(new JsonHttpResponse(request, BAD_REQUEST, JsonBuilder.cached().startObject().field("error", e.getMessage()).endObject())); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + return; + } + + client.execCount(countRequest, new ActionListener() { + @Override public void onResponse(CountResponse response) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + builder.field("count", response.count()); + + builder.startObject("_shards"); + builder.field("total", response.totalShards()); + builder.field("successful", response.successfulShards()); + builder.field("failed", response.failedShards()); + builder.endObject(); + + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } +} \ No newline at end of file diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/delete/HttpDeleteAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/delete/HttpDeleteAction.java new file mode 100644 index 00000000000..df292a58ff4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/delete/HttpDeleteAction.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action.delete; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpDeleteAction extends BaseHttpServerHandler { + + @Inject public HttpDeleteAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.DELETE, "/{index}/{type}/{id}", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id")); + deleteRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), DeleteRequest.DEFAULT_TIMEOUT)); + // we just send a response, no need to fork + deleteRequest.listenerThreaded(false); + // we don't spawn, then fork if local + deleteRequest.operationThreaded(true); + client.execDelete(deleteRequest, new ActionListener() { + @Override public void onResponse(DeleteResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject() + .field("ok", true) + .field("_index", result.index()) + .field("_type", result.type()) + .field("_id", result.id()) + .endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new 
JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/deletebyquery/HttpDeleteByQueryAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/deletebyquery/HttpDeleteByQueryAction.java new file mode 100644 index 00000000000..ff015075256 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/deletebyquery/HttpDeleteByQueryAction.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action.deletebyquery; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; +import org.elasticsearch.action.deletebyquery.ShardDeleteByQueryRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; +import static org.elasticsearch.http.action.support.HttpActions.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpDeleteByQueryAction extends BaseHttpServerHandler { + + @Inject public HttpDeleteByQueryAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.DELETE, "/{index}/_query", this); + httpService.registerHandler(HttpRequest.Method.DELETE, "/{index}/{type}/_query", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(splitIndices(request.param("index"))); + // we just build a response and send it, no need to fork a thread + deleteByQueryRequest.listenerThreaded(false); + try { + deleteByQueryRequest.querySource(HttpActions.parseQuerySource(request)); + deleteByQueryRequest.queryParserName(request.param("queryParserName")); + String typesParam = request.param("type"); + if (typesParam != null) { + deleteByQueryRequest.types(HttpActions.splitTypes(typesParam)); + } + 
deleteByQueryRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), ShardDeleteByQueryRequest.DEFAULT_TIMEOUT)); + } catch (Exception e) { + try { + channel.sendResponse(new JsonHttpResponse(request, PRECONDITION_FAILED, JsonBuilder.cached().startObject().field("error", e.getMessage()).endObject())); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + return; + } + client.execDeleteByQuery(deleteByQueryRequest, new ActionListener() { + @Override public void onResponse(DeleteByQueryResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject().field("ok", true); + + builder.startObject("_indices"); + for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : result.indices().values()) { + builder.startObject(indexDeleteByQueryResponse.index()); + + builder.startObject("_shards"); + builder.field("total", indexDeleteByQueryResponse.totalShards()); + builder.field("successful", indexDeleteByQueryResponse.successfulShards()); + builder.field("failed", indexDeleteByQueryResponse.failedShards()); + builder.endObject(); + + builder.endObject(); + } + builder.endObject(); + + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + + @Override public boolean spawn() { + // we don't spawn since we fork in index replication based on operation + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/get/HttpGetAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/get/HttpGetAction.java new file mode 100644 index 00000000000..ca5fd6d7e41 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/get/HttpGetAction.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.get; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpGetAction extends BaseHttpServerHandler { + + @Inject public HttpGetAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.GET, "/{index}/{type}/{id}", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), 
request.param("id")); + // no need to have a threaded listener since we just send back a response + getRequest.listenerThreaded(false); + // if we have a local operation, execute it on a thread since we don't spawn + getRequest.threadedOperation(true); + client.execGet(getRequest, new ActionListener() { + @Override public void onResponse(GetResponse result) { + try { + if (result.empty()) { + channel.sendResponse(new JsonHttpResponse(request, NOT_FOUND)); + } else { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + builder.field("_index", result.index()); + builder.field("_type", result.type()); + builder.field("_id", result.id()); + builder.raw(", \"_source\" : "); + builder.raw(result.source()); + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/index/HttpIndexAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/index/HttpIndexAction.java new file mode 100644 index 00000000000..3645782949d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/index/HttpIndexAction.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.index; + +import com.google.inject.Inject; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpIndexAction extends BaseHttpServerHandler { + + @Inject public HttpIndexAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/{type}", this); // auto id creation + httpService.registerHandler(HttpRequest.Method.PUT, "/{index}/{type}/{id}", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id"), request.contentAsString()); + indexRequest.timeout(TimeValue.parseTimeValue(request.param("timeout"), IndexRequest.DEFAULT_TIMEOUT)); + String sOpType = request.param("opType"); + if (sOpType != null) { + if ("index".equals(sOpType)) { + indexRequest.opType(IndexRequest.OpType.INDEX); + } else if ("create".equals(sOpType)) { 
+ indexRequest.opType(IndexRequest.OpType.CREATE); + } else { + try { + channel.sendResponse(new JsonHttpResponse(request, BAD_REQUEST, JsonBuilder.cached().startObject().field("error", "opType [" + sOpType + "] not allowed, either [index] or [create] are allowed").endObject())); + } catch (IOException e1) { + logger.warn("Failed to send response", e1); + return; + } + } + } + // we just send a response, no need to fork + indexRequest.listenerThreaded(false); + // we don't spawn, then fork if local + indexRequest.operationThreaded(true); + client.execIndex(indexRequest, new ActionListener() { + @Override public void onResponse(IndexResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject() + .field("ok", true) + .field("_index", result.index()) + .field("_type", result.type()) + .field("_id", result.id()) + .endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/main/HttpMainAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/main/HttpMainAction.java new file mode 100644 index 00000000000..34dca719a75 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/main/HttpMainAction.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.main; + +import com.google.common.collect.Iterators; +import com.google.inject.Inject; +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.node.ArrayNode; +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.util.Classes; +import org.elasticsearch.util.concurrent.ThreadLocalRandom; +import org.elasticsearch.util.json.Jackson; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpMainAction extends BaseHttpServerHandler { + + private final JsonNode rootNode; + + private final int quotesSize; + + @Inject public HttpMainAction(Settings settings, HttpServer httpServer, Client client) { + super(settings, client); + JsonNode rootNode; + int quotesSize; + try { + rootNode = Jackson.newObjectMapper().readValue(Classes.getDefaultClassLoader().getResourceAsStream("org/elasticsearch/http/action/main/quotes.json"), JsonNode.class); + ArrayNode arrayNode = (ArrayNode) rootNode.get("quotes"); + quotesSize = Iterators.size(arrayNode.getElements()); + } catch (Exception e) { + rootNode = null; + quotesSize = -1; + } + this.rootNode = rootNode; + 
this.quotesSize = quotesSize; + + httpServer.registerHandler(HttpRequest.Method.GET, "/", this); + } + + @Override public void handleRequest(HttpRequest request, HttpChannel channel) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request).prettyPrint(); + builder.startObject(); + builder.field("ok", true); + if (settings.get("name") != null) { + builder.field("name", settings.get("name")); + } + builder.startObject("version").field("number", Version.number()).field("date", Version.date()).field("devBuild", Version.devBuild()).endObject(); + builder.field("version", Version.number()); + builder.field("cover", "DON'T PANIC"); + if (rootNode != null) { + builder.startObject("quote"); + ArrayNode arrayNode = (ArrayNode) rootNode.get("quotes"); + JsonNode quoteNode = arrayNode.get(ThreadLocalRandom.current().nextInt(quotesSize)); + builder.field("book", quoteNode.get("book").getValueAsText()); + builder.field("chapter", quoteNode.get("chapter").getValueAsText()); + ArrayNode textNodes = (ArrayNode) quoteNode.get("text"); +// builder.startArray("text"); +// for (JsonNode textNode : textNodes) { +// builder.value(textNode.getValueAsText()); +// } +// builder.endArray(); + int index = 0; + for (JsonNode textNode : textNodes) { + builder.field("text" + (++index), textNode.getValueAsText()); + } + builder.endObject(); + } + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, HttpResponse.Status.OK, builder)); + } catch (Exception e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.warn("Failed to send response", e); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/main/quotes.json b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/main/quotes.json new file mode 100644 index 00000000000..9740cfc638a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/main/quotes.json @@ -0,0 +1,653 @@ 
+{ + quotes : [ + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Introduction", + text : ["This planet has - or rather had - a problem, which was this: most of the people living on it were unhappy for pretty much all of the time. Many solutions were suggested for this problem, but most of these were largely concerned with the movement of small green pieces of paper, which was odd because on the whole it wasn't the small green pieces of paper that were unhappy."] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Introduction", + text : ["Many were increasingly of the opinion that they'd all made a big mistake in coming down from the trees in the first place. And some said that even the trees had been a bad move, and that no one should ever have left the oceans."] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Introduction", + text : ["In many of the more relaxed civilizations on the Outer Eastern Rim of the Galaxy, the Hitch-Hiker's Guide has already supplanted the great Encyclopaedia Galactica as the standard repository of all knowledge and wisdom, for though it has many omissions and contains much that is apocryphal, or at least wildly inaccurate, it scores over the older, more pedestrian work in two important respects.", + "First, it is slightly cheaper; and secondly it has the words DON'T PANIC inscribed in large friendly letters on its cover." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 1", + text : [ + "\"Some factual information for you. Have you any idea how much damage that bulldozer would suffer if I just let it roll straight over you?\"", + "\"How much?\" said Arthur.", + "\"None at all,\" said Mr Prosser." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 1", + text : [ + "\"The mere thought,\" growled Mr. 
Prosser, \"hadn't even begun to speculate,\" he continued, settling himself back, \"about the merest possibility of crossing my mind.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 2", + text : [ + "[The Guide] says that the best drink in existence is the Pan Galactic Gargle Blaster. It says that the effect of a Pan Galactic Gargle Blaster is like having your brains smashed out by a slice of lemon wrapped round a large gold brick." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 2", + text : [ + "\"Time is an illusion. Lunchtime doubly so.\"", + "\"Very deep,\" said Arthur, \"you should send that in to the Reader's Digest. They've got a page for people like you.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 2", + text : [ + "\"This must be Thursday,\" said Arthur to himself, sinking low over his beer, \"I never could get the hang of Thursdays.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 3", + text : [ + "The ships hung in the sky in much the same way that bricks don't." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 5", + text : [ + "One of the things Ford Prefect had always found hardest to understand about humans was their habit of continually stating and repeating the very very obvious, as in It's a nice day, or You're very tall, or Oh dear you seem to have fallen down a thirty-foot well, are you alright? At first Ford had formed a theory to account for this strange behaviour. If human beings don't keep exercising their lips, he thought, their mouths probably seize up. After a few months' consideration and observation he abandoned this theory in favour of a new one. If they don't keep on exercising their lips, he thought, their brains start working. After a while he abandoned this one as well as being obstructively cynical." 
+ ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 7", + text : [ + "\"You know,\" said Arthur, \"it's at times like this, when I'm trapped in a Vogon airlock with a man from Betelgeuse, and about to die of asphyxiation in deep space that I really wish I'd listened to what my mother told me when I was young.\"", + "\"Why, what did she tell you?\"", + "\"I don't know, I didn't listen.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 8", + text : [ + "\"Space,\" it says, \"is big. Really big. You just won't believe how vastly hugely mindbogglingly big it is. I mean you may think it's a long way down the road to the chemist, but that's just peanuts to space, LISTEN!\" and so on..." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 9", + text : [ + "Arthur looked up. \"Ford!\" he said, \"there's an infinite number of monkeys outside who want to talk to us about this script for Hamlet they've worked out.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 9", + text : [ + "\"Ford,\" he said, \"you're turning into a penguin. 
Stop it\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 9", + text : [ + "\"But that's not the point!\" raged Ford \"The point is that I am now a perfectly safe penguin, and my colleague here is rapidly running out of limbs!\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 11", + text : [ + "\"Five to one against and falling...\" she said, \"four to one against and falling...three to one...two...one...probability factor of one to one...we have normality, I repeat we have normality.\" She turned her microphone off – then turned it back on, with a slight smile and continued: \"Anything you still can’t cope with is therefore your own problem.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 11", + text : [ + "\"I think you ought to know I'm feeling very depressed,\" Marvin said." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 11", + text : [ + "He reached out and pressed an invitingly large red button on a nearby panel. The panel lit up with the words Please do not press this button again." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 11", + text : [ + "\"All the doors in this spaceship have a cheerful and sunny disposition. It is their pleasure to open for you, and their satisfaction to close again with the knowledge of a job well done.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 11", + text : [ + "\"Come on,\" he droned, \"I've been ordered to take you down to the bridge. Here I am, brain the size of a planet and they ask me to take you down to the bridge. Call that job satisfaction? 'Cos I don't.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 11", + text : [ + "\"Sorry, did I say something wrong?\" said Marvin, dragging himself on regardless. 
\"Pardon me for breathing, which I never do anyway so I don't know why I bother to say it, oh God I'm so depressed. Here's another one of those self-satisfied doors. Life! Don't talk to me about life.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 12", + text : [ + "If there's anything bigger than my ego around, I want it caught and shot now." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 13", + text : [ + "Marvin trudged on down the corridor, still moaning.", + "\"...and then of course I've got this terrible pain in all the diodes down my left hand side...\"", + "\"No?\" said Arthur grimly as he walked along beside him. \"Really?\"", + "\"Oh yes,\" said Marvin, \"I mean I've asked for them to be replaced but no one ever listens.\"", + "\"I can imagine.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 16", + text : [ + "Isn’t it enough to see that a garden is beautiful without having to believe that there are fairies at the bottom of it too?" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 17", + text : [ + "He had found a Nutri-Matic machine which had provided him with a plastic cup filled with a liquid that was almost, but not quite, entirely unlike tea." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 18", + text : [ + "Curiously enough, the only thing that went through the mind of the bowl of petunias as it fell was Oh no, not again. Many people have speculated that if we knew exactly why the bowl of petunias had thought that we would know a lot more about the nature of the Universe than we do now." 
+ ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 20", + text : [ + "\"Life,\" said Marvin dolefully, \"loathe it or ignore it, you can't like it.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 23", + text : [ + "For instance, on the planet Earth, man had always assumed that he was more intelligent than dolphins because he had achieved so much - the wheel, New York, wars and so on - whilst all the dolphins had ever done was muck about in the water having a good time. But conversely, the dolphins had always believed that they were far more intelligent than man - for precisely the same reasons.", + "The last ever dolphin message was misinterpreted as a surprisingly sophisticated attempt to do a double-backwards-somersault through a hoop whilst whistling the 'Star Spangled Banner', but in fact the message was this: So long and thanks for all the fish." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 24", + text : [ + "Looking up into the night sky is looking into infinity - distance is incomprehensible and therefore meaningless." + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 27", + text : [ + "\"Forty-two,\" said Deep Thought, with infinite majesty and calm.", + "\"The Answer to the Great Question, of Life, the Universe and Everything\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 30", + text : [ + "\"The chances of finding out what's really going on in the universe are so remote, the only thing to do is hang the sense of it and keep yourself occupied. Look at me, I design fjords. 
I'd far rather be happy than right any day.\"", + "\"And are you?\"", + "\"No, that's where it all falls apart I'm afraid.\"", + "\"Pity, it sounded like quite a nice lifestyle otherwise.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 34", + text : [ + "\"What's up?\"", + "\"I don't know,\" said Marvin, \"I've never been there.\"" + ] + }, + { + book : "The Hitchhiker's Guide to the Galaxy", + chapter: "Chapter 35", + text : [ + "It said: \"The History of every major Galactic Civilization tends to pass through three distinct and recognizable phases, those of Survival, Inquiry and Sophistication, otherwise known as the How, Why and Where phases.", + "\"For instance, the first phase is characterized by the question How can we eat? the second by the question Why do we eat? and the third by the question Where shall we have lunch?\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Preface", + text : [ + "There is a theory which states that if ever anyone discovers exactly what the Universe is for and why it is here, it will instantly disappear and be replaced by something even more bizarre and inexplicable.", + "There is another theory which states that this has already happened." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 1", + text : [ + "The story so far:", + "In the beginning the Universe was created.", + "This has made a lot of people very angry and has been widely regarded as a bad move." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 2", + text : [ + "\"Share and Enjoy\" is the company motto of the hugely successful Sirius Cybernetics Corporation Complaints division, which now covers the major land masses of three medium sized planets and is the only part of the Corporation to have shown a consistent profit in recent years." 
+ ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 2", + text : [ + "The protruding upper halves of the letters now appear, in the local language, to read \"Go stick your head in a pig\", and are no longer illuminated, except at times of special celebration." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 3", + text : [ + "Quite how Zaphod Beeblebrox arrived at the idea of holding a seance at this point is something he was never quite clear on.", + "Obviously the subject of death was in the air, but more as something to be avoided than harped upon.", + "Possibly the horror that Zaphod experienced at the prospect of being reunited with his deceased relatives led on to the thought that they might just feel the same way about him and, what's more, be able to do something about helping to postpone this reunion." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 3", + text : [ + "\"Concentrate,\" hissed Zaphod, \"on his name.\"", + "\"What is it?\" asked Arthur.", + "\"Zaphod Beeblebrox the Fourth.\"", + "\"What?\"", + "\"Zaphod Beeblebrox the Fourth. Concentrate!\"", + "\"The Fourth?\"", + "\"Yeah. Listen, I'm Zaphod Beeblebrox, my father was Zaphod Beeblebrox the Second, my grandfather Zaphod Beeblebrox the Third...\"", + "\"What?\"", + "\"There was an accident with a contraceptive and a time machine. Now concentrate!\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 6", + text : [ + "The Guide is definitive. Reality is frequently inaccurate." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 6", + text : [ + "\"Listen, three eyes,\" he said, \"don't you try to outweird me, I get stranger things than you free with my breakfast cereal.\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 17", + text : [ + "I am the main Dish of the Day. 
May I interest you in parts of my body?" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 17", + text : [ + "Shee, you guys are so unhip it's a wonder your bums don't fall off." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 18", + text : [ + "\"The first ten million years were the worst,\" said Marvin, \"and the second ten million years, they were the worst too. The third ten million years I didn't enjoy at all. After that I went into a bit of a decline.\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 18", + text : [ + "\"Er...\" he said, \"hello. Er, look, I'm sorry I'm a bit late. I've had the most ghastly time, all sorts of things cropping up at the last moment.\"", + "He seemed nervous of the expectant awed hush. He cleared his throat.", + "\"Er, how are we for time?\" he said, \"have I just got a min—\"", + "And so the Universe ended." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 19", + text : [ + "It is known that there are an infinite number of worlds, simply because there is an infinite amount of space for them to be in. However, not every one of them is inhabited. Therefore, there must be a finite number of inhabited worlds. Any finite number divided by infinity is as near to nothing as makes no odds, so the average population of all the planets in the Universe can be said to be zero. From this it follows that the population of the whole Universe is also zero, and that any people you may meet from time to time are merely the products of a deranged imagination." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 20", + text : [ + "The ship was rocking and swaying sickeningly as Ford and Zaphod tried to wrest control from the autopilot. The engines howled and whined like tired children in a supermarket." 
+ ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 22", + text : [ + "The trouble with most forms of transport, he thought, is basically one of them not being worth all the bother. On Earth – when there had been an Earth, before it was demolished to make way for a new hyperspace bypass – the problem had been with cars. The disadvantages involved in pulling lots of black sticky slime from out of the ground where it had been safely hidden out of harm's way, turning it into tar to cover the land with, smoke to fill the air with and pouring the rest into the sea, all seemed to outweigh the advantages of being able to get more quickly from one place to another – particularly when the place you arrived at had probably become, as a result of this, very similar to the place you had left, i.e. covered with tar, full of smoke and short of fish." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 23", + text : [ + "The designer of the gun had clearly not been instructed to beat about the bush. \"Make it evil,\" he'd been told. \"Make it totally clear that this gun has a right end and a wrong end. Make it totally clear to anyone standing at the wrong end that things are going badly for them. If that means sticking all sort of spikes and prongs and blackened bits all over it then so be it. This is not a gun for hanging over the fireplace or sticking in the umbrella stand, it is a gun for going out and making people miserable with.\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 28", + text : [ + "The major problem — one of the major problems, for there are several — one of the many major problems with governing people is that of whom you get to do it; or rather of who manages to get people to let them do it to them.", + "To summarize: it is a well known fact that those people who most want to rule people are, ipso facto, those least suited to do it. 
To summarize the summary: anyone who is capable of getting themselves made President should on no account be allowed to do the job. To summarize the summary of the summary: people are a problem." + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 29", + text : [ + "\"How can I tell,\" said the man, \"that the past isn't a fiction designed to account for the discrepancy between my immediate physical sensations and my state of mind?\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 32", + text : [ + "\"Well, you’re obviously being totally naive of course\", said the girl, \"When you’ve been in marketing as long as I have, you'll know that before any new product can be developed it has to be properly researched. We’ve got to find out what people want from fire, how they relate to it, what sort of image it has for them.\"", + "The crowd were tense. They were expecting something wonderful from Ford.", + "\"Stick it up your nose,\" he said.", + "\"Which is precisely the sort of thing we need to know,\" insisted the girl, \"Do people want fire that can be fitted nasally?\"" + ] + }, + { + book : "The Restaurant at the End of the Universe", + chapter: "Chapter 32", + text : [ + "\"And the wheel,\" said the Captain, \"What about this wheel thingy? It sounds a terribly interesting project.\"", + "\"Ah,\" said the marketing girl, \"Well, we're having a little difficulty there.\"", + "\"Difficulty?\" exclaimed Ford. \"Difficulty? What do you mean, difficulty? It's the single simplest machine in the entire Universe!\"", + "The marketing girl soured him with a look.", + "\"Alright, Mr. Wiseguy,\" she said, \"if you're so clever, you tell us what colour it should be.\"" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 1", + text : [ + "The regular early morning yell of horror was the sound of Arthur Dent waking up and suddenly remembering where he was." 
+ ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 1", + text : [ + "In the end, it was the Sunday afternoons he couldn't cope with, and that terrible listlessness that starts to set in about 2:55, when you know you’ve taken all the baths that you can usefully take that day, that however hard you stare at any given paragraph in the newspaper you will never actually read it, or use the revolutionary new pruning technique it describes, and that as you stare at the clock the hands will move relentlessly on to four o’clock, and you will enter the long dark teatime of the soul." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 2", + text : [ + "\"Africa was very interesting,\" said Ford, \"I behaved very oddly there.\" [...] \"I took up being cruel to animals,\" he said airily. \"But only,\" he added, \"as a hobby.\"", + "\"Oh yes,\" said Arthur, warily.", + "\"Yes,\" Ford assured him. \"I won't disturb you with the details because they would—\"", + "\"What?\"", + "\"Disturb you. But you may be interested to know that I am singlehandedly responsible for the evolved shape of the animal you came to know in later centuries as a giraffe.\"" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 2", + text : [ + "He gazed keenly into the distance and looked as if he would quite like the wind to blow his hair back dramatically at that point, but the wind was busy fooling around with some leaves a little way off." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 2", + text : [ + "\"I have detected,\" he said, \"disturbances in the wash.\" [...]", + "\"The wash?\" said Arthur.", + "\"The space-time wash,\" said Ford. [...]", + "Arthur nodded, and then cleared his throat. 
\"Are we talking about,\" he asked cautiously, \"some sort of Vogon laundromat, or what are we talking about?\"", + "\"Eddies,\" said Ford, \"in the space-time continuum.\"", + "\"Ah,\" nodded Arthur, \"is he? Is he?\" He pushed his hands into the pocket of his dressing gown and looked knowledgeably into the distance.", + "\"What?\" said Ford.", + "\"Er, who,\" said Arthur, \"is Eddy, then, exactly, then?\"" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 2", + text : [ + "\"There!\" said Ford, shooting out his arm. \"There, behind that sofa!\"", + "Arthur looked. Much to his surprise, there was a velvet paisley-covered Chesterfield sofa in the field in front of them. He boggled intelligently at it. Shrewd questions sprang into his mind.", + "\"Why,\" he said, \"is there a sofa in that field?\"", + "\"I told you!\" shouted Ford, leaping to his feet. \"Eddies in the space-time continuum!\"", + "\"And this is his sofa, is it?\" asked Arthur, struggling to his feet and, he hoped, though not very optimistically, to his senses." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 6", + text : [ + "\"My doctor says that I have a malformed public-duty gland and a natural deficiency in moral fibre,\" Ford muttered to himself, \"and that I am therefore excused from saving Universes.\"" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 9", + text : [ + "There is a moment in every dawn when light floats, there is the possibility of magic. Creation holds its breath.", + "The moment passed as it regularly did on Squornshellous Zeta, without incident." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 9", + text : [ + "Very few things actually get manufactured these days, because in an infinitely large Universe such as, for instance, the one in which we live, most things one could possibly imagine, and a lot of things one would rather not, grow somewhere." 
+ ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 9", + text : [ + "\"My capacity for happiness,\" he added, \"you could fit into a matchbox without taking out the matches first.\" —Marvin" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 9", + text : [ + "\"You may not instantly see why I bring the subject up, but that is because my mind works so phenomenally fast, and I am at a rough estimate thirty billion times more intelligent than you. Let me give you an example. Think of a number, any number.\"", + "\"Er, five,\" said the mattress.", + "\"Wrong,\" said Marvin. \"You see?\"", + "The mattress was much impressed by this and realized that it was in the presence of a not unremarkable mind." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 9", + text : [ + "\"I would like to say that it is a very great pleasure, honour and privilege for me to open this bridge, but I can't because my lying circuits are all out of commission.\" —Marvin" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 11", + text : [ + "[...] the renewed shock had nearly made him spill his drink. He drained it quickly before anything serious happened to it. He then had another quick one to follow the first one down and check that it was all right.", + "\"Freedom,\" he said aloud.", + "Trillian came on to the bridge at that point and said several enthusiastic things on the subject of freedom.", + "\"I can't cope with it,\" Zaphod said darkly, and sent a third drink down to see why the second hadn't yet reported on the condition of the first. He looked uncertainly at both of her and preferred the one on the right.", + "He poured a drink down his other throat with the plan that it would head the previous one off at the pass, join forces with it, and together they would get the second to pull itself together. 
Then all three would go off in search of the first, give it a good talking to and maybe a bit of a sing as well.", + "He felt uncertain as to whether the fourth drink had understood all that, so he sent down a fifth to explain the plan more fully and a sixth for moral support." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 11", + text : [ + "There is an art, it says, or rather, a knack to flying. The knack lies in learning how to throw yourself at the ground and miss. [...] Clearly, it is this second part, the missing, which presents the difficulties." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 11", + text : [ + "He sat up sharply and started to pull clothes on. He decided that there must be someone in the Universe feeling more wretched, miserable and forsaken than himself, and he determined to set out and find him.", + "Halfway to the bridge it occurred to him that it might be Marvin, and he returned to bed." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 18", + text : [ + "They obstinately persisted in their absence." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 24", + text : [ + "It is a mistake to think you can solve any major problems just with potatoes." + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 31", + text : [ + "\"That young girl,\" Marvin added unexpectedly, \"is one of the least benightedly unintelligent organic life forms it has been my profound lack of pleasure not to be able to avoid meeting.\"" + ] + }, + { + book : "Life, the Universe and Everything", + chapter: "Chapter 33", + text : [ + "He hoped and prayed that there wasn't an afterlife. Then he realized there was a contradiction involved here and merely hoped that there wasn't an afterlife." 
+ ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Prologue", + text : [ + "Many were increasingly of the opinion that they'd all made a big mistake in coming down from the trees in the first place. And some said that even the trees had been a bad move, and that no one should ever have left the oceans.", + "And then, one Thursday, nearly two thousand years after one man had been nailed to a tree for saying how great it would be to be nice to people for a change, a girl sitting on her own in a small café in Rickmansworth suddenly realized what it was that had been going wrong all this time, and she finally knew how the world could be made a good and happy place. This time it was right, it would work, and no one would have to get nailed to anything.", + "Sadly, however, before she could get to a phone to tell anyone about it, the Earth was unexpectedly demolished to make way for a new hyperspace bypass, and so the idea was lost, seemingly for ever.", + "This is her story." + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 21", + text : [ + "The problem is, or rather one of the problems, for there are many, a sizeable proportion of which are continually clogging up the civil, commercial, and criminal courts in all areas of the Galaxy, and especially, where possible, the more corrupt ones, this.", + "The previous sentence makes sense. That is not the problem.", + "This is:", + "Change.", + "Read it through again and you'll get it." + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 23", + text : [ + "Ford: \"Life,\" he said, \"is like a grapefruit.\"", + "Creature:\"Er, how so?\"", + "Ford: \"Well, it's sort of orangey-yellow and dimpled on the outside, wet and squidgy in the middle. It's got pips inside, too. 
Oh, and some people have half a one for breakfast.\"" + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 25", + text : [ + "\"This Arthur Dent,\" comes the cry from the furthest reaches of the galaxy, and has even now been found inscribed on a mysterious deep space probe thought to originate from an alien galaxy at a distance too hideous to contemplate, \"what is he, man or mouse? Is he interested in nothing more than tea and the wider issues of life? Has he no spirit? has he no passion? Does he not, to put it in a nutshell, fuck?\"", + "Those who wish to know should read on. Others may wish to skip on to the last chapter which is a good bit and has Marvin in it." + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 31", + text : [ + "The sign said:", + "Hold stick near centre of its length. Moisten pointed end in mouth. Insert in tooth space, blunt end next to gum. Use gentle in-out motion.", + "\"It seemed to me,\" said Wonko the Sane, \"that any civilization that had so far lost its head as to need to include a set of detailed instructions for use in a packet of toothpicks, was no longer a civilization in which I could live and stay sane.\"" + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 35", + text : [ + "The Hitchhiker's Guide to the Galaxy [...] says of the Sirius Cybernetics Corporation products that \"it is very easy to be blinded to the essential uselessness of them by the sense of achievement you get from getting them to work at all.\"" + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 40", + text : [ + "\"So much time,\" it groaned, \"oh so much time. And pain as well, so much of that, and so much time to suffer it in too. One or the other on its own I could probably manage.
It's the two together that really get me down.\"" + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 40", + text : [ + "\"Ha!\" snapped Marvin. \"Ha!\" he repeated. \"What do you know of always? You say 'always' to me, who, because of the silly little errands your organic lifeforms keep on sending me through time on, am now thirty-seven times older than the Universe itself? Pick your words with a little more care,\" he coughed, \"and tact.\"" + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Chapter 40", + text : [ + "\"We apologise for the inconvenience.\" God's Final Message to His Creation, written in letters of fire on the side of the Quentulus Quazgar Mountains.", + "\"I think,\" Marvin murmured at last, from deep within his corroding rattling thorax, \"I feel good about it.\"", + "The lights went out in his eyes for absolutely the very last time ever." + ] + }, + { + book : "So Long And Thanks for All the Fish", + chapter: "Epilogue", + text : [ + "There was a point to this story, but it has temporarily escaped the chronicler's mind." + ] + } + ] +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/search/HttpSearchAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/search/HttpSearchAction.java new file mode 100644 index 00000000000..a180bf5f521 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/search/HttpSearchAction.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.action.search; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchOperationThreading; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; +import org.elasticsearch.http.*; +import org.elasticsearch.http.action.support.HttpActions; +import org.elasticsearch.http.action.support.HttpJsonBuilder; +import org.elasticsearch.index.query.json.JsonQueryBuilders; +import org.elasticsearch.index.query.json.QueryStringJsonQueryBuilder; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; +import java.util.regex.Pattern; + +import static org.elasticsearch.http.HttpResponse.Status.*; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpSearchAction extends BaseHttpServerHandler { + + public final static Pattern fieldsPattern; + + + static { + fieldsPattern = Pattern.compile(","); + } + + @Inject public HttpSearchAction(Settings settings, HttpServer httpService, Client client) { + super(settings, client); + httpService.registerHandler(HttpRequest.Method.GET, "/{index}/_search", 
this); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/_search", this); + httpService.registerHandler(HttpRequest.Method.GET, "/{index}/{type}/_search", this); + httpService.registerHandler(HttpRequest.Method.POST, "/{index}/{type}/_search", this); + } + + @Override public void handleRequest(final HttpRequest request, final HttpChannel channel) { + SearchRequest searchRequest; + try { + searchRequest = parseSearchRequest(request); + searchRequest.listenerThreaded(false); + SearchOperationThreading operationThreading = SearchOperationThreading.fromString(request.param("operationThreading"), SearchOperationThreading.SINGLE_THREAD); + if (operationThreading == SearchOperationThreading.NO_THREADS) { + // since we don't spawn, don't allow no_threads, but change it to a single thread + operationThreading = SearchOperationThreading.SINGLE_THREAD; + } + searchRequest.operationThreading(operationThreading); + } catch (Exception e) { + try { + channel.sendResponse(new JsonHttpResponse(request, BAD_REQUEST, JsonBuilder.cached().startObject().field("error", e.getMessage()).endObject())); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + return; + } + client.execSearch(searchRequest, new ActionListener() { + @Override public void onResponse(SearchResponse result) { + try { + JsonBuilder builder = HttpJsonBuilder.cached(request); + builder.startObject(); + result.toJson(builder); + builder.endObject(); + channel.sendResponse(new JsonHttpResponse(request, OK, builder)); + } catch (Exception e) { + onFailure(e); + } + } + + @Override public void onFailure(Throwable e) { + try { + channel.sendResponse(new JsonThrowableHttpResponse(request, e)); + } catch (IOException e1) { + logger.error("Failed to send failure response", e1); + } + } + }); + } + + @Override public boolean spawn() { + return false; + } + + private SearchRequest parseSearchRequest(HttpRequest request) { + String[] indices = 
HttpActions.splitIndices(request.param("index")); + SearchRequest searchRequest = new SearchRequest(indices, parseSearchSource(request)); + + String searchType = request.param("searchType"); + if (searchType != null) { + if ("dfs_query_then_fetch".equals(searchType)) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } else if ("dfs_query_and_fetch".equals(searchType)) { + searchRequest.searchType(SearchType.DFS_QUERY_AND_FETCH); + } else if ("query_then_fetch".equals(searchType)) { + searchRequest.searchType(SearchType.QUERY_THEN_FETCH); + } else if ("query_and_fetch".equals(searchType)) { + searchRequest.searchType(SearchType.QUERY_AND_FETCH); + } else { + throw new ElasticSearchIllegalArgumentException("No search type for [" + searchType + "]"); + } + } else { + searchRequest.searchType(SearchType.QUERY_THEN_FETCH); + } + + String from = request.param("from"); + if (from != null) { + searchRequest.from(Integer.parseInt(from)); + } + + String size = request.param("size"); + if (size != null) { + searchRequest.size(Integer.parseInt(size)); + } + + // TODO query boost per index +// searchRequest.queryBoost(); + + String scroll = request.param("scroll"); + if (scroll != null) { + searchRequest.scroll(new Scroll(TimeValue.parseTimeValue(scroll, null))); + } + + String timeout = request.param("timeout"); + if (timeout != null) { + searchRequest.timeout(TimeValue.parseTimeValue(timeout, null)); + } + + String typesParam = request.param("type"); + if (typesParam != null) { + searchRequest.types(HttpActions.splitTypes(typesParam)); + } + + searchRequest.queryHint(request.param("queryHint")); + + return searchRequest; + } + + private String parseSearchSource(HttpRequest request) { + if (request.hasContent()) { + return request.contentAsString(); + } + String queryString = request.param("q"); + if (queryString == null) { + throw new ElasticSearchIllegalArgumentException("No query to execute, not in body, and not bounded to 'q' parameter"); + } + 
QueryStringJsonQueryBuilder queryBuilder = JsonQueryBuilders.queryString(queryString); + queryBuilder.defaultField(request.param("df")); + queryBuilder.analyzer(request.param("analyzer")); + String defaultOperator = request.param("defaultOperator"); + if (defaultOperator != null) { + if ("OR".equals(defaultOperator)) { + queryBuilder.defualtOperator(QueryStringJsonQueryBuilder.Operator.OR); + } else if ("AND".equals(defaultOperator)) { + queryBuilder.defualtOperator(QueryStringJsonQueryBuilder.Operator.AND); + } else { + throw new ElasticSearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + } + } + // TODO add different parameters to the query + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(queryBuilder); + + searchSourceBuilder.queryParserName(request.param("queryParserName")); + String explain = request.param("explain"); + if (explain != null) { + searchSourceBuilder.explain(Boolean.parseBoolean(explain)); + } + + List fields = request.params("field"); + if (fields != null && !fields.isEmpty()) { + searchSourceBuilder.fields(fields); + } + String sField = request.param("fields"); + if (sField != null) { + String[] sFields = fieldsPattern.split(sField); + if (sFields != null) { + for (String field : sFields) { + searchSourceBuilder.field(field); + } + } + } + + List sorts = request.params("sort"); + if (sorts != null && !sorts.isEmpty()) { + for (String sort : sorts) { + int delimiter = sort.lastIndexOf(":"); + if (delimiter != -1) { + String sortField = sort.substring(0, delimiter); + String reverse = sort.substring(delimiter + 1); + searchSourceBuilder.sort(sortField, reverse.equals("reverse")); + } else { + searchSourceBuilder.sort(sort); + } + } + } + + // TODO add different parameters to the source + return searchSourceBuilder.build(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/support/HttpActions.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/support/HttpActions.java new file mode 100644 index 00000000000..aa789eef2af --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/support/HttpActions.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action.support; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.index.query.json.JsonQueryBuilders; +import org.elasticsearch.index.query.json.QueryStringJsonQueryBuilder; +import org.elasticsearch.util.Strings; + +import java.util.regex.Pattern; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpActions { + + public final static Pattern indicesPattern; + public final static Pattern typesPattern; + private final static Pattern nodesIdsPattern; + + + static { + indicesPattern = Pattern.compile(","); + typesPattern = Pattern.compile(","); + nodesIdsPattern = Pattern.compile(","); + } + + public static String parseQuerySource(HttpRequest request) { + if (request.hasContent()) { + return request.contentAsString(); + } + String queryString = request.param("q"); + if (queryString == null) { + throw new ElasticSearchIllegalArgumentException("No query to execute, not in body, and not bounded to 'q' parameter"); + } + QueryStringJsonQueryBuilder queryBuilder = JsonQueryBuilders.queryString(queryString); + queryBuilder.defaultField(request.param("df")); + queryBuilder.analyzer(request.param("analyzer")); + String defaultOperator = request.param("defaultOperator"); + if (defaultOperator != null) { + if ("OR".equals(defaultOperator)) { + queryBuilder.defualtOperator(QueryStringJsonQueryBuilder.Operator.OR); + } else if ("AND".equals(defaultOperator)) { + queryBuilder.defualtOperator(QueryStringJsonQueryBuilder.Operator.AND); + } else { + throw new ElasticSearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + } + } + return queryBuilder.build(); + } + + public static String[] splitIndices(String indices) { + if (indices == null) { + return Strings.EMPTY_ARRAY; + } + return indicesPattern.split(indices); + } + + public static String[] splitTypes(String typeNames) { + return 
typesPattern.split(typeNames); + } + + public static String[] splitNodes(String nodes) { + if (nodes == null) { + return Strings.EMPTY_ARRAY; + } + return nodesIdsPattern.split(nodes); + } + + public static float paramAsFloat(String floatValue, float defaultValue) { + if (floatValue == null) { + return defaultValue; + } + return Float.parseFloat(floatValue); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/support/HttpJsonBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/support/HttpJsonBuilder.java new file mode 100644 index 00000000000..aea0bf12c54 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/action/support/HttpJsonBuilder.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.action.support; + +import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class HttpJsonBuilder { + + public static JsonBuilder cached(HttpRequest request) throws IOException { + JsonBuilder builder = JsonBuilder.cached(); + String prettyPrint = request.param("pretty"); + if (prettyPrint != null && "true".equals(prettyPrint)) { + builder.prettyPrint(); + } + return builder; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java new file mode 100644 index 00000000000..0d6a6edf342 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.netty; + +import org.jboss.netty.channel.*; +import org.jboss.netty.handler.codec.http.HttpRequest; + + +/** + * @author kimchy (Shay Banon) + */ +@ChannelPipelineCoverage(ChannelPipelineCoverage.ALL) +public class HttpRequestHandler extends SimpleChannelUpstreamHandler { + + private final NettyHttpServerTransport serverTransport; + + public HttpRequestHandler(NettyHttpServerTransport serverTransport) { + this.serverTransport = serverTransport; + } + + @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception { + HttpRequest request = (HttpRequest) e.getMessage(); + serverTransport.dispatchRequest(new NettyHttpRequest(request), new NettyHttpChannel(e.getChannel(), request)); + super.messageReceived(ctx, e); + } + + @Override public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { + serverTransport.exceptionCaught(ctx, e); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java new file mode 100644 index 00000000000..f1f98df688f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.netty; + +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.HttpResponse; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelFutureListener; +import org.jboss.netty.handler.codec.http.*; + +import java.util.Set; + +/** + * @author kimchy (Shay Banon) + */ +public class NettyHttpChannel implements HttpChannel { + private final Channel channel; + private final org.jboss.netty.handler.codec.http.HttpRequest request; + + public NettyHttpChannel(Channel channel, org.jboss.netty.handler.codec.http.HttpRequest request) { + this.channel = channel; + this.request = request; + } + + @Override public void sendResponse(HttpResponse response) { + + // Decide whether to close the connection or not. + boolean http10 = request.getProtocolVersion().equals(HttpVersion.HTTP_1_0); + boolean close = + HttpHeaders.Values.CLOSE.equalsIgnoreCase(request.getHeader(HttpHeaders.Names.CONNECTION)) || + (http10 && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(request.getHeader(HttpHeaders.Names.CONNECTION))); + + // Build the response object. 
+ HttpResponseStatus status = getStatus(response.status()); + org.jboss.netty.handler.codec.http.HttpResponse resp; + if (http10) { + resp = new DefaultHttpResponse(HttpVersion.HTTP_1_0, status); + if (!close) { + resp.addHeader(HttpHeaders.Names.CONNECTION, "Keep-Alive"); + } + } else { + resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status); + } + // Convert the response content to a ChannelBuffer. + ChannelBuffer buf; + if (response.contentThreadSafe()) { + buf = ChannelBuffers.wrappedBuffer(response.content(), 0, response.contentLength()); + } else { + buf = ChannelBuffers.copiedBuffer(response.content(), 0, response.contentLength()); + } + if (response.prefixContent() != null || response.suffixContent() != null) { + ChannelBuffer prefixBuf = ChannelBuffers.EMPTY_BUFFER; + if (response.prefixContent() != null) { + prefixBuf = ChannelBuffers.copiedBuffer(response.prefixContent(), 0, response.prefixContentLength()); + } + ChannelBuffer suffixBuf = ChannelBuffers.EMPTY_BUFFER; + if (response.suffixContent() != null) { + suffixBuf = ChannelBuffers.copiedBuffer(response.suffixContent(), 0, response.suffixContentLength()); + } + buf = ChannelBuffers.wrappedBuffer(prefixBuf, buf, suffixBuf); + } + resp.setContent(buf); + resp.setHeader(HttpHeaders.Names.CONTENT_TYPE, response.contentType()); + + resp.setHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(buf.readableBytes())); + + String cookieString = request.getHeader(HttpHeaders.Names.COOKIE); + if (cookieString != null) { + CookieDecoder cookieDecoder = new CookieDecoder(); + Set cookies = cookieDecoder.decode(cookieString); + if (!cookies.isEmpty()) { + // Reset the cookies if necessary. + CookieEncoder cookieEncoder = new CookieEncoder(true); + for (Cookie cookie : cookies) { + cookieEncoder.addCookie(cookie); + } + resp.addHeader(HttpHeaders.Names.SET_COOKIE, cookieEncoder.encode()); + } + } + + // Write the response. 
+ ChannelFuture future = channel.write(resp); + + // Close the connection after the write operation is done if necessary. + if (close) { + future.addListener(ChannelFutureListener.CLOSE); + } + } + + private HttpResponseStatus getStatus(HttpResponse.Status status) { + switch (status) { + case CONTINUE: + return HttpResponseStatus.CONTINUE; + case SWITCHING_PROTOCOLS: + return HttpResponseStatus.SWITCHING_PROTOCOLS; + case OK: + return HttpResponseStatus.OK; + case CREATED: + return HttpResponseStatus.CREATED; + case ACCEPTED: + return HttpResponseStatus.ACCEPTED; + case NON_AUTHORITATIVE_INFORMATION: + return HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION; + case NO_CONTENT: + return HttpResponseStatus.NO_CONTENT; + case RESET_CONTENT: + return HttpResponseStatus.RESET_CONTENT; + case PARTIAL_CONTENT: + return HttpResponseStatus.PARTIAL_CONTENT; + case MULTI_STATUS: + // no status for this?? + return HttpResponseStatus.INTERNAL_SERVER_ERROR; + case MULTIPLE_CHOICES: + return HttpResponseStatus.MULTIPLE_CHOICES; + case MOVED_PERMANENTLY: + return HttpResponseStatus.MOVED_PERMANENTLY; + case FOUND: + return HttpResponseStatus.FOUND; + case SEE_OTHER: + return HttpResponseStatus.SEE_OTHER; + case NOT_MODIFIED: + return HttpResponseStatus.NOT_MODIFIED; + case USE_PROXY: + return HttpResponseStatus.USE_PROXY; + case TEMPORARY_REDIRECT: + return HttpResponseStatus.TEMPORARY_REDIRECT; + case BAD_REQUEST: + return HttpResponseStatus.BAD_REQUEST; + case UNAUTHORIZED: + return HttpResponseStatus.UNUATHORIZED; + case PAYMENT_REQUIRED: + return HttpResponseStatus.PAYMENT_REQUIRED; + case FORBIDDEN: + return HttpResponseStatus.FORBIDDEN; + case NOT_FOUND: + return HttpResponseStatus.NOT_FOUND; + case METHOD_NOT_ALLOWED: + return HttpResponseStatus.METHOD_NOT_ALLOWED; + case NOT_ACCEPTABLE: + return HttpResponseStatus.NOT_ACCEPTABLE; + case PROXY_AUTHENTICATION: + return HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED; + case REQUEST_TIMEOUT: + return 
HttpResponseStatus.REQUEST_TIMEOUT; + case CONFLICT: + return HttpResponseStatus.CONFLICT; + case GONE: + return HttpResponseStatus.GONE; + case LENGTH_REQUIRED: + return HttpResponseStatus.LENGTH_REQUIRED; + case PRECONDITION_FAILED: + return HttpResponseStatus.PRECONDITION_FAILED; + case REQUEST_ENTITY_TOO_LARGE: + return HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE; + case REQUEST_URI_TOO_LONG: + return HttpResponseStatus.REQUEST_URI_TOO_LONG; + case UNSUPPORTED_MEDIA_TYPE: + return HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE; + case REQUESTED_RANGE_NOT_SATISFIED: + return HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE; + case EXPECTATION_FAILED: + return HttpResponseStatus.EXPECTATION_FAILED; + case UNPROCESSABLE_ENTITY: + return HttpResponseStatus.BAD_REQUEST; + case LOCKED: + return HttpResponseStatus.BAD_REQUEST; + case FAILED_DEPENDENCY: + return HttpResponseStatus.BAD_REQUEST; + case INTERNAL_SERVER_ERROR: + return HttpResponseStatus.INTERNAL_SERVER_ERROR; + case NOT_IMPLEMENTED: + return HttpResponseStatus.NOT_IMPLEMENTED; + case BAD_GATEWAY: + return HttpResponseStatus.BAD_GATEWAY; + case SERVICE_UNAVAILABLE: + return HttpResponseStatus.SERVICE_UNAVAILABLE; + case GATEWAY_TIMEOUT: + return HttpResponseStatus.GATEWAY_TIMEOUT; + case HTTP_VERSION_NOT_SUPPORTED: + return HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED; + default: + return HttpResponseStatus.INTERNAL_SERVER_ERROR; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java new file mode 100644 index 00000000000..75c59be05fe --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java @@ -0,0 +1,192 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty;

import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.http.HttpRequest;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.QueryStringDecoder;

import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Adapts a Netty HTTP request to the transport-neutral {@link HttpRequest}
 * interface, decoding query-string parameters lazily and converting the UTF-8
 * body directly to UTF-16 without intermediate byte-array copies.
 *
 * @author kimchy (Shay Banon)
 */
public class NettyHttpRequest implements HttpRequest {

    private final org.jboss.netty.handler.codec.http.HttpRequest request;

    private QueryStringDecoder queryStringDecoder;

    // Per-thread scratch buffer for UTF-8 -> UTF-16 conversion, so repeated
    // body decoding does not reallocate char/offset arrays.
    private static ThreadLocal<UnicodeUtil.UTF16Result> utf16Result = new ThreadLocal<UnicodeUtil.UTF16Result>() {
        @Override protected UnicodeUtil.UTF16Result initialValue() {
            return new UnicodeUtil.UTF16Result();
        }
    };

    public NettyHttpRequest(org.jboss.netty.handler.codec.http.HttpRequest request) {
        this.request = request;
        this.queryStringDecoder = new QueryStringDecoder(request.getUri());
    }

    /**
     * Maps the Netty method onto the transport-neutral enum; unknown methods
     * fall back to GET (preserving the original behavior).
     */
    @Override public Method method() {
        HttpMethod httpMethod = request.getMethod();
        if (httpMethod == HttpMethod.GET)
            return Method.GET;

        if (httpMethod == HttpMethod.POST)
            return Method.POST;

        if (httpMethod == HttpMethod.PUT)
            return Method.PUT;

        if (httpMethod == HttpMethod.DELETE)
            return Method.DELETE;

        return Method.GET;
    }

    @Override public String uri() {
        return request.getUri();
    }

    @Override public boolean hasContent() {
        return request.getContent().readableBytes() > 0;
    }

    /**
     * Decodes the request body (assumed UTF-8) into a String using the
     * thread-local conversion buffer.
     */
    @Override public String contentAsString() {
        UnicodeUtil.UTF16Result result = utf16Result.get();
        ChannelBuffer content = request.getContent();
        UTF8toUTF16(content, content.readerIndex(), content.readableBytes(), result);
        return new String(result.result, 0, result.length);
    }

    @Override public Set<String> headerNames() {
        return request.getHeaderNames();
    }

    @Override public String header(String name) {
        return request.getHeader(name);
    }

    @Override public List<String> headers(String name) {
        return request.getHeaders(name);
    }

    @Override public String cookie() {
        return request.getHeader(HttpHeaders.Names.COOKIE);
    }

    /**
     * Returns the first value of a query parameter, or null when absent.
     */
    @Override public String param(String key) {
        List<String> keyParams = params(key);
        if (keyParams == null || keyParams.isEmpty()) {
            return null;
        }
        return keyParams.get(0);
    }

    @Override public List<String> params(String key) {
        return queryStringDecoder.getParameters().get(key);
    }

    @Override public Map<String, List<String>> params() {
        return queryStringDecoder.getParameters();
    }

    // LUCENE TRACK
    // The idea here is not to allocate all these byte arrays / char arrays again, just use the channel buffer to convert
    // directly into UTF16 from bytes that represent UTF8 ChannelBuffer
    //
    // Adapted from Lucene's UnicodeUtil incremental UTF-8 decoder; supports
    // resuming when a previous call ended mid-character (offsets[] entries of
    // -1 mark continuation bytes).
    public static void UTF8toUTF16(ChannelBuffer cb, final int offset, final int length, final UnicodeUtil.UTF16Result result) {

        final int end = offset + length;
        char[] out = result.result;
        // Grow the byte->char offset map if needed.
        if (result.offsets.length <= end) {
            int[] newOffsets = new int[2 * end];
            System.arraycopy(result.offsets, 0, newOffsets, 0, result.offsets.length);
            result.offsets = newOffsets;
        }
        final int[] offsets = result.offsets;

        // If incremental decoding fell in the middle of a
        // single unicode character, rollback to its start:
        int upto = offset;
        while (offsets[upto] == -1)
            upto--;

        int outUpto = offsets[upto];

        // Pre-allocate for worst case 1-for-1
        if (outUpto + length >= out.length) {
            char[] newOut = new char[2 * (outUpto + length)];
            System.arraycopy(out, 0, newOut, 0, outUpto);
            result.result = out = newOut;
        }

        while (upto < end) {

            final int b = cb.getByte(upto) & 0xff;
            final int ch;

            offsets[upto++] = outUpto;

            // Decode 1- to 4-byte UTF-8 sequences; continuation bytes are
            // marked -1 in the offsets map so a resumed call can rollback.
            if (b < 0xc0) {
                assert b < 0x80;
                ch = b;
            } else if (b < 0xe0) {
                ch = ((b & 0x1f) << 6) + (cb.getByte(upto) & 0x3f);
                offsets[upto++] = -1;
            } else if (b < 0xf0) {
                ch = ((b & 0xf) << 12) + ((cb.getByte(upto) & 0x3f) << 6) + (cb.getByte(upto + 1) & 0x3f);
                offsets[upto++] = -1;
                offsets[upto++] = -1;
            } else {
                assert b < 0xf8;
                ch = ((b & 0x7) << 18) + ((cb.getByte(upto) & 0x3f) << 12) + ((cb.getByte(upto + 1) & 0x3f) << 6) + (cb.getByte(upto + 2) & 0x3f);
                offsets[upto++] = -1;
                offsets[upto++] = -1;
                offsets[upto++] = -1;
            }

            if (ch <= UNI_MAX_BMP) {
                // target is a character <= 0xFFFF
                out[outUpto++] = (char) ch;
            } else {
                // target is a character in range 0xFFFF - 0x10FFFF
                final int chHalf = ch - HALF_BASE;
                out[outUpto++] = (char) ((chHalf >> HALF_SHIFT) + UnicodeUtil.UNI_SUR_HIGH_START);
                out[outUpto++] = (char) ((chHalf & HALF_MASK) + UnicodeUtil.UNI_SUR_LOW_START);
            }
        }

        offsets[upto] = outUpto;
        result.length = outUpto;
    }

    private static final long UNI_MAX_BMP = 0x0000FFFF;

    private static final int HALF_BASE = 0x0010000;
    private static final long HALF_SHIFT = 10;
    private static final long HALF_MASK = 0x3FFL;
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -0,0 +1,283 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.netty; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.http.*; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.transport.BoundTransportAddress; +import org.elasticsearch.util.transport.InetSocketTransportAddress; +import org.elasticsearch.util.transport.NetworkExceptionHelper; +import org.elasticsearch.util.transport.PortsRange; +import org.jboss.netty.bootstrap.ServerBootstrap; +import org.jboss.netty.channel.*; +import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; +import org.jboss.netty.handler.codec.http.HttpRequestDecoder; +import org.jboss.netty.handler.codec.http.HttpResponseEncoder; +import 
org.jboss.netty.handler.timeout.ReadTimeoutException; +import org.jboss.netty.handler.timeout.ReadTimeoutHandler; +import org.jboss.netty.logging.InternalLogger; +import org.jboss.netty.logging.InternalLoggerFactory; +import org.jboss.netty.logging.Slf4JLoggerFactory; +import org.jboss.netty.util.HashedWheelTimer; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.util.TimeValue.*; +import static org.elasticsearch.util.concurrent.DynamicExecutors.*; +import static org.elasticsearch.util.io.HostResolver.*; + +/** + * @author kimchy (Shay Banon) + */ +public class NettyHttpServerTransport extends AbstractComponent implements HttpServerTransport { + + static { + InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory() { + @Override public InternalLogger newInstance(String name) { + return super.newInstance(name.replace("org.jboss.netty.", "netty.lib.")); + } + }); + } + + private final Lifecycle lifecycle = new Lifecycle(); + + private final ThreadPool threadPool; + + private final int workerCount; + + private final String port; + + private final String bindHost; + + private final String publishHost; + + private final Boolean tcpNoDelay; + + private final Boolean tcpKeepAlive; + + private final Boolean reuseAddress; + + private final SizeValue tcpSendBufferSize; + + private final SizeValue tcpReceiveBufferSize; + + private final TimeValue httpKeepAlive; + + private final TimeValue httpKeepAliveTickDuration; + + private volatile ServerBootstrap serverBootstrap; + + private volatile BoundTransportAddress boundAddress; + + private volatile Channel serverChannel; + + private volatile OpenChannelsHandler serverOpenChannels; + + private volatile HttpServerAdapter httpServerAdapter; + + @Inject public NettyHttpServerTransport(Settings settings, ThreadPool 
threadPool) { + super(settings); + this.threadPool = threadPool; + this.workerCount = componentSettings.getAsInt("workerCount", Runtime.getRuntime().availableProcessors()); + this.port = componentSettings.get("port", "9200-9300"); + this.bindHost = componentSettings.get("bindHost"); + this.publishHost = componentSettings.get("publishHost"); + this.tcpNoDelay = componentSettings.getAsBoolean("tcpNoDelay", true); + this.tcpKeepAlive = componentSettings.getAsBoolean("tcpKeepAlive", null); + this.reuseAddress = componentSettings.getAsBoolean("reuseAddress", true); + this.tcpSendBufferSize = componentSettings.getAsSize("tcpSendBufferSize", null); + this.tcpReceiveBufferSize = componentSettings.getAsSize("tcpReceiveBufferSize", null); + this.httpKeepAlive = componentSettings.getAsTime("httpKeepAlive", timeValueSeconds(30)); + this.httpKeepAliveTickDuration = componentSettings.getAsTime("httpKeepAliveTickDuration", timeValueMillis(500)); + + if ((httpKeepAliveTickDuration.millis() * 10) > httpKeepAlive.millis()) { + logger.warn("Suspicious keep alive settings, httpKeepAlive set to [{}], while httpKeepAliveTickDuration is set to [{}]", httpKeepAlive, httpKeepAliveTickDuration); + } + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + public void httpServerAdapter(HttpServerAdapter httpServerAdapter) { + this.httpServerAdapter = httpServerAdapter; + } + + @Override public HttpServerTransport start() throws HttpException { + if (!lifecycle.moveToStarted()) { + return this; + } + + this.serverOpenChannels = new OpenChannelsHandler(); + + serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, "httpBoss")), + Executors.newCachedThreadPool(daemonThreadFactory(settings, "httpIoWorker")), + workerCount)); + + final HashedWheelTimer keepAliveTimer = new HashedWheelTimer(daemonThreadFactory(settings, "keepAliveTimer"), httpKeepAliveTickDuration.millis(), 
TimeUnit.MILLISECONDS); + final HttpRequestHandler requestHandler = new HttpRequestHandler(this); + + ChannelPipelineFactory pipelineFactory = new ChannelPipelineFactory() { + @Override public ChannelPipeline getPipeline() throws Exception { + ChannelPipeline pipeline = Channels.pipeline(); + pipeline.addLast("openChannels", serverOpenChannels); + pipeline.addLast("keepAliveTimeout", new ReadTimeoutHandler(keepAliveTimer, httpKeepAlive.millis(), TimeUnit.MILLISECONDS)); + pipeline.addLast("decoder", new HttpRequestDecoder()); + pipeline.addLast("encoder", new HttpResponseEncoder()); + pipeline.addLast("handler", requestHandler); + return pipeline; + } + }; + + serverBootstrap.setPipelineFactory(pipelineFactory); + + if (tcpNoDelay != null) { + serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay); + } + if (tcpKeepAlive != null) { + serverBootstrap.setOption("child.keepAlive", tcpKeepAlive); + } + if (tcpSendBufferSize != null) { + serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes()); + } + if (tcpReceiveBufferSize != null) { + serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes()); + } + if (reuseAddress != null) { + serverBootstrap.setOption("reuseAddress", reuseAddress); + serverBootstrap.setOption("child.reuseAddress", reuseAddress); + } + + // Bind and start to accept incoming connections. 
+ InetAddress hostAddressX; + try { + hostAddressX = resultBindHostAddress(bindHost, settings); + } catch (IOException e) { + throw new BindHttpException("Failed to resolve host [" + bindHost + "]", e); + } + final InetAddress hostAddress = hostAddressX; + + PortsRange portsRange = new PortsRange(port); + final AtomicReference lastException = new AtomicReference(); + boolean success = portsRange.iterate(new PortsRange.PortCallback() { + @Override public boolean onPortNumber(int portNumber) { + try { + serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)); + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + } + }); + if (!success) { + throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get()); + } + + InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress(); + InetSocketAddress publishAddress; + try { + InetAddress publishAddressX = resultPublishHostAddress(publishHost, settings); + if (publishAddressX == null) { + // if its 0.0.0.0, we can't publish that.., default to the local ip address + if (boundAddress.getAddress().isAnyLocalAddress()) { + publishAddress = new InetSocketAddress(resultPublishHostAddress(publishHost, settings, LOCAL_IP), boundAddress.getPort()); + } else { + publishAddress = boundAddress; + } + } else { + publishAddress = new InetSocketAddress(publishAddressX, boundAddress.getPort()); + } + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)); + return this; + } + + @Override public HttpServerTransport stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + if (serverChannel != null) { + serverChannel.close().awaitUninterruptibly(); + serverChannel = null; + } + + if (serverOpenChannels != null) { + 
serverOpenChannels.close(); + serverOpenChannels = null; + } + + if (serverBootstrap != null) { + serverBootstrap.releaseExternalResources(); + serverBootstrap = null; + } + return this; + } + + @Override public void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + public BoundTransportAddress boundAddress() { + return this.boundAddress; + } + + void dispatchRequest(HttpRequest request, HttpChannel channel) { + httpServerAdapter.dispatchRequest(request, channel); + } + + void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { + if (e.getCause() instanceof ReadTimeoutException) { + if (logger.isTraceEnabled()) { + logger.trace("Connection timeout [{}]", ctx.getChannel().getRemoteAddress()); + } + ctx.getChannel().close(); + } else { + if (!lifecycle.started()) { + // ignore + return; + } + if (!NetworkExceptionHelper.isCloseConnectionException(e.getCause())) { + logger.warn("Caught exception while handling client http trafic", e.getCause()); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java new file mode 100644 index 00000000000..a36e06d4b6d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.netty; + +import com.google.inject.AbstractModule; +import org.elasticsearch.http.HttpServerTransport; + +/** + * @author kimchy (Shay Banon) + */ +public class NettyHttpServerTransportModule extends AbstractModule { + + @Override protected void configure() { + bind(HttpServerTransport.class).to(NettyHttpServerTransport.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/OpenChannelsHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/OpenChannelsHandler.java new file mode 100644 index 00000000000..d551abf9679 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/OpenChannelsHandler.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.netty; + +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashSet; +import org.jboss.netty.channel.*; + +/** + * @author kimchy (Shay Banon) + */ +@ChannelPipelineCoverage(ChannelPipelineCoverage.ALL) +public class OpenChannelsHandler implements ChannelUpstreamHandler { + + private NonBlockingHashSet openChannels = new NonBlockingHashSet(); + + private final ChannelFutureListener remover = new ChannelFutureListener() { + public void operationComplete(ChannelFuture future) throws Exception { + openChannels.remove(future.getChannel()); + } + }; + + @Override public void handleUpstream(ChannelHandlerContext ctx, ChannelEvent e) throws Exception { + if (e instanceof ChannelStateEvent) { + ChannelStateEvent evt = (ChannelStateEvent) e; + if (evt.getState() == ChannelState.OPEN) { + boolean added = openChannels.add(ctx.getChannel()); + if (added) { + ctx.getChannel().getCloseFuture().addListener(remover); + } + } + } + ctx.sendUpstream(e); + } + + public void close() { + for (Channel channel : openChannels) { + channel.close().awaitUninterruptibly(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/SimpleNettyHttpTransportTests.java b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/SimpleNettyHttpTransportTests.java new file mode 100644 index 00000000000..bbd08273a59 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/http/netty/SimpleNettyHttpTransportTests.java @@ -0,0 +1,24 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.netty; + +public class SimpleNettyHttpTransportTests { + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java new file mode 100644 index 00000000000..da6ad328b02 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.jmx.ManagedGroupName; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.settings.Settings; +import org.slf4j.Logger; + +import static org.elasticsearch.index.IndexServiceManagement.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public abstract class AbstractIndexComponent implements IndexComponent { + + protected final Logger logger; + + protected final Index index; + + protected final Settings indexSettings; + + protected final Settings componentSettings; + + protected AbstractIndexComponent(Index index, @IndexSettings Settings indexSettings) { + this.index = index; + this.indexSettings = indexSettings; + this.componentSettings = indexSettings.getComponentSettings(getClass()); + + this.logger = Loggers.getLogger(getClass(), indexSettings, index); + } + + @Override public Index index() { + return this.index; + } + + public String nodeName() { + return indexSettings.get("name", ""); + } + + @ManagedGroupName + private String managementGroupName() { + return buildIndexGroupName(index); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/Index.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/Index.java new file mode 100644 index 00000000000..3100a20ce6c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/Index.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.util.concurrent.Immutable; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; + +/** + * @author kimchy (Shay Banon) + */ +@Immutable +public class Index implements Serializable, Streamable { + + private String name; + + private Index() { + + } + + public Index(String name) { + this.name = name; + } + + public String name() { + return this.name; + } + + @Override public String toString() { + return "Index [" + name + "]"; + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Index index1 = (Index) o; + + if (name != null ? !name.equals(index1.name) : index1.name != null) return false; + + return true; + } + + @Override public int hashCode() { + return name != null ? 
name.hashCode() : 0; + } + + public static Index readIndexName(DataInput in) throws IOException, ClassNotFoundException { + Index index = new Index(); + index.readFrom(in); + return index; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + name = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(name); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexComponent.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexComponent.java new file mode 100644 index 00000000000..9f7fbef3d18 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexComponent.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public interface IndexComponent { + + Index index(); +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexException.java new file mode 100644 index 00000000000..2fb4cb53cbc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexException.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexException extends ElasticSearchException { + + private final Index index; + + public IndexException(Index index, String msg) { + this(index, msg, null); + } + + public IndexException(Index index, String msg, Throwable cause) { + super("Index[" + index.name() + "] " + msg, cause); + this.index = index; + } + + public Index index() { + return index; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexLifecycle.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexLifecycle.java new file mode 100644 index 00000000000..ccd4e0fef44 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexLifecycle.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.*; +import static java.lang.annotation.RetentionPolicy.*; + +/** + * A simple annotation that marks a component to be bounded to a single index + * lifecycle. + *

+ *

Note, currently only acts as a marker interface for readability. + * + * @author kimchy (Shay Banon) + */ +@Target({TYPE, ANNOTATION_TYPE}) +@Retention(RUNTIME) +@Documented +public @interface IndexLifecycle { +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexModule.java new file mode 100644 index 00000000000..19c4ff5a995 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import com.google.inject.AbstractModule; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexModule extends AbstractModule { + + @Override protected void configure() { + bind(IndexService.class).to(InternalIndexService.class).asEagerSingleton(); + bind(IndexServiceManagement.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexNameModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexNameModule.java new file mode 100644 index 00000000000..2349cba175f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexNameModule.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import com.google.inject.AbstractModule; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexNameModule extends AbstractModule { + + private final Index index; + + public IndexNameModule(Index index) { + this.index = index; + } + + @Override protected void configure() { + bind(Index.class).toInstance(index); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexService.java new file mode 100644 index 00000000000..b2d65639bf6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexService.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import com.google.inject.Injector; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.IndexQueryParserService; +import org.elasticsearch.index.routing.OperationRouting; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.similarity.SimilarityService; + +import java.util.Set; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public interface IndexService extends IndexComponent, Iterable { + + void close(); + + Injector injector(); + + FilterCache filterCache(); + + OperationRouting operationRouting(); + + MapperService mapperService(); + + IndexQueryParserService queryParserService(); + + SimilarityService similarityService(); + + IndexShard createShard(int sShardId) throws ElasticSearchException; + + void deleteShard(int shardId) throws ElasticSearchException; + + int numberOfShards(); + + Set shardIds(); + + boolean hasShard(int shardId); + + IndexShard shard(int shardId); + + IndexShard shardSafe(int shardId) throws IndexShardMissingException; + + Injector shardInjector(int shardId); + + Injector shardInjectorSafe(int shardId) throws IndexShardMissingException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexServiceManagement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexServiceManagement.java new file mode 100644 index 00000000000..9c6d598c1c5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexServiceManagement.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import com.google.inject.Inject; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.jmx.JmxService; +import org.elasticsearch.jmx.MBean; +import org.elasticsearch.jmx.ManagedAttribute; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +@MBean(objectName = "", description = "") +public class IndexServiceManagement extends AbstractIndexComponent { + + public static String buildIndexGroupName(Index index) { + return "service=indices,index=" + index.name(); + } + + private final JmxService jmxService; + + private final IndexService indexService; + + @Inject public IndexServiceManagement(Index index, @IndexSettings Settings indexSettings, JmxService jmxService, IndexService indexService) { + super(index, indexSettings); + this.jmxService = jmxService; + this.indexService = indexService; + } + + public void close() { + jmxService.unregisterGroup(buildIndexGroupName(indexService.index())); + } + + @ManagedAttribute(description = "Index Name") + public String getIndex() { + return indexService.index().name(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java new file mode 100644 index 
00000000000..8088c0665ef --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardAlreadyExistsException extends ElasticSearchException { + + public IndexShardAlreadyExistsException(String message) { + super(message); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexShardMissingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexShardMissingException.java new file mode 100644 index 00000000000..0ccfa4ebef1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/IndexShardMissingException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardMissingException extends IndexShardException { + + public IndexShardMissingException(ShardId shardId) { + super(shardId, "missing"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/InternalIndexService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/InternalIndexService.java new file mode 100644 index 00000000000..e75524f681b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/InternalIndexService.java @@ -0,0 +1,253 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.UnmodifiableIterator; +import com.google.inject.Inject; +import com.google.inject.Injector; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.deletionpolicy.DeletionPolicyModule; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineModule; +import org.elasticsearch.index.gateway.IndexGateway; +import org.elasticsearch.index.gateway.IndexShardGatewayModule; +import org.elasticsearch.index.gateway.IndexShardGatewayService; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.merge.policy.MergePolicyModule; +import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule; +import org.elasticsearch.index.query.IndexQueryParserService; +import org.elasticsearch.index.routing.OperationRouting; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardManagement; +import org.elasticsearch.index.shard.IndexShardModule; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.recovery.RecoveryAction; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.StoreModule; +import org.elasticsearch.index.translog.TranslogModule; +import 
org.elasticsearch.util.guice.Injectors; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +import static com.google.common.collect.Maps.*; +import static com.google.common.collect.Sets.*; +import static org.elasticsearch.util.MapBuilder.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public class InternalIndexService extends AbstractIndexComponent implements IndexService { + + private final Injector injector; + + private final Settings indexSettings; + + private final MapperService mapperService; + + private final IndexQueryParserService queryParserService; + + private final SimilarityService similarityService; + + private final FilterCache filterCache; + + private final OperationRouting operationRouting; + + private volatile ImmutableMap shardsInjectors = ImmutableMap.of(); + + private volatile ImmutableMap shards = ImmutableMap.of(); + + @Inject public InternalIndexService(Injector injector, Index index, @IndexSettings Settings indexSettings, + MapperService mapperService, IndexQueryParserService queryParserService, SimilarityService similarityService, + FilterCache filterCache, OperationRouting operationRouting) { + super(index, indexSettings); + this.injector = injector; + this.indexSettings = indexSettings; + this.mapperService = mapperService; + this.queryParserService = queryParserService; + this.similarityService = similarityService; + this.filterCache = filterCache; + this.operationRouting = operationRouting; + } + + @Override public int numberOfShards() { + return shards.size(); + } + + @Override public UnmodifiableIterator iterator() { + return shards.values().iterator(); + } + + @Override public boolean hasShard(int shardId) { + return shards.containsKey(shardId); + } + + @Override public IndexShard shard(int shardId) { + return shards.get(shardId); + } + + @Override public IndexShard shardSafe(int shardId) throws IndexShardMissingException { + IndexShard 
indexShard = shard(shardId); + if (indexShard == null) { + throw new IndexShardMissingException(new ShardId(index, shardId)); + } + return indexShard; + } + + @Override public Set shardIds() { + return newHashSet(shards.keySet()); + } + + @Override public Injector injector() { + return injector; + } + + @Override public FilterCache filterCache() { + return filterCache; + } + + @Override public OperationRouting operationRouting() { + return operationRouting; + } + + @Override public MapperService mapperService() { + return mapperService; + } + + @Override public IndexQueryParserService queryParserService() { + return queryParserService; + } + + @Override public SimilarityService similarityService() { + return similarityService; + } + + @Override public synchronized void close() { + for (int shardId : shardIds()) { + deleteShard(shardId, true); + } + } + + @Override public Injector shardInjector(int shardId) throws ElasticSearchException { + return shardsInjectors.get(shardId); + } + + @Override public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException { + Injector shardInjector = shardInjector(shardId); + if (shardInjector == null) { + throw new IndexShardMissingException(new ShardId(index, shardId)); + } + return shardInjector; + } + + @Override public synchronized IndexShard createShard(int sShardId) throws ElasticSearchException { + ShardId shardId = new ShardId(index, sShardId); + if (shardsInjectors.containsKey(shardId.id())) { + throw new IndexShardAlreadyExistsException(shardId + " already exists"); + } + + logger.debug("Creating Shard Id [{}]", shardId.id()); + + Injector shardInjector = injector.createChildInjector( + new IndexShardModule(shardId), + new StoreModule(indexSettings), + new DeletionPolicyModule(indexSettings), + new MergePolicyModule(indexSettings), + new MergeSchedulerModule(indexSettings), + new TranslogModule(indexSettings), + new EngineModule(indexSettings), + new 
IndexShardGatewayModule(injector.getInstance(IndexGateway.class))); + + shardsInjectors = newMapBuilder(shardsInjectors).put(shardId.id(), shardInjector).immutableMap(); + + IndexShard indexShard = shardInjector.getInstance(IndexShard.class); + + // clean the store + Store store = shardInjector.getInstance(Store.class); + try { + store.deleteContent(); + } catch (IOException e) { + logger.warn("Failed to clean store on shard creation", e); + } + + shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap(); + + return indexShard; + } + + @Override public synchronized void deleteShard(int shardId) throws ElasticSearchException { + deleteShard(shardId, false); + } + + private synchronized void deleteShard(int shardId, boolean close) throws ElasticSearchException { + Map tmpShardInjectors = newHashMap(shardsInjectors); + Injector shardInjector = tmpShardInjectors.remove(shardId); + if (shardInjector == null) { + if (close) { + return; + } + throw new IndexShardMissingException(new ShardId(index, shardId)); + } + shardsInjectors = ImmutableMap.copyOf(tmpShardInjectors); + if (!close) { + logger.debug("Deleting Shard Id [{}]", shardId); + } + + Map tmpShardsMap = newHashMap(shards); + IndexShard indexShard = tmpShardsMap.remove(shardId); + shards = ImmutableMap.copyOf(tmpShardsMap); + + // close shard actions + shardInjector.getInstance(IndexShardManagement.class).close(); + + RecoveryAction recoveryAction = shardInjector.getInstance(RecoveryAction.class); + if (recoveryAction != null) recoveryAction.close(); + + shardInjector.getInstance(IndexShardGatewayService.class).close(); + + indexShard.close(); + + + Engine engine = shardInjector.getInstance(Engine.class); + engine.close(); + + Store store = shardInjector.getInstance(Store.class); + try { + store.fullDelete(); + } catch (IOException e) { + logger.warn("Failed to clean store on shard deletion", e); + } + try { + store.close(); + } catch (IOException e) { + logger.warn("Failed to close store on 
shard deletion", e); + } + + Injectors.close(injector); + } + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/LocalNodeId.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/LocalNodeId.java new file mode 100644 index 00000000000..5b6b7b3da36 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/LocalNodeId.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import com.google.inject.BindingAnnotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.*; +import static java.lang.annotation.RetentionPolicy.*; + +/** + * @author kimchy (Shay Banon) + */ +@BindingAnnotation +@Target({FIELD, PARAMETER}) +@Retention(RUNTIME) +@Documented +public @interface LocalNodeId { +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java new file mode 100644 index 00000000000..b876b583c9c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import com.google.inject.AbstractModule; + +/** + * @author kimchy (Shay Banon) + */ +public class LocalNodeIdModule extends AbstractModule { + + private final String localNodeId; + + public LocalNodeIdModule(String localNodeId) { + this.localNodeId = localNodeId; + } + + @Override protected void configure() { + bind(String.class).annotatedWith(LocalNodeId.class).toInstance(localNodeId); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java new file mode 100644 index 00000000000..6fab0676171 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.ASCIIFoldingFilter; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory { + + @Inject public ASCIIFoldingTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new ASCIIFoldingFilter(tokenStream); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractAnalyzerProvider.java new file mode 100644 index 00000000000..530ef4e5508 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractAnalyzerProvider.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractAnalyzerProvider extends AbstractIndexComponent implements AnalyzerProvider { + + private final String name; + + public AbstractAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, String name) { + super(index, indexSettings); + this.name = name; + } + + @Override public String name() { + return this.name; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java new file mode 100644 index 00000000000..f6faecf3a0f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractTokenFilterFactory extends AbstractIndexComponent implements TokenFilterFactory { + + private final String name; + + public AbstractTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, String name) { + super(index, indexSettings); + this.name = name; + } + + @Override public String name() { + return this.name; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java new file mode 100644 index 00000000000..574910a9f32 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractTokenizerFactory extends AbstractIndexComponent implements TokenizerFactory { + + private final String name; + + public AbstractTokenizerFactory(Index index, @IndexSettings Settings indexSettings, String name) { + super(index, indexSettings); + this.name = name; + } + + @Override public String name() { + return this.name; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java new file mode 100644 index 00000000000..f7221576642 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; +import com.google.inject.assistedinject.FactoryProvider; +import com.google.inject.multibindings.MapBinder; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class AnalysisModule extends AbstractModule { + + private final Settings settings; + + public AnalysisModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + MapBinder tokenFilterBinder + = MapBinder.newMapBinder(binder(), String.class, TokenFilterFactoryFactory.class); + + Map tokenFiltersSettings = settings.getGroups("index.analysis.filter"); + for (Map.Entry entry : tokenFiltersSettings.entrySet()) { + String tokenFilterName = entry.getKey(); + Settings tokenFilterSettings = entry.getValue(); + + Class type = tokenFilterSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenFilterFactory"); + if (type == null) { + throw new IllegalArgumentException("Token Filter [" + tokenFilterName + "] must have a type associated with it"); + } + tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, type)).in(Scopes.SINGLETON); + } + // add defaults + if (!tokenFiltersSettings.containsKey("stop")) { + tokenFilterBinder.addBinding("stop").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, StopTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("asciifolding")) { + tokenFilterBinder.addBinding("asciifolding").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, ASCIIFoldingTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("length")) { + tokenFilterBinder.addBinding("length").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, 
LengthTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("lowercase")) { + tokenFilterBinder.addBinding("lowercase").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, LowerCaseTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("porterStem")) { + tokenFilterBinder.addBinding("porterStem").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, PorterStemTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("standard")) { + tokenFilterBinder.addBinding("standard").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, StandardTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("nGram")) { + tokenFilterBinder.addBinding("nGram").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, NGramTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("edgeNGram")) { + tokenFilterBinder.addBinding("edgeNGram").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, EdgeNGramTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenFiltersSettings.containsKey("shingle")) { + tokenFilterBinder.addBinding("shingle").toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, ShingleTokenFilterFactory.class)).in(Scopes.SINGLETON); + } + + + MapBinder tokenizerBinder + = MapBinder.newMapBinder(binder(), String.class, TokenizerFactoryFactory.class); + + Map tokenizersSettings = settings.getGroups("index.analysis.tokenizer"); + for (Map.Entry entry : tokenizersSettings.entrySet()) { + String tokenizerName = entry.getKey(); + Settings tokenizerSettings = entry.getValue(); + + Class type = tokenizerSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenizerFactory"); + if (type == null) { + throw new IllegalArgumentException("Tokenizer [" + tokenizerName + "] must 
have a type associated with it"); + } + tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, type)).in(Scopes.SINGLETON); + } + // add defaults + if (!tokenizersSettings.containsKey("standard")) { + tokenizerBinder.addBinding("standard").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, StandardTokenizerFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenizersSettings.containsKey("keyword")) { + tokenizerBinder.addBinding("keyword").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, KeywordTokenizerFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenizersSettings.containsKey("letter")) { + tokenizerBinder.addBinding("letter").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, LetterTokenizerFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenizersSettings.containsKey("lowercase")) { + tokenizerBinder.addBinding("lowercase").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, LowerCaseTokenizerFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenizersSettings.containsKey("whitespace")) { + tokenizerBinder.addBinding("whitespace").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, WhitespaceTokenizerFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenizersSettings.containsKey("nGram")) { + tokenizerBinder.addBinding("nGram").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, NGramTokenizerFactory.class)).in(Scopes.SINGLETON); + } + if (!tokenizersSettings.containsKey("edgeNGram")) { + tokenizerBinder.addBinding("edgeNGram").toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, EdgeNGramTokenizerFactory.class)).in(Scopes.SINGLETON); + } + + + MapBinder analyzerBinder + = MapBinder.newMapBinder(binder(), String.class, AnalyzerProviderFactory.class); + + Map analyzersSettings = settings.getGroups("index.analysis.analyzer"); + for (Map.Entry entry : analyzersSettings.entrySet()) { + 
String analyzerName = entry.getKey(); + Settings analyzerSettings = entry.getValue(); + Class type = analyzerSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "AnalyzerProvider"); + if (type == null) { + // no specific type, check if it has a tokenizer associated with it + String tokenizerName = analyzerSettings.get("tokenizer"); + if (tokenizerName != null) { + // we have a tokenizer, use the CustomAnalyzer + type = CustomAnalyzerProvider.class; + } else { + throw new IllegalArgumentException("Analyzer [" + analyzerName + "] must have a type associated with it or a tokenizer"); + } + } + analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, type)).in(Scopes.SINGLETON); + } + + bind(AnalysisService.class).in(Scopes.SINGLETON); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java new file mode 100644 index 00000000000..05ab6c0e41c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.Inject; +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexLifecycle; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; + +import static com.google.common.collect.Maps.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public class AnalysisService extends AbstractIndexComponent { + + private final ImmutableMap analyzerProviders; + + private final ImmutableMap analyzers; + + private final ImmutableMap tokenizers; + + private final ImmutableMap tokenFilters; + + public AnalysisService(Index index) { + this(index, ImmutableSettings.Builder.EMPTY_SETTINGS, null, null, null); + } + + @Inject public AnalysisService(Index index, @IndexSettings Settings indexSettings, + @Nullable Map analyzerFactoryFactories, + @Nullable Map tokenizerFactoryFactories, + @Nullable Map tokenFilterFactoryFactories) { + super(index, indexSettings); + + Map analyzerProviders = newHashMap(); + if (analyzerFactoryFactories != null) { + Map analyzersSettings = indexSettings.getGroups("index.analysis.analyzer"); + for (Map.Entry entry : analyzerFactoryFactories.entrySet()) { + String analyzerName = entry.getKey(); + AnalyzerProviderFactory analyzerFactoryFactory = entry.getValue(); + + Settings analyzerSettings = analyzersSettings.get(analyzerName); + if (analyzerSettings == null) { + analyzerSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; + } + + AnalyzerProvider analyzerFactory = analyzerFactoryFactory.create(analyzerName, analyzerSettings); + 
analyzerProviders.put(analyzerName, analyzerFactory); + } + } + + // add some defaults + if (!analyzerProviders.containsKey("standard")) { + analyzerProviders.put("standard", new StandardAnalyzerProvider(index, indexSettings, "standard", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!analyzerProviders.containsKey("simple")) { + analyzerProviders.put("simple", new SimpleAnalyzerProvider(index, indexSettings, "simple", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!analyzerProviders.containsKey("stop")) { + analyzerProviders.put("stop", new StopAnalyzerProvider(index, indexSettings, "stop", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!analyzerProviders.containsKey("whitespace")) { + analyzerProviders.put("whitespace", new WhitespaceAnalyzerProvider(index, indexSettings, "whitespace", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!analyzerProviders.containsKey("keyword")) { + analyzerProviders.put("keyword", new KeywordAnalyzerProvider(index, indexSettings, "keyword", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!analyzerProviders.containsKey("default")) { + analyzerProviders.put("default", new StandardAnalyzerProvider(index, indexSettings, "default", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!analyzerProviders.containsKey("defaultIndex")) { + analyzerProviders.put("defaultIndex", analyzerProviders.get("default")); + } + if (!analyzerProviders.containsKey("defaultSearch")) { + analyzerProviders.put("defaultSearch", analyzerProviders.get("default")); + } + + this.analyzerProviders = ImmutableMap.copyOf(analyzerProviders); + + Map analyzers = newHashMap(); + for (AnalyzerProvider analyzerFactory : analyzerProviders.values()) { + analyzers.put(analyzerFactory.name(), analyzerFactory.get()); + } + this.analyzers = ImmutableMap.copyOf(analyzers); + + Map tokenizers = newHashMap(); + if (tokenizerFactoryFactories != null) { + Map tokenizersSettings = indexSettings.getGroups("index.analysis.tokenizer"); + for (Map.Entry 
entry : tokenizerFactoryFactories.entrySet()) { + String tokenizerName = entry.getKey(); + TokenizerFactoryFactory tokenizerFactoryFactory = entry.getValue(); + + Settings tokenizerSettings = tokenizersSettings.get(tokenizerName); + if (tokenizerSettings == null) { + tokenizerSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; + } + + TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, tokenizerSettings); + tokenizers.put(tokenizerName, tokenizerFactory); + } + } + this.tokenizers = ImmutableMap.copyOf(tokenizers); + + Map tokenFilters = newHashMap(); + if (tokenFilterFactoryFactories != null) { + Map tokenFiltersSettings = indexSettings.getGroups("index.analysis.filter"); + for (Map.Entry entry : tokenFilterFactoryFactories.entrySet()) { + String tokenFilterName = entry.getKey(); + TokenFilterFactoryFactory tokenFilterFactoryFactory = entry.getValue(); + + Settings tokenFilterSettings = tokenFiltersSettings.get(tokenFilterName); + if (tokenFilterSettings == null) { + tokenFilterSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; + } + + TokenFilterFactory tokenFilterFactory = tokenFilterFactoryFactory.create(tokenFilterName, tokenFilterSettings); + tokenFilters.put(tokenFilterName, tokenFilterFactory); + } + } + this.tokenFilters = ImmutableMap.copyOf(tokenFilters); + } + + public void close() { + for (Analyzer analyzer : analyzers.values()) { + analyzer.close(); + } + } + + public Analyzer analyzer(String name) { + return analyzers.get(name); + } + + public Analyzer defaultAnalyzer() { + return analyzers.get("default"); + } + + public Analyzer defaultIndexAnalyzer() { + return defaultAnalyzer(); + } + + public Analyzer defaultSearchAnalyzer() { + return defaultAnalyzer(); + } + + public AnalyzerProvider analyzerProvider(String name) { + return analyzerProviders.get(name); + } + + public TokenizerFactory tokenizer(String name) { + return tokenizers.get(name); + } + + public TokenFilterFactory tokenFilter(String name) { + return 
tokenFilters.get(name); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java new file mode 100644 index 00000000000..fe1c9962e6d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Provider; +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.index.IndexComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface AnalyzerProvider extends IndexComponent, Provider { + + String name(); + + T get(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java new file mode 100644 index 00000000000..6c5a946678e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public interface AnalyzerProviderFactory { + + AnalyzerProvider create(String name, Settings settings); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java new file mode 100644 index 00000000000..f68c7675b0d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class CustomAnalyzer extends Analyzer implements PositionIncrementGapAnalyzer { + + private final TokenizerFactory tokenizerFactory; + + private final TokenFilterFactory[] tokenFilters; + + private int positionIncrementGap = 0; + + public CustomAnalyzer(TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilters) { + this.tokenizerFactory = tokenizerFactory; + this.tokenFilters = tokenFilters; + } + + @Override public void setPositionIncrementGap(int positionIncrementGap) { + this.positionIncrementGap = positionIncrementGap; + } + + public TokenizerFactory tokenizerFactory() { + return tokenizerFactory; + } + + public TokenFilterFactory[] tokenFilters() { + return tokenFilters; + } + + @Override public int getPositionIncrementGap(String fieldName) { + return this.positionIncrementGap; + } + + @Override public TokenStream tokenStream(String fieldName, Reader reader) { + return buildHolder(reader).tokenStream; + } + + @Override public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { + Holder holder = (Holder) getPreviousTokenStream(); + if (holder == null) { + holder = buildHolder(reader); + setPreviousTokenStream(holder); + } else { + holder.tokenizer.reset(reader); + } + return holder.tokenStream; + } + + private Holder buildHolder(Reader input) { + Tokenizer tokenizer = tokenizerFactory.create(input); + TokenStream tokenStream = tokenizer; + for (TokenFilterFactory tokenFilter : tokenFilters) { + tokenStream = tokenFilter.create(tokenStream); + } + return new Holder(tokenizer, tokenStream); + } + + private static class Holder { + final Tokenizer tokenizer; + final TokenStream tokenStream; + + private Holder(Tokenizer 
tokenizer, TokenStream tokenStream) { + this.tokenizer = tokenizer; + this.tokenStream = tokenStream; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java new file mode 100644 index 00000000000..2d713f6fd56 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; + +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.Lists.*; + +/** + * A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list + * of {@link org.apache.lucene.analysis.TokenFilter}s. 
+ * + * @author kimchy (Shay Banon) + */ +public class CustomAnalyzerProvider extends AbstractAnalyzerProvider { + + private final TokenizerFactory tokenizerFactory; + + private final TokenFilterFactory[] tokenFilterFactories; + + private final CustomAnalyzer customAnalyzer; + + @Inject public CustomAnalyzerProvider(Index index, + Map tokenizerFactories, + Map tokenFilterFactories, + @IndexSettings Settings indexSettings, + @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + String tokenizerName = settings.get("tokenizer"); + if (tokenizerName == null) { + throw new IllegalArgumentException("Custom Analyzer [" + name + "] must be configured with a tokenizer"); + } + TokenizerFactoryFactory tokenizerFactoryFactory = tokenizerFactories.get(tokenizerName); + if (tokenizerFactoryFactory == null) { + throw new IllegalArgumentException("Custom Analyzer [" + name + "] failed to find tokenizer under name [" + tokenizerName + "]"); + } + Settings tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer").get(tokenizerName); + if (tokenizerSettings == null) { + tokenizerSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; + } + tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, tokenizerSettings); + + List tokenFilters = newArrayList(); + String[] tokenFilterNames = settings.getAsArray("filter"); + for (String tokenFilterName : tokenFilterNames) { + TokenFilterFactoryFactory tokenFilterFactoryFactory = tokenFilterFactories.get(tokenFilterName); + if (tokenFilterFactoryFactory == null) { + throw new IllegalArgumentException("Custom Analyzer [" + name + "] failed to find token filter under name [" + tokenFilterName + "]"); + } + Settings tokenFilterSettings = indexSettings.getGroups("index.analysis.filter").get(tokenFilterName); + if (tokenFilterSettings == null) { + tokenFilterSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; + } + tokenFilters.add(tokenFilterFactoryFactory.create(tokenFilterName, 
tokenFilterSettings)); + } + this.tokenFilterFactories = tokenFilters.toArray(new TokenFilterFactory[tokenFilters.size()]); + + this.customAnalyzer = new CustomAnalyzer(this.tokenizerFactory, this.tokenFilterFactories); + } + + @Override public CustomAnalyzer get() { + return this.customAnalyzer; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java new file mode 100644 index 00000000000..6c369c78e4d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; +import org.apache.lucene.analysis.ngram.NGramTokenFilter; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + + +/** + * @author kimchy (Shay Banon) + */ +public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { + + private final int minGram; + + private final int maxGram; + + private final EdgeNGramTokenFilter.Side side; + + @Inject public EdgeNGramTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.minGram = settings.getAsInt("minGram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); + this.maxGram = settings.getAsInt("maxGram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); + this.side = EdgeNGramTokenFilter.Side.getSide(settings.get("side", EdgeNGramTokenizer.DEFAULT_SIDE.getLabel())); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new EdgeNGramTokenFilter(tokenStream, side, minGram, maxGram); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java new file mode 100644 index 00000000000..bf4e74131b1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; +import org.apache.lucene.analysis.ngram.NGramTokenizer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { + + private final int minGram; + + private final int maxGram; + + private final EdgeNGramTokenizer.Side side; + + @Inject public EdgeNGramTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.minGram = settings.getAsInt("minGram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); + this.maxGram = settings.getAsInt("maxGram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + this.side = EdgeNGramTokenizer.Side.getSide(settings.get("side", EdgeNGramTokenizer.DEFAULT_SIDE.getLabel())); + } + + @Override public Tokenizer create(Reader reader) { + return new EdgeNGramTokenizer(reader, side, minGram, 
maxGram); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java new file mode 100644 index 00000000000..33fa2a80386 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.common.collect.ImmutableMap; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; + +import java.io.IOException; +import java.io.Reader; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class FieldNameAnalyzer extends Analyzer { + + private final ImmutableMap analyzers; + + private final Analyzer defaultAnalyzer; + + public FieldNameAnalyzer(Map analyzers, Analyzer defaultAnalyzer) { + this.analyzers = ImmutableMap.copyOf(analyzers); + this.defaultAnalyzer = defaultAnalyzer; + } + + @Override public TokenStream tokenStream(String fieldName, Reader reader) { + return getAnalyzer(fieldName).tokenStream(fieldName, reader); + } + + @Override public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { + return getAnalyzer(fieldName).reusableTokenStream(fieldName, reader); + } + + @Override public int getPositionIncrementGap(String fieldName) { + return getAnalyzer(fieldName).getPositionIncrementGap(fieldName); + } + + private Analyzer getAnalyzer(String name) { + Analyzer analyzer = analyzers.get(name); + if (analyzer != null) { + return analyzer; + } + return defaultAnalyzer; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java new file mode 100644 index 00000000000..fb39efbc09c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.KeywordAnalyzer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class KeywordAnalyzerProvider extends AbstractAnalyzerProvider { + + private final KeywordAnalyzer keywordAnalyzer; + + @Inject public KeywordAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.keywordAnalyzer = new KeywordAnalyzer(); + } + + @Override public KeywordAnalyzer get() { + return this.keywordAnalyzer; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java new file mode 100644 index 00000000000..5380e0c25be --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.KeywordTokenizer; +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class KeywordTokenizerFactory extends AbstractTokenizerFactory { + + private final int bufferSize; + + @Inject public KeywordTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + bufferSize = settings.getAsInt("bufferSize", 256); + } + + @Override public Tokenizer create(Reader reader) { + return new KeywordTokenizer(reader, bufferSize); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java new file mode 100644 index 00000000000..9f35e00d872 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.LengthFilter; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class LengthTokenFilterFactory extends AbstractTokenFilterFactory { + + private final int min; + + private final int max; + + @Inject public LengthTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + min = settings.getAsInt("min", 0); + max = settings.getAsInt("max", Integer.MAX_VALUE); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new LengthFilter(tokenStream, min, max); + } +} + diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java new file mode 100644 index 00000000000..b50092cf27c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.LetterTokenizer; +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class LetterTokenizerFactory extends AbstractTokenizerFactory { + + @Inject public LetterTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + } + + @Override public Tokenizer create(Reader reader) { + return new LetterTokenizer(reader); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java new file mode 100644 index 00000000000..2cf5bc2fc2b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.LowerCaseFilter; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory { + + @Inject public LowerCaseTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new LowerCaseFilter(tokenStream); + } +} + + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java new file mode 100644 index 00000000000..80665c088a0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.LowerCaseTokenizer; +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory { + + @Inject public LowerCaseTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + } + + @Override public Tokenizer create(Reader reader) { + return new LowerCaseTokenizer(reader); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java new file mode 100644 index 00000000000..64284073737 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ngram.NGramTokenFilter; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + + +/** + * @author kimchy (Shay Banon) + */ +public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { + + private final int minGram; + + private final int maxGram; + + + @Inject public NGramTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.minGram = settings.getAsInt("minGram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); + this.maxGram = settings.getAsInt("maxGram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new NGramTokenFilter(tokenStream, minGram, maxGram); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java new file mode 100644 index 00000000000..01a67bc0063 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.ngram.NGramTokenizer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NGramTokenizerFactory extends AbstractTokenizerFactory { + + private final int minGram; + + private final int maxGram; + + @Inject public NGramTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.minGram = settings.getAsInt("minGram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); + this.maxGram = settings.getAsInt("maxGram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + + @Override public Tokenizer create(Reader reader) { + return new NGramTokenizer(reader, minGram, maxGram); + } +} \ No 
newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java new file mode 100644 index 00000000000..f2e87bf27aa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class NumericAnalyzer extends Analyzer { + + @Override public TokenStream tokenStream(String fieldName, Reader reader) { + try { + return createNumericTokenizer(reader, new char[32]); + } catch (IOException e) { + throw new RuntimeException("Failed to create numeric tokenizer", e); + } + } + + @Override public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { + Holder holder = (Holder) getPreviousTokenStream(); + if (holder == null) { + char[] buffer = new char[120]; + holder = new Holder(createNumericTokenizer(reader, buffer), buffer); + setPreviousTokenStream(holder); + } else { + holder.tokenizer.reset(reader, holder.buffer); + } + return holder.tokenizer; + } + + protected abstract T createNumericTokenizer(Reader reader, char[] buffer) throws IOException; + + private static final class Holder { + final NumericTokenizer tokenizer; + final char[] buffer; + + private Holder(NumericTokenizer tokenizer, char[] buffer) { + this.tokenizer = tokenizer; + this.buffer = buffer; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java new file mode 100644 index 00000000000..c7e5bf685f5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.util.NumericUtils; +import org.joda.time.format.DateTimeFormatter; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericDateAnalyzer extends NumericAnalyzer { + + private final int precisionStep; + + private final DateTimeFormatter dateTimeFormatter; + + public NumericDateAnalyzer(DateTimeFormatter dateTimeFormatter) { + this(NumericUtils.PRECISION_STEP_DEFAULT, dateTimeFormatter); + } + + public NumericDateAnalyzer(int precisionStep, DateTimeFormatter dateTimeFormatter) { + this.precisionStep = precisionStep; + this.dateTimeFormatter = dateTimeFormatter; + } + + @Override protected NumericDateTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException { + return new NumericDateTokenizer(reader, precisionStep, buffer, dateTimeFormatter); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java new file mode 100644 index 00000000000..230941731fe --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + 
* or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.NumericTokenStream; +import org.joda.time.format.DateTimeFormatter; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericDateTokenizer extends NumericTokenizer { + + private final DateTimeFormatter dateTimeFormatter; + + public NumericDateTokenizer(Reader reader, int precisionStep, DateTimeFormatter dateTimeFormatter) throws IOException { + super(reader, new NumericTokenStream(precisionStep)); + this.dateTimeFormatter = dateTimeFormatter; + } + + public NumericDateTokenizer(Reader reader, int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException { + super(reader, new NumericTokenStream(precisionStep), buffer); + this.dateTimeFormatter = dateTimeFormatter; + } + + @Override protected void setValue(NumericTokenStream tokenStream, String value) { + tokenStream.setLongValue(dateTimeFormatter.parseMillis(value)); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java new file mode 100644 index 00000000000..537dd1f7d95 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.util.NumericUtils; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericDoubleAnalyzer extends NumericAnalyzer { + + private final int precisionStep; + + public NumericDoubleAnalyzer() { + this(NumericUtils.PRECISION_STEP_DEFAULT); + } + + public NumericDoubleAnalyzer(int precisionStep) { + this.precisionStep = precisionStep; + } + + @Override protected NumericDoubleTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException { + return new NumericDoubleTokenizer(reader, precisionStep, buffer); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java new file mode 100644 index 00000000000..35c2eab8706 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.NumericTokenStream; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericDoubleTokenizer extends NumericTokenizer { + + public NumericDoubleTokenizer(Reader reader, int precisionStep) throws IOException { + super(reader, new NumericTokenStream(precisionStep)); + } + + public NumericDoubleTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException { + super(reader, new NumericTokenStream(precisionStep), buffer); + } + + @Override protected void setValue(NumericTokenStream tokenStream, String value) { + tokenStream.setDoubleValue(Double.parseDouble(value)); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java new file mode 100644 index 00000000000..52003c7b0fe --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.util.NumericUtils; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericFloatAnalyzer extends NumericAnalyzer { + + private final int precisionStep; + + public NumericFloatAnalyzer() { + this(NumericUtils.PRECISION_STEP_DEFAULT); + } + + public NumericFloatAnalyzer(int precisionStep) { + this.precisionStep = precisionStep; + } + + @Override protected NumericFloatTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException { + return new NumericFloatTokenizer(reader, precisionStep, buffer); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java new file mode 100644 index 00000000000..7491f2c32c5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.NumericTokenStream; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericFloatTokenizer extends NumericTokenizer { + + public NumericFloatTokenizer(Reader reader, int precisionStep) throws IOException { + super(reader, new NumericTokenStream(precisionStep)); + } + + public NumericFloatTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException { + super(reader, new NumericTokenStream(precisionStep), buffer); + } + + @Override protected void setValue(NumericTokenStream tokenStream, String value) { + tokenStream.setFloatValue(Float.parseFloat(value)); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java new file mode 100644 index 00000000000..3fe6b7157ba --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.util.NumericUtils; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericIntegerAnalyzer extends NumericAnalyzer { + + private final int precisionStep; + + public NumericIntegerAnalyzer() { + this(NumericUtils.PRECISION_STEP_DEFAULT); + } + + public NumericIntegerAnalyzer(int precisionStep) { + this.precisionStep = precisionStep; + } + + @Override protected NumericIntegerTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException { + return new NumericIntegerTokenizer(reader, precisionStep, buffer); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java new file mode 100644 index 00000000000..efe50012633 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.NumericTokenStream; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericIntegerTokenizer extends NumericTokenizer { + + public NumericIntegerTokenizer(Reader reader, int precisionStep) throws IOException { + super(reader, new NumericTokenStream(precisionStep)); + } + + public NumericIntegerTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException { + super(reader, new NumericTokenStream(precisionStep), buffer); + } + + @Override protected void setValue(NumericTokenStream tokenStream, String value) { + tokenStream.setIntValue(Integer.parseInt(value)); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java new file mode 100644 index 00000000000..ed40b3af4ce --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.util.NumericUtils; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericLongAnalyzer extends NumericAnalyzer { + + private final int precisionStep; + + public NumericLongAnalyzer() { + this(NumericUtils.PRECISION_STEP_DEFAULT); + } + + public NumericLongAnalyzer(int precisionStep) { + this.precisionStep = precisionStep; + } + + @Override protected NumericLongTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException { + return new NumericLongTokenizer(reader, precisionStep, buffer); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java new file mode 100644 index 00000000000..0633797397b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.NumericTokenStream; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class NumericLongTokenizer extends NumericTokenizer { + + public NumericLongTokenizer(Reader reader, int precisionStep) throws IOException { + super(reader, new NumericTokenStream(precisionStep)); + } + + public NumericLongTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException { + super(reader, new NumericTokenStream(precisionStep), buffer); + } + + @Override protected void setValue(NumericTokenStream tokenStream, String value) { + tokenStream.setLongValue(Long.parseLong(value)); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java new file mode 100644 index 00000000000..9c2f5fe1978 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.Tokenizer; + +import java.io.IOException; +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class NumericTokenizer extends Tokenizer { + + private final NumericTokenStream numericTokenStream; + + protected NumericTokenizer(Reader reader, NumericTokenStream numericTokenStream) throws IOException { + super(numericTokenStream); + this.numericTokenStream = numericTokenStream; + reset(reader); + } + + protected NumericTokenizer(Reader reader, NumericTokenStream numericTokenStream, char[] buffer) throws IOException { + super(numericTokenStream); + this.numericTokenStream = numericTokenStream; + reset(reader, buffer); + } + + @Override public void reset(Reader input) throws IOException { + char[] buffer = new char[32]; + reset(input, buffer); + } + + public void reset(Reader input, char[] buffer) throws IOException { + super.reset(input); + int len = super.input.read(buffer); + String value = new String(buffer, 0, len); + setValue(numericTokenStream, value); + numericTokenStream.reset(); + } + + @Override public boolean incrementToken() throws IOException { + return numericTokenStream.incrementToken(); + } + + protected abstract void setValue(NumericTokenStream tokenStream, String value); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java new file mode 100644 index 00000000000..ae37763f214 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.PorterStemFilter; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class PorterStemTokenFilterFactory extends AbstractTokenFilterFactory { + + @Inject public PorterStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new PorterStemFilter(tokenStream); + } +} + + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/PositionIncrementGapAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/PositionIncrementGapAnalyzer.java new file mode 100644 index 00000000000..d5f8c9221a4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/PositionIncrementGapAnalyzer.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elastic Search and Shay Banon 
/**
 * Contract for analyzers whose position increment gap (the positional
 * distance inserted between multiple values of the same field) can be
 * configured after construction.
 *
 * @author kimchy (Shay Banon)
 */
public interface PositionIncrementGapAnalyzer {

    /**
     * Sets the position increment gap to apply between field values.
     */
    void setPositionIncrementGap(int positionIncrementGap);
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.shingle.ShingleFilter; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory { + + private final int maxShingleSize; + + private final boolean outputUnigrams; + + @Inject public ShingleTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + maxShingleSize = settings.getAsInt("maxShingleSize", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE); + outputUnigrams = settings.getAsBoolean("outputUnigrams", true); + } + + @Override public TokenStream create(TokenStream tokenStream) { + ShingleFilter filter = new ShingleFilter(tokenStream, maxShingleSize); + filter.setOutputUnigrams(outputUnigrams); + return filter; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java new file mode 100644 index 00000000000..3d579a1f6a6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java @@ -0,0 +1,44 @@ 
+/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.SimpleAnalyzer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleAnalyzerProvider extends AbstractAnalyzerProvider { + + private final SimpleAnalyzer simpleAnalyzer; + + @Inject public SimpleAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.simpleAnalyzer = new SimpleAnalyzer(); + } + + @Override public SimpleAnalyzer get() { + return this.simpleAnalyzer; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java new file mode 100644 index 00000000000..5e8b730a554 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterators; +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.StopAnalyzer; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.util.Version; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.util.Set; + +/** + * @author kimchy (Shay Banon) + */ +public class StandardAnalyzerProvider extends AbstractAnalyzerProvider { + + private final Set stopWords; + + private final int maxTokenLength; + + private final StandardAnalyzer standardAnalyzer; + + @Inject public StandardAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + String[] stopWords = settings.getAsArray("stopwords"); + if (stopWords.length > 0) { 
+ this.stopWords = ImmutableSet.copyOf(Iterators.forArray(stopWords)); + } else { + this.stopWords = ImmutableSet.copyOf((Iterable) StopAnalyzer.ENGLISH_STOP_WORDS_SET); + } + maxTokenLength = settings.getAsInt("maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); + standardAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, this.stopWords); + standardAnalyzer.setMaxTokenLength(maxTokenLength); + } + + @Override public StandardAnalyzer get() { + return this.standardAnalyzer; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java new file mode 100644 index 00000000000..cef4797eb2f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.standard.StandardFilter; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + + +/** + * @author kimchy (Shay Banon) + */ +public class StandardTokenFilterFactory extends AbstractTokenFilterFactory { + + @Inject public StandardTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + } + + @Override public TokenStream create(TokenStream tokenStream) { + return new StandardFilter(tokenStream); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java new file mode 100644 index 00000000000..8c52261cb3d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.Reader; + +/** + * @author kimchy (Shay Banon) + */ +public class StandardTokenizerFactory extends AbstractTokenizerFactory { + + private final int maxTokenLength; + + @Inject public StandardTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + maxTokenLength = settings.getAsInt("maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); + } + + @Override public Tokenizer create(Reader reader) { + StandardTokenizer tokenizer = new StandardTokenizer(Version.LUCENE_CURRENT, reader); + tokenizer.setMaxTokenLength(maxTokenLength); + return tokenizer; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java new file mode 100644 index 00000000000..07dd429db0f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.util.Version;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.util.Set;

/**
 * Provider for a shared {@link StopAnalyzer} instance.
 *
 * <p>The stop word list is taken from the {@code stopwords} array setting when
 * present and non-empty; otherwise Lucene's English defaults
 * ({@link StopAnalyzer#ENGLISH_STOP_WORDS_SET}) are used. The analyzer is
 * built once in the constructor and reused for every {@link #get()} call.
 *
 * @author kimchy (Shay Banon)
 */
public class StopAnalyzerProvider extends AbstractAnalyzerProvider<StopAnalyzer> {

    private final Set<String> stopWords;

    private final StopAnalyzer stopAnalyzer;

    @Inject public StopAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
        super(index, indexSettings, name);
        String[] configured = settings.getAsArray("stopwords");
        if (configured.length > 0) {
            this.stopWords = ImmutableSet.copyOf(Iterators.forArray(configured));
        } else {
            // fall back to Lucene's built-in English stop word set
            this.stopWords = ImmutableSet.copyOf((Iterable<String>) StopAnalyzer.ENGLISH_STOP_WORDS_SET);
        }
        this.stopAnalyzer = new StopAnalyzer(Version.LUCENE_CURRENT, this.stopWords);
    }

    @Override public StopAnalyzer get() {
        return this.stopAnalyzer;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.util.Set;

/**
 * Factory for Lucene's {@link StopFilter}.
 *
 * <p>Settings:
 * <ul>
 * <li>{@code stopwords} — array of stop words; defaults to
 *     {@link StopAnalyzer#ENGLISH_STOP_WORDS_SET} when absent or empty.</li>
 * <li>{@code enablePositionIncrements} — default {@code true}.</li>
 * <li>{@code ignoreCase} — default {@code false}.</li>
 * </ul>
 *
 * @author kimchy (Shay Banon)
 */
public class StopTokenFilterFactory extends AbstractTokenFilterFactory {

    private final Set<String> stopWords;

    private final boolean enablePositionIncrements;

    private final boolean ignoreCase;

    @Inject public StopTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
        super(index, indexSettings, name);
        String[] configured = settings.getAsArray("stopwords");
        this.stopWords = configured.length > 0
                ? ImmutableSet.copyOf(Iterators.forArray(configured))
                : ImmutableSet.copyOf((Iterable<String>) StopAnalyzer.ENGLISH_STOP_WORDS_SET);
        this.enablePositionIncrements = settings.getAsBoolean("enablePositionIncrements", true);
        this.ignoreCase = settings.getAsBoolean("ignoreCase", false);
    }

    @Override public TokenStream create(TokenStream tokenStream) {
        return new StopFilter(enablePositionIncrements, tokenStream, stopWords, ignoreCase);
    }

    /** The effective stop word set (configured or English defaults). */
    public Set<String> stopWords() {
        return stopWords;
    }

    /** Whether position increments are preserved across removed tokens. */
    public boolean enablePositionIncrements() {
        return enablePositionIncrements;
    }

    /** Whether stop word matching is case-insensitive. */
    public boolean ignoreCase() {
        return ignoreCase;
    }
}
00000000000..01c4bfa980c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.index.IndexComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface TokenFilterFactory extends IndexComponent { + + String name(); + + TokenStream create(TokenStream tokenStream); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java new file mode 100644 index 00000000000..32d73778076 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public interface TokenFilterFactoryFactory { + + TokenFilterFactory create(String name, Settings settings); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java new file mode 100644 index 00000000000..c5e01cf0ad7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.elasticsearch.index.IndexComponent;

import java.io.Reader;

/**
 * A named, index-scoped factory that builds a {@link Tokenizer} over a
 * character stream.
 *
 * @author kimchy (Shay Banon)
 */
public interface TokenizerFactory extends IndexComponent {

    /** The logical name this tokenizer is registered under. */
    String name();

    /** Creates a new tokenizer reading from {@code reader}. */
    Tokenizer create(Reader reader);
}
+ */ + +package org.elasticsearch.index.analysis; + +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public interface TokenizerFactoryFactory { + + TokenizerFactory create(String name, Settings settings); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java new file mode 100644 index 00000000000..d46e9473d72 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.analysis.WhitespaceAnalyzer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class WhitespaceAnalyzerProvider extends AbstractAnalyzerProvider { + + private final WhitespaceAnalyzer analyzer; + + @Inject public WhitespaceAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.analyzer = new WhitespaceAnalyzer(); + } + + @Override public WhitespaceAnalyzer get() { + return this.analyzer; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java new file mode 100644 index 00000000000..7872247a43b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.Reader;

/**
 * Factory for Lucene's {@link WhitespaceTokenizer}, which splits text on
 * whitespace only. Takes no settings.
 *
 * @author kimchy (Shay Banon)
 */
public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory {

    @Inject public WhitespaceTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
        super(index, indexSettings, name);
        // no settings are read; the tokenizer is configuration-free
    }

    @Override public Tokenizer create(Reader reader) {
        return new WhitespaceTokenizer(reader);
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.cache.filter; + +import org.apache.lucene.search.Filter; +import org.elasticsearch.index.IndexComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface FilterCache extends IndexComponent { + + Filter cache(Filter filterToCache); + + void close(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java new file mode 100644 index 00000000000..93dd0a24017 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.cache.filter; + +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; +import org.elasticsearch.index.IndexLifecycle; +import org.elasticsearch.index.cache.filter.soft.SoftFilterCache; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public class FilterCacheModule extends AbstractModule { + + public static final class FilterCacheSettings { + public static final String FILTER_CACHE_TYPE = "index.cache.filter.type"; + } + + private final Settings settings; + + public FilterCacheModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(FilterCache.class) + .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, SoftFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache")) + .in(Scopes.SINGLETON); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java new file mode 100644 index 00000000000..e8636454e2e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.cache.filter.none; + +import com.google.inject.Inject; +import org.apache.lucene.search.Filter; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class NoneFilterCache extends AbstractIndexComponent implements FilterCache { + + @Inject public NoneFilterCache(Index index, @IndexSettings Settings indexSettings) { + super(index, indexSettings); + logger.debug("Using no filter cache"); + } + + @Override public void close() { + // nothing to do here + } + + @Override public Filter cache(Filter filterToCache) { + return filterToCache; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/soft/SoftFilterCache.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/soft/SoftFilterCache.java new file mode 100644 index 00000000000..ba84907920d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/cache/filter/soft/SoftFilterCache.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.cache.filter.soft;

import com.google.common.collect.MapMaker;
import com.google.inject.Inject;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.support.AbstractConcurrentMapFilterCache;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.ConcurrentMap;

/**
 * A {@link AbstractConcurrentMapFilterCache} whose per-reader maps hold
 * cached doc id sets via soft references, letting the GC reclaim entries
 * under memory pressure.
 *
 * @author kimchy (Shay Banon)
 */
public class SoftFilterCache extends AbstractConcurrentMapFilterCache {

    @Inject public SoftFilterCache(Index index, @IndexSettings Settings indexSettings, ThreadPool threadPool) {
        super(index, indexSettings, threadPool);
    }

    @Override protected ConcurrentMap buildMap() {
        // soft-valued map: values may be collected when the JVM runs low on memory
        return new MapMaker().softValues().makeMap();
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.cache.filter.support;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;

import static org.elasticsearch.util.concurrent.ConcurrentMaps.*;
import static org.elasticsearch.util.lucene.docidset.DocIdSets.*;

/**
 * Base class for {@link FilterCache} implementations backed by a two-level
 * concurrent map: {@link IndexReader} -&gt; ({@link Filter} -&gt; cached
 * {@link DocIdSet}). Subclasses choose the per-reader map implementation
 * (e.g. soft- or weak-valued) via {@link #buildMap()}.
 *
 * <p>A background task, scheduled every {@code readerCleanerSchedule}
 * (component setting, default 1 minute), evicts entries for readers whose
 * ref count has dropped to zero.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class AbstractConcurrentMapFilterCache extends AbstractIndexComponent implements FilterCache {

    // reader -> (filter -> doc id set); per-reader maps come from buildMap()
    private final ConcurrentMap<IndexReader, ConcurrentMap<Filter, DocIdSet>> cache;

    private final TimeValue readerCleanerSchedule;

    private final Future scheduleFuture;

    protected AbstractConcurrentMapFilterCache(Index index, @IndexSettings Settings indexSettings, ThreadPool threadPool) {
        super(index, indexSettings);

        this.readerCleanerSchedule = componentSettings.getAsTime("readerCleanerSchedule", TimeValue.timeValueMinutes(1));

        // FIX: this base class is shared by soft/weak/other caches, so don't
        // claim "weak filter cache" here regardless of the actual subclass
        logger.debug("Using filter cache with readerCleanerSchedule [{}]", readerCleanerSchedule);

        this.cache = newConcurrentMap();
        this.scheduleFuture = threadPool.scheduleWithFixedDelay(new IndexReaderCleaner(), readerCleanerSchedule);
    }

    @Override public void close() {
        scheduleFuture.cancel(false);
        cache.clear();
    }

    @Override public Filter cache(Filter filterToCache) {
        return new FilterCacheFilterWrapper(filterToCache);
    }

    /**
     * Builds the per-reader filter -&gt; doc id set map. Subclasses typically
     * return a soft- or weak-valued concurrent map.
     */
    protected abstract ConcurrentMap<Filter, DocIdSet> buildMap();

    /** Periodically drops cache entries for readers that are no longer referenced. */
    private class IndexReaderCleaner implements Runnable {
        @Override public void run() {
            for (Iterator<IndexReader> readerIt = cache.keySet().iterator(); readerIt.hasNext();) {
                IndexReader reader = readerIt.next();
                if (reader.getRefCount() <= 0) {
                    readerIt.remove();
                }
            }
        }
    }

    /**
     * Wraps a filter so its doc id sets are looked up in / stored into the
     * two-level cache. Renamed from {@code SoftFilterCacheFilterWrapper}:
     * the wrapper is cache-flavor agnostic and lives in the shared base class.
     */
    private class FilterCacheFilterWrapper extends Filter {

        private final Filter filter;

        private FilterCacheFilterWrapper(Filter filter) {
            this.filter = filter;
        }

        @Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
            ConcurrentMap<Filter, DocIdSet> cachedFilters = cache.get(reader);
            if (cachedFilters == null) {
                // FIX: honor the putIfAbsent result so concurrent callers all
                // use the single map instance that actually won the race
                ConcurrentMap<Filter, DocIdSet> newMap = buildMap();
                cachedFilters = cache.putIfAbsent(reader, newMap);
                if (cachedFilters == null) {
                    cachedFilters = newMap;
                }
            }
            DocIdSet docIdSet = cachedFilters.get(filter);
            if (docIdSet != null) {
                return docIdSet;
            }
            docIdSet = filter.getDocIdSet(reader);
            docIdSet = cacheable(reader, docIdSet);
            // FIX: if another thread cached a set first, return that canonical
            // instance instead of the one we just built
            DocIdSet existing = cachedFilters.putIfAbsent(filter, docIdSet);
            return existing != null ? existing : docIdSet;
        }

        public String toString() {
            return "FilterCacheFilterWrapper(" + filter + ")";
        }

        public boolean equals(Object o) {
            if (!(o instanceof FilterCacheFilterWrapper)) return false;
            return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
        }

        public int hashCode() {
            return filter.hashCode() ^ 0x1117BF25;
        }
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.cache.filter.weak;

import com.google.common.collect.MapMaker;
import com.google.inject.Inject;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.support.AbstractConcurrentMapFilterCache;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.ConcurrentMap;

/**
 * A {@link AbstractConcurrentMapFilterCache} whose per-reader maps hold
 * cached doc id sets via weak references, so entries disappear as soon as
 * nothing else references them.
 *
 * @author kimchy (Shay Banon)
 */
public class WeakFilterCache extends AbstractConcurrentMapFilterCache {

    @Inject public WeakFilterCache(Index index, @IndexSettings Settings indexSettings, ThreadPool threadPool) {
        super(index, indexSettings, threadPool);
    }

    @Override protected ConcurrentMap buildMap() {
        // weak-valued map: entries are collected once no strong refs remain
        return new MapMaker().weakValues().makeMap();
    }
}
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import com.google.inject.AbstractModule; +import com.google.inject.name.Names; +import org.apache.lucene.index.IndexDeletionPolicy; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.index.deletionpolicy.DeletionPolicyModule.DeletionPolicySettings.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class DeletionPolicyModule extends AbstractModule { + + public static class DeletionPolicySettings { + public static final String TYPE = "index.deletionpolicy.type"; + } + + private final Settings settings; + + public DeletionPolicyModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(IndexDeletionPolicy.class) + .annotatedWith(Names.named("actual")) + .to(settings.getAsClass(TYPE, KeepOnlyLastDeletionPolicy.class)) + .asEagerSingleton(); + + bind(SnapshotDeletionPolicy.class) + .asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java new file mode 100644 index 00000000000..ed82b285a0a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import com.google.inject.Inject; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexDeletionPolicy; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class KeepLastNDeletionPolicy extends AbstractIndexShardComponent implements IndexDeletionPolicy { + + private final int numToKeep; + + @Inject public KeepLastNDeletionPolicy(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + this.numToKeep = componentSettings.getAsInt("numToKeep", 5); + logger.debug("Using [KeepLastN] deletion policy with numToKeep [{}]", numToKeep); + } + + public void onInit(List commits) throws IOException { + // do no deletions on init + doDeletes(commits); + } + + public void onCommit(List commits) throws IOException { + doDeletes(commits); + } + + private void doDeletes(List commits) { + int size = commits.size(); + for (int i = 0; i < size - numToKeep; i++) { + commits.get(i).delete(); + } + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java new file mode 100644 index 00000000000..28c82a04bae --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import com.google.inject.Inject; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexDeletionPolicy; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.util.settings.Settings; + +import java.util.List; + +/** + * This {@link org.apache.lucene.index.IndexDeletionPolicy} implementation that + * keeps only the most recent commit and immediately removes + * all prior commits after a new commit is done. This is + * the default deletion policy. 
+ */ +@IndexShardLifecycle +public class KeepOnlyLastDeletionPolicy extends AbstractIndexShardComponent implements IndexDeletionPolicy { + + @Inject public KeepOnlyLastDeletionPolicy(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + logger.debug("Using [KeepOnlyLast] deletion policy"); + } + + /** + * Deletes all commits except the most recent one. + */ + public void onInit(List commits) { + // Note that commits.size() should normally be 1: + onCommit(commits); + } + + /** + * Deletes all commits except the most recent one. + */ + public void onCommit(List commits) { + // Note that commits.size() should normally be 2 (if not + // called by onInit above): + int size = commits.size(); + for (int i = 0; i < size - 1; i++) { + commits.get(i).delete(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java new file mode 100644 index 00000000000..761d37755cd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java @@ -0,0 +1,207 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import com.google.inject.Inject; +import com.google.inject.name.Named; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexDeletionPolicy; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Snapshot deletion policy allows to get snapshots of an index state (last commit or all commits) + * and if the deletion policy is used with all open index writers (JVM level) then the snapshot + * state will not be deleted until it will be released. + * + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class SnapshotDeletionPolicy extends AbstractIndexShardComponent implements IndexDeletionPolicy { + + private final IndexDeletionPolicy primary; + + private ConcurrentMap snapshots = new ConcurrentHashMap(); + + private volatile List commits; + + private final Object mutex = new Object(); + + private SnapshotIndexCommit lastCommit; + + /** + * Constructs a new snapshot deletion policy that wraps the provided deletion policy. + */ + @Inject public SnapshotDeletionPolicy(@Named("actual") IndexDeletionPolicy primary) { + super(((IndexShardComponent) primary).shardId(), ((IndexShardComponent) primary).indexSettings()); + this.primary = primary; + } + + /** + * Called by Lucene. Same as {@link #onCommit(java.util.List)}. + */ + public void onInit(List commits) throws IOException { + onCommit(commits); + } + + /** + * Called by Lucene.. 
Wraps the provided commits with {@link SnapshotIndexCommit} + * and delegates to the wrapped deletion policy. + */ + public void onCommit(List commits) throws IOException { + synchronized (mutex) { + List snapshotCommits = wrapCommits(commits); + primary.onCommit(snapshotCommits); + + // clean snapshots that their respective counts are 0 (should not really happen) + for (Iterator it = snapshots.values().iterator(); it.hasNext();) { + SnapshotHolder holder = it.next(); + if (holder.counter <= 0) { + it.remove(); + } + } + // build the current commits list (all the ones that are not deleted by the primary) + List newCommits = new ArrayList(); + for (SnapshotIndexCommit commit : snapshotCommits) { + if (!commit.isDeleted()) { + newCommits.add(commit); + } + } + this.commits = newCommits; + // the last commit that is not deleted + this.lastCommit = newCommits.get(newCommits.size() - 1); + } + } + + /** + * Snapshots all the current commits in the index. Make sure to call + * {@link SnapshotIndexCommits#release()} to release it. + */ + public SnapshotIndexCommits snapshots() throws IOException { + synchronized (mutex) { + if (snapshots == null) { + throw new IllegalStateException("Snapshot deletion policy has not been init yet..."); + } + List result = new ArrayList(commits.size()); + for (SnapshotIndexCommit commit : commits) { + result.add(snapshot(commit)); + } + return new SnapshotIndexCommits(result); + } + } + + /** + * Returns a snapshot of the index (for the last commit point). Make + * sure to call {@link SnapshotIndexCommit#release()} in order to release it. + */ + public SnapshotIndexCommit snapshot() throws IOException { + synchronized (mutex) { + if (lastCommit == null) { + throw new IllegalStateException("Snapshot deletion policy has not been init yet..."); + } + return snapshot(lastCommit); + } + } + + /** + * Helper method to snapshot a give commit. 
+ */ + private SnapshotIndexCommit snapshot(SnapshotIndexCommit commit) throws IOException { + SnapshotHolder snapshotHolder = snapshots.get(commit.getVersion()); + if (snapshotHolder == null) { + snapshotHolder = new SnapshotHolder(0); + snapshots.put(commit.getVersion(), snapshotHolder); + } + snapshotHolder.counter++; + return new OneTimeReleaseSnapshotIndexCommit(this, commit); + } + + /** + * Returns true if the version has been snapshotted. + */ + boolean isHeld(long version) { + SnapshotDeletionPolicy.SnapshotHolder holder = snapshots.get(version); + return holder != null && holder.counter > 0; + } + + /** + * Releases the version provided. Returns true if the release was successful. + */ + boolean release(long version) { + synchronized (mutex) { + SnapshotDeletionPolicy.SnapshotHolder holder = snapshots.get(version); + if (holder == null) { + return false; + } + if (holder.counter <= 0) { + snapshots.remove(version); + return false; + } + if (--holder.counter == 0) { + snapshots.remove(version); + } + return true; + } + } + + /** + * A class that wraps an {@link SnapshotIndexCommit} and makes sure that release will only + * be called once on it. 
+ */ + private static class OneTimeReleaseSnapshotIndexCommit extends SnapshotIndexCommit { + private volatile boolean released = false; + + OneTimeReleaseSnapshotIndexCommit(SnapshotDeletionPolicy deletionPolicy, IndexCommit cp) throws IOException { + super(deletionPolicy, cp); + } + + @Override public boolean release() { + if (released) { + return false; + } + released = true; + return ((SnapshotIndexCommit) delegate).release(); + } + } + + private static class SnapshotHolder { + int counter; + + private SnapshotHolder(int counter) { + this.counter = counter; + } + } + + private List wrapCommits(List commits) throws IOException { + final int count = commits.size(); + List snapshotCommits = new ArrayList(count); + for (int i = 0; i < count; i++) + snapshotCommits.add(new SnapshotIndexCommit(this, commits.get(i))); + return snapshotCommits; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java new file mode 100644 index 00000000000..f36a431fedc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.util.lease.Releasable; +import org.elasticsearch.util.lucene.IndexCommitDelegate; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * A snapshot index commit point. While this is held and {@link #release()} + * was not called, no files will be deleted that relates to this commit point + * ({@link #getFileNames()}). + * + * @author kimchy (Shay Banon) + */ +public class SnapshotIndexCommit extends IndexCommitDelegate implements Releasable { + + private final SnapshotDeletionPolicy deletionPolicy; + + private final String[] files; + + SnapshotIndexCommit(SnapshotDeletionPolicy deletionPolicy, IndexCommit cp) throws IOException { + super(cp); + this.deletionPolicy = deletionPolicy; + ArrayList tmpFiles = new ArrayList(); + for (String o : cp.getFileNames()) { + tmpFiles.add(o); + } + files = tmpFiles.toArray(new String[tmpFiles.size()]); + } + + public String[] getFiles() { + return files; + } + + /** + * Releases the current snapshot, returning true if it was + * actually released. + */ + public boolean release() { + return deletionPolicy.release(getVersion()); + } + + /** + * Override the delete operation, and only actually delete it if it + * is not held by the {@link SnapshotDeletionPolicy}. 
+ */ + @Override public void delete() { + if (!deletionPolicy.isHeld(getVersion())) { + delegate.delete(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java new file mode 100644 index 00000000000..d2598a12ad6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import org.elasticsearch.util.lease.Releasable; + +import java.util.Iterator; +import java.util.List; + +/** + * Represents a snapshot view of several commits. Provides a way to iterate over + * them as well as a simple method to release all of them. 
+ * + * @author kimchy (Shay Banon) + */ +public class SnapshotIndexCommits implements Iterable, Releasable { + + private final List commits; + + public SnapshotIndexCommits(List commits) { + this.commits = commits; + } + + public int size() { + return commits.size(); + } + + @Override public Iterator iterator() { + return commits.iterator(); + } + + public boolean release() { + boolean result = false; + for (SnapshotIndexCommit snapshot : commits) { + result |= snapshot.release(); + } + return result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/CloseEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/CloseEngineException.java new file mode 100644 index 00000000000..11b3602580b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/CloseEngineException.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * An exception indicating that an {@link org.elasticsearch.index.engine.Engine} close failed. 
+ * + * @author kimchy (Shay Banon) + */ +public class CloseEngineException extends EngineException { + + public CloseEngineException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public CloseEngineException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java new file mode 100644 index 00000000000..b79e693236e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class CreateFailedEngineException extends EngineException { + + private final String type; + + private final String id; + + public CreateFailedEngineException(ShardId shardId, Engine.Create create, Throwable cause) { + super(shardId, "Create failed for [" + create.type() + "#" + create.id() + "]", cause); + this.type = create.type(); + this.id = create.id(); + } + + public String type() { + return this.type; + } + + public String id() { + return this.id; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java new file mode 100644 index 00000000000..57bab23873f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteByQueryFailedEngineException extends EngineException { + + public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) { + super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java new file mode 100644 index 00000000000..9ce29ee3db5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class DeleteFailedEngineException extends EngineException { + + public DeleteFailedEngineException(ShardId shardId, Engine.Delete delete, Throwable cause) { + super(shardId, "Delete failed for [" + delete.uid().text() + "]", cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/Engine.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/Engine.java new file mode 100644 index 00000000000..6e53ed86f3a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -0,0 +1,240 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.shard.IndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.lease.Releasable; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +@IndexShardLifecycle +public interface Engine extends IndexShardComponent { + + /** + * Starts the Engine. + * + *

Note, after the creation and before the call to start, the store might + * be changed. + */ + void start() throws EngineException; + + void create(Create create) throws EngineException; + + void index(Index index) throws EngineException; + + void delete(Delete delete) throws EngineException; + + void delete(DeleteByQuery delete) throws EngineException; + + Searcher searcher() throws EngineException; + + /** + * Refreshes the engine for new search operations to reflect the latest + * changes. Pass true if the refresh operation should include + * all the operations performed up to this call. + */ + void refresh(boolean waitForOperations) throws EngineException; + + /** + * Flushes the state of the engine, clearing memory. + */ + void flush() throws EngineException, FlushNotAllowedEngineException; + + void snapshot(SnapshotHandler snapshotHandler) throws EngineException; + + void recover(RecoveryHandler recoveryHandler) throws EngineException; + + /** + * Returns the estimated flushable memory size. Returns null if not available. + */ + SizeValue estimateFlushableMemorySize(); + + void close() throws ElasticSearchException; + + /** + * Recovery allow to start the recovery process. It is built of three phases. + * + *

The first phase allows to take a snapshot of the master index. Once this + * is taken, no commit operations are effectively allowed on the index until the recovery + * phases are through. + * + *

The seconds phase takes a snapshot of the current transaction log. + * + *

The last phase returns the remaining transaction log. During this phase, no dirty + * operations are allowed on the index. + */ + static interface RecoveryHandler { + + void phase1(SnapshotIndexCommit snapshot) throws ElasticSearchException; + + void phase2(Translog.Snapshot snapshot) throws ElasticSearchException; + + void phase3(Translog.Snapshot snapshot) throws ElasticSearchException; + } + + /** + */ + static interface SnapshotHandler { + + void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException; + } + + static interface Searcher extends Releasable { + + IndexReader reader(); + + IndexSearcher searcher(); + } + + static class Create { + private final Document document; + private final Analyzer analyzer; + private final String type; + private final String id; + private final String source; + + public Create(Document document, Analyzer analyzer, String type, String id, String source) { + this.document = document; + this.analyzer = analyzer; + this.type = type; + this.id = id; + this.source = source; + } + + public String type() { + return this.type; + } + + public String id() { + return this.id; + } + + public Document doc() { + return this.document; + } + + public Analyzer analyzer() { + return this.analyzer; + } + + public String source() { + return this.source; + } + } + + static class Index { + private final Term uid; + private final Document document; + private final Analyzer analyzer; + private final String type; + private final String id; + private final String source; + + public Index(Term uid, Document document, Analyzer analyzer, String type, String id, String source) { + this.uid = uid; + this.document = document; + this.analyzer = analyzer; + this.type = type; + this.id = id; + this.source = source; + } + + public Term uid() { + return this.uid; + } + + public Document doc() { + return this.document; + } + + public Analyzer analyzer() { + return this.analyzer; + } + + public String id() { + 
return this.id; + } + + public String type() { + return this.type; + } + + public String source() { + return this.source; + } + } + + static class Delete { + private final Term uid; + + public Delete(Term uid) { + this.uid = uid; + } + + public Term uid() { + return this.uid; + } + } + + static class DeleteByQuery { + private final Query query; + private final String queryParserName; + private final String source; + private final String[] types; + + public DeleteByQuery(Query query, String source, @Nullable String queryParserName, String... types) { + this.query = query; + this.source = source; + this.queryParserName = queryParserName; + this.types = types; + } + + public String queryParserName() { + return this.queryParserName; + } + + public Query query() { + return this.query; + } + + public String source() { + return this.source; + } + + public String[] types() { + return this.types; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java new file mode 100644 index 00000000000..b094b56bd57 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class EngineAlreadyStartedException extends EngineException { + + public EngineAlreadyStartedException(ShardId shardId) { + super(shardId, "Already started"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java new file mode 100644 index 00000000000..147719e66c6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * An exception indicating that an {@link Engine} creation failed. + * + * @author kimchy (Shay Banon) + */ +public class EngineCreationFailureException extends EngineException { + + public EngineCreationFailureException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineException.java new file mode 100644 index 00000000000..5aba7371399 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class EngineException extends IndexShardException { + + public EngineException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public EngineException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineModule.java new file mode 100644 index 00000000000..233b289e0a9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/EngineModule.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.engine.robin.RobinEngineModule; +import org.elasticsearch.util.guice.ModulesFactory; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class EngineModule extends AbstractModule { + + public static final class EngineSettings { + public static final String ENGINE_TYPE = "index.engine.type"; + } + + private final Settings settings; + + public EngineModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + ModulesFactory.createModule(settings.getAsClass(EngineSettings.ENGINE_TYPE, RobinEngineModule.class, "org.elasticsearch.index.engine.", "EngineModule"), settings).configure(binder()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java new file mode 100644 index 00000000000..ab804082a75 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class FlushFailedEngineException extends EngineException { + + public FlushFailedEngineException(ShardId shardId, Throwable t) { + super(shardId, "Flush failed", t); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java new file mode 100644 index 00000000000..871be010f3a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class FlushNotAllowedEngineException extends EngineException { + + public FlushNotAllowedEngineException(ShardId shardId, String msg) { + super(shardId, msg); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java new file mode 100644 index 00000000000..7d8e731f754 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexFailedEngineException extends EngineException { + + private final String type; + + private final String id; + + public IndexFailedEngineException(ShardId shardId, Engine.Index index, Throwable cause) { + super(shardId, "Index failed for [" + index.type() + "#" + index.id() + "]", cause); + this.type = index.type(); + this.id = index.id(); + } + + public String type() { + return this.type; + } + + public String id() { + return this.id; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java new file mode 100644 index 00000000000..24c2467d426 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class RecoveryEngineException extends EngineException { + + private final int phase; + + public RecoveryEngineException(ShardId shardId, int phase, String msg, Throwable cause) { + super(shardId, "Phase[" + phase + "] " + msg, cause); + this.phase = phase; + } + + public int phase() { + return phase; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java new file mode 100644 index 00000000000..b4f8dd5dc40 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class RefreshFailedEngineException extends EngineException { + + public RefreshFailedEngineException(ShardId shardId, Throwable t) { + super(shardId, "Refresh failed", t); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java new file mode 100644 index 00000000000..525ad9d4a05 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class RollbackFailedEngineException extends EngineException { + + public RollbackFailedEngineException(ShardId shardId, Throwable t) { + super(shardId, "Rollback failed", t); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java new file mode 100644 index 00000000000..910600f366d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class RollbackNotAllowedEngineException extends EngineException { + + public RollbackNotAllowedEngineException(ShardId shardId, String msg) { + super(shardId, msg); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/ScheduledRefreshableEngine.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/ScheduledRefreshableEngine.java new file mode 100644 index 00000000000..d8ab83db285 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/ScheduledRefreshableEngine.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.util.TimeValue; + +/** + * @author kimchy (Shay Banon) + */ +public interface ScheduledRefreshableEngine { + + TimeValue refreshInterval(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java new file mode 100644 index 00000000000..b61b7e71202 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class SnapshotFailedEngineException extends EngineException { + + public SnapshotFailedEngineException(ShardId shardId, Throwable cause) { + super(shardId, "Snapshot failed", cause); + } + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/robin/RobinEngine.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/robin/RobinEngine.java new file mode 100644 index 00000000000..536ce16b9ed --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/robin/RobinEngine.java @@ -0,0 +1,424 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine.robin; + +import com.google.inject.Inject; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.engine.*; +import org.elasticsearch.index.merge.policy.MergePolicyProvider; +import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.util.Preconditions; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.concurrent.resource.AcquirableResource; +import org.elasticsearch.util.lucene.IndexWriters; +import org.elasticsearch.util.lucene.ReaderSearcherHolder; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static org.elasticsearch.util.TimeValue.*; +import static org.elasticsearch.util.concurrent.resource.AcquirableResourceFactory.*; +import static org.elasticsearch.util.lucene.Lucene.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class RobinEngine extends AbstractIndexShardComponent implements Engine, 
ScheduledRefreshableEngine { + + private final SizeValue ramBufferSize; + + private final TimeValue refreshInterval; + + private final int termIndexInterval; + + private final ReadWriteLock rwl = new ReentrantReadWriteLock(); + + private final AtomicBoolean refreshMutex = new AtomicBoolean(); + + private final Store store; + + private final SnapshotDeletionPolicy deletionPolicy; + + private final Translog translog; + + private final MergePolicyProvider mergePolicyProvider; + + private final MergeSchedulerProvider mergeScheduler; + + private final AnalysisService analysisService; + + private final SimilarityService similarityService; + + private volatile IndexWriter indexWriter; + + private volatile AcquirableResource nrtResource; + + private volatile boolean closed = false; + + // flag indicating if a dirty operation has occurred since the last refresh + private volatile boolean dirty = false; + + private volatile int disableFlushCounter = 0; + + @Inject public RobinEngine(ShardId shardId, @IndexSettings Settings indexSettings, Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog, + MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, + AnalysisService analysisService, SimilarityService similarityService) throws EngineException { + super(shardId, indexSettings); + Preconditions.checkNotNull(store, "Store must be provided to the engine"); + Preconditions.checkNotNull(deletionPolicy, "Snapshot deletion policy must be provided to the engine"); + Preconditions.checkNotNull(translog, "Translog must be provided to the engine"); + + this.ramBufferSize = componentSettings.getAsSize("ramBufferSize", new SizeValue(64, SizeUnit.MB)); + this.refreshInterval = componentSettings.getAsTime("refreshInterval", timeValueSeconds(1)); + this.termIndexInterval = componentSettings.getAsInt("termIndexInterval", IndexWriter.DEFAULT_TERM_INDEX_INTERVAL); + + this.store = store; + this.deletionPolicy = deletionPolicy; + this.translog = translog; + 
this.mergePolicyProvider = mergePolicyProvider; + this.mergeScheduler = mergeScheduler; + this.analysisService = analysisService; + this.similarityService = similarityService; + } + + @Override public void start() throws EngineException { + if (indexWriter != null) { + throw new EngineAlreadyStartedException(shardId); + } + if (logger.isDebugEnabled()) { + logger.debug("Starting engine with ramBufferSize [" + ramBufferSize + "], refreshInterval [" + refreshInterval + "]"); + } + IndexWriter indexWriter = null; + try { + // release locks when started + if (IndexWriter.isLocked(store.directory())) { + logger.trace("Shard is locked, releasing lock"); + store.directory().clearLock(IndexWriter.WRITE_LOCK_NAME); + } + boolean create = !IndexReader.indexExists(store.directory()); + indexWriter = new IndexWriter(store.directory(), + analysisService.defaultIndexAnalyzer(), create, deletionPolicy, IndexWriter.MaxFieldLength.UNLIMITED); + indexWriter.setMergeScheduler(mergeScheduler.newMergeScheduler()); + indexWriter.setMergePolicy(mergePolicyProvider.newMergePolicy(indexWriter)); + indexWriter.setSimilarity(similarityService.defaultIndexSimilarity()); + indexWriter.setRAMBufferSizeMB(ramBufferSize.mbFrac()); + indexWriter.setTermIndexInterval(termIndexInterval); + } catch (IOException e) { + safeClose(indexWriter); + throw new EngineCreationFailureException(shardId, "Failed to create engine", e); + } + this.indexWriter = indexWriter; + + try { + IndexReader indexReader = indexWriter.getReader(); + IndexSearcher indexSearcher = new IndexSearcher(indexReader); + indexSearcher.setSimilarity(similarityService.defaultSearchSimilarity()); + this.nrtResource = newAcquirableResource(new ReaderSearcherHolder(indexReader, indexSearcher)); + } catch (IOException e) { + try { + indexWriter.rollback(); + } catch (IOException e1) { + // ignore + } finally { + try { + indexWriter.close(); + } catch (IOException e1) { + // ignore + } + } + throw new EngineCreationFailureException(shardId, 
"Failed to open reader on writer", e); + } + } + + @Override public TimeValue refreshInterval() { + return refreshInterval; + } + + @Override public void create(Create create) throws EngineException { + rwl.readLock().lock(); + try { + indexWriter.addDocument(create.doc(), create.analyzer()); + translog.add(new Translog.Create(create)); + dirty = true; + } catch (IOException e) { + throw new CreateFailedEngineException(shardId, create, e); + } finally { + rwl.readLock().unlock(); + } + } + + @Override public void index(Index index) throws EngineException { + rwl.readLock().lock(); + try { + indexWriter.updateDocument(index.uid(), index.doc(), index.analyzer()); + translog.add(new Translog.Index(index)); + dirty = true; + } catch (IOException e) { + throw new IndexFailedEngineException(shardId, index, e); + } finally { + rwl.readLock().unlock(); + } + } + + @Override public void delete(Delete delete) throws EngineException { + rwl.readLock().lock(); + try { + indexWriter.deleteDocuments(delete.uid()); + translog.add(new Translog.Delete(delete)); + dirty = true; + } catch (IOException e) { + throw new DeleteFailedEngineException(shardId, delete, e); + } finally { + rwl.readLock().unlock(); + } + } + + @Override public void delete(DeleteByQuery delete) throws EngineException { + rwl.readLock().lock(); + try { + indexWriter.deleteDocuments(delete.query()); + translog.add(new Translog.DeleteByQuery(delete)); + dirty = true; + } catch (IOException e) { + throw new DeleteByQueryFailedEngineException(shardId, delete, e); + } finally { + rwl.readLock().unlock(); + } + } + + @Override public Searcher searcher() throws EngineException { + AcquirableResource holder; + for (; ;) { + holder = this.nrtResource; + if (holder.acquire()) { + break; + } + Thread.yield(); + } + return new RobinSearchResult(holder); + } + + @Override public SizeValue estimateFlushableMemorySize() { + rwl.readLock().lock(); + try { + long bytes = IndexWriters.estimateRamSize(indexWriter); + bytes += 
translog.estimateMemorySize().bytes(); + return new SizeValue(bytes); + } catch (Exception e) { + return null; + } finally { + rwl.readLock().unlock(); + } + } + + @Override public void refresh(boolean waitForOperations) throws EngineException { + // this engine always acts as if waitForOperations=true + if (refreshMutex.compareAndSet(false, true)) { + if (dirty) { + dirty = false; + try { + AcquirableResource current = nrtResource; + IndexReader newReader = current.resource().reader().reopen(true); + if (newReader != current.resource().reader()) { + nrtResource = newAcquirableResource(new ReaderSearcherHolder(newReader)); + current.markForClose(); + } + } catch (IOException e) { + throw new RefreshFailedEngineException(shardId, e); + } + } + refreshMutex.set(false); + } + } + + @Override public void flush() throws EngineException { + // check outside the lock as well so we can check without blocking on the write lock + if (disableFlushCounter > 0) { + throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed"); + } + rwl.writeLock().lock(); + try { + if (disableFlushCounter > 0) { + throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed"); + } + try { + indexWriter.commit(); + translog.newTranslog(); + } catch (IOException e) { + throw new FlushFailedEngineException(shardId, e); + } + } finally { + rwl.writeLock().unlock(); + } + } + + @Override public void snapshot(SnapshotHandler snapshotHandler) throws EngineException { + SnapshotIndexCommit snapshotIndexCommit = null; + Translog.Snapshot traslogSnapshot = null; + rwl.readLock().lock(); + try { + snapshotIndexCommit = deletionPolicy.snapshot(); + traslogSnapshot = translog.snapshot(); + } catch (Exception e) { + if (snapshotIndexCommit != null) snapshotIndexCommit.release(); + throw new SnapshotFailedEngineException(shardId, e); + } finally { + rwl.readLock().unlock(); + } + + try { + snapshotHandler.snapshot(snapshotIndexCommit, 
traslogSnapshot); + } finally { + snapshotIndexCommit.release(); + traslogSnapshot.release(); + } + } + + @Override public void recover(RecoveryHandler recoveryHandler) throws EngineException { + // take a write lock here so it won't happen while a flush is in progress + // this means that next commits will not be allowed once the lock is released + rwl.writeLock().lock(); + try { + disableFlushCounter++; + } finally { + rwl.writeLock().unlock(); + } + + SnapshotIndexCommit phase1Snapshot; + try { + phase1Snapshot = deletionPolicy.snapshot(); + } catch (IOException e) { + --disableFlushCounter; + throw new RecoveryEngineException(shardId, 1, "Snapshot failed", e); + } + + try { + recoveryHandler.phase1(phase1Snapshot); + } catch (Exception e) { + --disableFlushCounter; + phase1Snapshot.release(); + throw new RecoveryEngineException(shardId, 1, "Execution failed", e); + } + + Translog.Snapshot phase2Snapshot; + try { + phase2Snapshot = translog.snapshot(); + } catch (Exception e) { + --disableFlushCounter; + phase1Snapshot.release(); + throw new RecoveryEngineException(shardId, 2, "Snapshot failed", e); + } + + try { + recoveryHandler.phase2(phase2Snapshot); + } catch (Exception e) { + --disableFlushCounter; + phase1Snapshot.release(); + phase2Snapshot.release(); + throw new RecoveryEngineException(shardId, 2, "Execution failed", e); + } + + rwl.writeLock().lock(); + Translog.Snapshot phase3Snapshot; + try { + phase3Snapshot = translog.snapshot(phase2Snapshot); + } catch (Exception e) { + --disableFlushCounter; + rwl.writeLock().unlock(); + phase1Snapshot.release(); + phase2Snapshot.release(); + throw new RecoveryEngineException(shardId, 3, "Snapshot failed", e); + } + + try { + recoveryHandler.phase3(phase3Snapshot); + } catch (Exception e) { + throw new RecoveryEngineException(shardId, 3, "Execution failed", e); + } finally { + --disableFlushCounter; + rwl.writeLock().unlock(); + phase1Snapshot.release(); + phase2Snapshot.release(); + phase3Snapshot.release(); + } 
+ } + + @Override public void close() throws ElasticSearchException { + if (closed) { + return; + } + closed = true; + rwl.writeLock().lock(); + if (nrtResource != null) { + this.nrtResource.forceClose(); + } + try { + if (indexWriter != null) { + indexWriter.close(); + } + } catch (IOException e) { + throw new CloseEngineException(shardId, "Failed to close engine", e); + } finally { + indexWriter = null; + rwl.writeLock().unlock(); + } + } + + private static class RobinSearchResult implements Searcher { + + private final AcquirableResource nrtHolder; + + private RobinSearchResult(AcquirableResource nrtHolder) { + this.nrtHolder = nrtHolder; + } + + @Override public IndexReader reader() { + return nrtHolder.resource().reader(); + } + + @Override public IndexSearcher searcher() { + return nrtHolder.resource().searcher(); + } + + @Override public boolean release() throws ElasticSearchException { + nrtHolder.release(); + return true; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/robin/RobinEngineModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/robin/RobinEngineModule.java new file mode 100644 index 00000000000..661309959c2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/engine/robin/RobinEngineModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine.robin; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.engine.Engine; + +/** + * @author kimchy (Shay Banon) + */ +public class RobinEngineModule extends AbstractModule { + + @Override protected void configure() { + bind(Engine.class).to(RobinEngine.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java new file mode 100644 index 00000000000..446d350fefa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * An exception marking that this recovery attempt should be ignored (since probably, we already recovered). + * + * @author kimchy (Shay Banon) + */ +public class IgnoreGatewayRecoveryException extends IndexShardException { + + public IgnoreGatewayRecoveryException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public IgnoreGatewayRecoveryException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexGateway.java new file mode 100644 index 00000000000..b458397d4f7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexGateway.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.IndexComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface IndexGateway extends IndexComponent { + + Class shardGatewayClass(); + + /** + * Deletes the content of the index gateway. + */ + void delete(); + + /** + * Closes the index gateway. + */ + void close(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java new file mode 100644 index 00000000000..ea011996f77 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import com.google.inject.AbstractModule; +import org.elasticsearch.gateway.Gateway; +import org.elasticsearch.util.guice.ModulesFactory; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexGatewayModule extends AbstractModule { + + private final Settings settings; + + private final Gateway gateway; + + public IndexGatewayModule(Settings settings, Gateway gateway) { + this.settings = settings; + this.gateway = gateway; + } + + @Override protected void configure() { + ModulesFactory.createModule(settings.getAsClass("index.gateway.type", gateway.suggestIndexGateway(), "org.elasticsearch.index.gateway.", "IndexGatewayModule"), settings).configure(binder()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java new file mode 100644 index 00000000000..b14fca63eb9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.shard.IndexShardComponent; +import org.elasticsearch.index.translog.Translog; + +/** + * @author kimchy (Shay Banon) + */ +public interface IndexShardGateway extends IndexShardComponent { + + /** + * Recovers the state of the shard from the gateway. + */ + RecoveryStatus recover() throws IndexShardGatewayRecoveryException; + + /** + * Snapshots the given shard into the gateway. + */ + void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot); + + /** + * Returns true if this gateway requires scheduling management for snapshot + * operations. + */ + boolean requiresSnapshotScheduling(); + + void close(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java new file mode 100644 index 00000000000..d497755eb0e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardGatewayException extends IndexShardException { + + public IndexShardGatewayException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public IndexShardGatewayException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java new file mode 100644 index 00000000000..dd69c5d06c1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import com.google.inject.AbstractModule; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardGatewayModule extends AbstractModule { + + private final IndexGateway indexGateway; + + public IndexShardGatewayModule(IndexGateway indexGateway) { + this.indexGateway = indexGateway; + } + + @Override protected void configure() { + bind(IndexShardGateway.class) + .to(indexGateway.shardGatewayClass()) + .asEagerSingleton(); + + bind(IndexShardGatewayService.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java new file mode 100644 index 00000000000..a087d53581a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardGatewayRecoveryException extends IndexShardGatewayException { + + public IndexShardGatewayRecoveryException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java new file mode 100644 index 00000000000..10275b8dd4e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java @@ -0,0 +1,208 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.index.gateway;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchIllegalStateException;
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.*;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.StopWatch;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Drives a shard's gateway lifecycle: one-time recovery on startup, periodic snapshots of
 * the index commit + translog, and a final snapshot on close.
 *
 * <p>Thread-safety: {@link #recover()}, {@link #snapshot()} and
 * {@link #scheduleSnapshotIfNeeded()} are {@code synchronized} on this instance; the
 * last-snapshotted markers are {@code volatile} so the scheduled snapshot thread sees
 * updates made during recovery.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexShardGatewayService extends AbstractIndexShardComponent {

    // Whether to take a last snapshot when the shard is closed (setting "snapshotOnClose").
    private final boolean snapshotOnClose;

    private final ThreadPool threadPool;

    private final InternalIndexShard indexShard;

    private final IndexShardGateway shardGateway;

    private final Store store;


    // Markers of the last state pushed to the gateway; a new snapshot is only written when
    // one of these changed. volatile: written by recovery/snapshot, read by the scheduler.
    private volatile long lastIndexVersion;

    private volatile long lastTranslogId = -1;

    private volatile int lastTranslogSize;

    // Guards against running recovery twice for this shard.
    private final AtomicBoolean recovered = new AtomicBoolean();

    // Interval between scheduled snapshots; -1 millis disables scheduling.
    private final TimeValue snapshotInterval;

    private volatile ScheduledFuture snapshotScheduleFuture;

    @Inject public IndexShardGatewayService(ShardId shardId, @IndexSettings Settings indexSettings,
                                            ThreadPool threadPool, IndexShard indexShard, IndexShardGateway shardGateway,
                                            Store store) {
        super(shardId, indexSettings);
        this.threadPool = threadPool;
        this.indexShard = (InternalIndexShard) indexShard;
        this.shardGateway = shardGateway;
        this.store = store;

        this.snapshotOnClose = componentSettings.getAsBoolean("snapshotOnClose", true);
        this.snapshotInterval = componentSettings.getAsTime("snapshotInterval", TimeValue.timeValueSeconds(10));
    }

    /**
     * Should be called when the shard routing state has changed (note, after the state has been set on the shard).
     */
    public void routingStateChanged() {
        scheduleSnapshotIfNeeded();
    }

    /**
     * Recovers the state of the shard from the gateway.
     *
     * <p>Only a primary shard recovers from the gateway; the store is wiped first so the
     * gateway content replaces it wholesale. After recovery the last-snapshotted markers are
     * primed from a fresh engine snapshot so the first scheduled snapshot is a no-op unless
     * something changed.
     *
     * @throws IgnoreGatewayRecoveryException if this shard already recovered (callers ignore it)
     */
    public synchronized void recover() throws IndexShardGatewayRecoveryException, IgnoreGatewayRecoveryException {
        if (recovered.compareAndSet(false, true)) {
            // NOTE(review): if recovery below throws, 'recovered' stays true and a retry will
            // get IgnoreGatewayRecoveryException — confirm a failed recovery is not retried.
            if (!indexShard.routingEntry().primary()) {
                throw new ElasticSearchIllegalStateException("Trying to recover when the shard is in backup state");
            }
            // clear the store, we are going to recover into it
            try {
                store.deleteContent();
            } catch (IOException e) {
                // best effort: recovery overwrites the content anyway
                logger.debug("Failed to delete store before recovery from gateway", e);
            }
            indexShard.recovering();
            logger.debug("Starting recovery from {}", shardGateway);
            StopWatch stopWatch = new StopWatch().start();
            RecoveryStatus recoveryStatus = shardGateway.recover();

            // update the last up to date values
            indexShard.snapshot(new Engine.SnapshotHandler() {
                @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
                    lastIndexVersion = snapshotIndexCommit.getVersion();
                    lastTranslogId = translogSnapshot.translogId();
                    lastTranslogSize = translogSnapshot.size();
                }
            });

            // start the shard if the gateway has not started it already
            if (indexShard.state() != IndexShardState.STARTED) {
                indexShard.start();
            }
            stopWatch.stop();
            if (logger.isDebugEnabled()) {
                StringBuilder sb = new StringBuilder();
                sb.append("Recovery completed from ").append(shardGateway).append(", took [").append(stopWatch.totalTime()).append("]\n");
                sb.append("    Index    : numberOfFiles      [").append(recoveryStatus.index().numberOfFiles()).append("] with totalSize [").append(recoveryStatus.index().totalSize()).append("]\n");
                sb.append("    Translog : numberOfOperations [").append(recoveryStatus.translog().numberOfOperations()).append("] with totalSize [").append(recoveryStatus.translog().totalSize()).append("]");
                logger.debug(sb.toString());
            }
            // refresh the shard
            indexShard.refresh(false);
            scheduleSnapshotIfNeeded();
        } else {
            throw new IgnoreGatewayRecoveryException(shardId, "Already recovered");
        }
    }

    /**
     * Snapshots the given shard into the gateway.
     *
     * <p>No-ops on non-primary or relocating shards, and skips the gateway write entirely
     * when nothing changed since the markers were last updated.
     */
    public synchronized void snapshot() throws IndexShardGatewaySnapshotFailedException {
        if (!indexShard.routingEntry().primary()) {
            return;
//            throw new IndexShardGatewaySnapshotNotAllowedException(shardId, "Snapshot not allowed on non primary shard");
        }
        if (indexShard.routingEntry().relocating()) {
            // do not snapshot when in the process of relocation of primaries so we won't get conflicts
            return;
        }
        indexShard.snapshot(new Engine.SnapshotHandler() {
            @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
                if (lastIndexVersion != snapshotIndexCommit.getVersion() || lastTranslogId != translogSnapshot.translogId() || lastTranslogSize != translogSnapshot.size()) {

                    shardGateway.snapshot(snapshotIndexCommit, translogSnapshot);

                    lastIndexVersion = snapshotIndexCommit.getVersion();
                    lastTranslogId = translogSnapshot.translogId();
                    lastTranslogSize = translogSnapshot.size();
                }
            }
        });
    }

    /**
     * Cancels scheduled snapshotting, optionally takes a final snapshot, and closes the
     * shard gateway.
     */
    public void close() {
        if (snapshotScheduleFuture != null) {
            snapshotScheduleFuture.cancel(true);
            snapshotScheduleFuture = null;
        }
        if (snapshotOnClose) {
            logger.debug("Snapshotting on close ...");
            snapshot();
        }
        shardGateway.close();
    }

    // Starts the periodic snapshot task when the gateway wants scheduling, this node holds a
    // started primary, and no task is already scheduled.
    private synchronized void scheduleSnapshotIfNeeded() {
        if (!shardGateway.requiresSnapshotScheduling()) {
            return;
        }
        if (!indexShard.routingEntry().primary()) {
            // we only do snapshotting on the primary shard
            return;
        }
        if (!indexShard.routingEntry().started()) {
            // we only schedule when the cluster assumes we have started
            return;
        }
        if (snapshotScheduleFuture != null) {
            // we are already scheduling this one, ignore
            return;
        }
        if (snapshotInterval.millis() != -1) {
            // we need to schedule snapshot
            if (logger.isDebugEnabled()) {
                logger.debug("Scheduling snapshot every [{}]", snapshotInterval);
            }
            snapshotScheduleFuture = threadPool.scheduleWithFixedDelay(new SnapshotRunnable(), snapshotInterval);
        }
    }

    // Periodic task: snapshot and swallow-but-log failures so the schedule keeps running.
    private class SnapshotRunnable implements Runnable {
        @Override public void run() {
            try {
                snapshot();
            } catch (Exception e) {
                logger.warn("Failed to snapshot", e);
            }
        }
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java
new file mode 100644
index 00000000000..8a3ef8b3156
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java
@@ -0,0 +1,32 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
+ */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardGatewaySnapshotFailedException extends IndexShardGatewayException { + + public IndexShardGatewaySnapshotFailedException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java new file mode 100644 index 00000000000..0e40989684b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardGatewaySnapshotNotAllowedException extends IndexShardGatewayException { + + public IndexShardGatewaySnapshotNotAllowedException(ShardId shardId, String msg) { + super(shardId, msg); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java new file mode 100644 index 00000000000..5099a2ac70e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway; + +import org.elasticsearch.util.SizeValue; + +/** + * @author kimchy (Shay Banon) + */ +public class RecoveryStatus { + + private Index index; + + private Translog translog; + + public RecoveryStatus(Index index, Translog translog) { + this.index = index; + this.translog = translog; + } + + public Index index() { + return index; + } + + public Translog translog() { + return translog; + } + + public static class Translog { + private int numberOfOperations; + private SizeValue totalSize; + + public Translog(int numberOfOperations, SizeValue totalSize) { + this.numberOfOperations = numberOfOperations; + this.totalSize = totalSize; + } + + public int numberOfOperations() { + return numberOfOperations; + } + + public SizeValue totalSize() { + return totalSize; + } + } + + public static class Index { + private int numberOfFiles; + private SizeValue totalSize; + + public Index(int numberOfFiles, SizeValue totalSize) { + this.numberOfFiles = numberOfFiles; + this.totalSize = totalSize; + } + + public int numberOfFiles() { + return numberOfFiles; + } + + public SizeValue totalSize() { + return totalSize; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java new file mode 100644 index 00000000000..0fce185b0ab --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.gateway.fs;

import com.google.inject.Inject;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.Gateway;
import org.elasticsearch.gateway.fs.FsGateway;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.gateway.IndexGateway;
import org.elasticsearch.index.gateway.IndexShardGateway;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.Strings;
import org.elasticsearch.util.io.FileSystemUtils;
import org.elasticsearch.util.settings.Settings;

import java.io.File;

/**
 * Filesystem-backed index gateway: stores per-index gateway state under a directory.
 *
 * <p>Location resolution: the explicit "location" component setting wins; otherwise, when the
 * node gateway is an {@link FsGateway}, a per-index directory under its home is used; as a last
 * resort a "gateway" directory under the cluster work directory.
 *
 * @author kimchy (Shay Banon)
 */
public class FsIndexGateway extends AbstractIndexComponent implements IndexGateway {

    private final Environment environment;

    private final Gateway gateway;

    // Cleaned absolute path of indexGatewayHome (kept as a string for settings/reporting).
    private final String location;

    private File indexGatewayHome;

    @Inject public FsIndexGateway(Index index, @IndexSettings Settings indexSettings, Environment environment, Gateway gateway) {
        super(index, indexSettings);
        this.environment = environment;
        this.gateway = gateway;

        String location = componentSettings.get("location");
        if (location == null) {
            if (gateway instanceof FsGateway) {
                indexGatewayHome = new File(((FsGateway) gateway).gatewayHome(), index().name());
            } else {
                indexGatewayHome = new File(new File(environment.workWithClusterFile(), "gateway"), index().name());
            }
            location = Strings.cleanPath(indexGatewayHome.getAbsolutePath());
        } else {
            indexGatewayHome = new File(location);
        }
        this.location = location;
        // NOTE(review): mkdirs() result is ignored — a failure surfaces later on first write;
        // confirm that is acceptable or check the return value.
        indexGatewayHome.mkdirs();
    }

    @Override public Class shardGatewayClass() {
        return FsIndexShardGateway.class;
    }

    @Override public void delete() {
        // Deletes the contents but keeps the home directory itself (second arg false).
        FileSystemUtils.deleteRecursively(indexGatewayHome, false);
    }

    @Override public void close() {
    }

    public File indexGatewayHome() {
        return this.indexGatewayHome;
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java
new file mode 100644
index 00000000000..e62c061a96d
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java
@@ -0,0 +1,33 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
+ */ + +package org.elasticsearch.index.gateway.fs; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.gateway.IndexGateway; + +/** + * @author kimchy (Shay Banon) + */ +public class FsIndexGatewayModule extends AbstractModule { + + @Override protected void configure() { + bind(IndexGateway.class).to(FsIndexGateway.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java new file mode 100644 index 00000000000..23ce25f9b05 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java @@ -0,0 +1,338 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway.fs; + +import com.google.inject.Inject; +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.gateway.IndexShardGateway; +import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException; +import org.elasticsearch.index.gateway.IndexShardGatewaySnapshotFailedException; +import org.elasticsearch.index.gateway.RecoveryStatus; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.InternalIndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.settings.Settings; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static com.google.common.collect.Lists.*; +import static org.elasticsearch.index.translog.TranslogStreams.*; +import static org.elasticsearch.util.lucene.Directories.*; + +/** + * @author kimchy (Shay Banon) + */ +public class FsIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway { + + private final InternalIndexShard indexShard; + + private final ThreadPool threadPool; + + private final Store store; + + private final File location; + + private final File locationIndex; + + private final File locationTranslog; + 
+ private long lastIndexVersion; + + private long lastTranslogId = -1; + + private int lastTranslogSize; + + private RandomAccessFile translogFile; + + @Inject public FsIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, FsIndexGateway fsIndexGateway, IndexShard indexShard, Store store) { + super(shardId, indexSettings); + this.threadPool = threadPool; + this.indexShard = (InternalIndexShard) indexShard; + this.store = store; + this.location = new File(fsIndexGateway.indexGatewayHome(), Integer.toString(shardId.id())); + this.locationIndex = new File(location, "index"); + this.locationTranslog = new File(location, "translog"); + + locationIndex.mkdirs(); + locationTranslog.mkdirs(); + } + + @Override public boolean requiresSnapshotScheduling() { + return true; + } + + @Override public String toString() { + return "fs[" + location + "]"; + } + + @Override public void close() { + } + + @Override public RecoveryStatus recover() throws IndexShardGatewayRecoveryException { + RecoveryStatus.Index recoveryStatusIndex = recoverIndex(); + RecoveryStatus.Translog recoveryStatusTranslog = recoverTranslog(); + // update the last up to date values + indexShard.snapshot(new Engine.SnapshotHandler() { + @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException { + lastIndexVersion = snapshotIndexCommit.getVersion(); + lastTranslogId = translogSnapshot.translogId(); + lastTranslogSize = translogSnapshot.size(); + } + }); + return new RecoveryStatus(recoveryStatusIndex, recoveryStatusTranslog); + } + + @Override public void snapshot(final SnapshotIndexCommit snapshotIndexCommit, final Translog.Snapshot translogSnapshot) { + boolean indexDirty = false; + boolean translogDirty = false; + + if (lastIndexVersion != snapshotIndexCommit.getVersion()) { + indexDirty = true; + // snapshot into the index + final CountDownLatch latch = new 
CountDownLatch(snapshotIndexCommit.getFiles().length); + final AtomicReference lastException = new AtomicReference(); + for (final String fileName : snapshotIndexCommit.getFiles()) { + if (fileName.equals(snapshotIndexCommit.getSegmentsFileName())) { + latch.countDown(); + continue; + } + try { + IndexInput indexInput = snapshotIndexCommit.getDirectory().openInput(fileName); + File snapshotFile = new File(locationIndex, fileName); + if (snapshotFile.exists() && (snapshotFile.length() == indexInput.length())) { + // we assume its the same one, no need to copy + latch.countDown(); + continue; + } + indexInput.close(); + } catch (Exception e) { + logger.debug("Failed to verify file equality based on length, copying...", e); + } + threadPool.execute(new Runnable() { + @Override public void run() { + try { + copyFromDirectory(snapshotIndexCommit.getDirectory(), fileName, new File(locationIndex, fileName)); + } catch (Exception e) { + lastException.set(e); + } finally { + latch.countDown(); + } + } + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + lastException.set(e); + } + if (lastException.get() != null) { + throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to perform snapshot (index files)", lastException.get()); + } + } + if (translogSnapshot.translogId() != lastTranslogId || translogFile == null) { + translogDirty = true; + if (translogFile != null) { + try { + translogFile.close(); + } catch (IOException e) { + // ignore + } + } + try { + File f = new File(locationTranslog, "translog-" + translogSnapshot.translogId()); + translogFile = new RandomAccessFile(f, "rw"); + translogFile.writeInt(-1); // write the number of operations header with -1 currently + // double check that we managed to read/write correctly + translogFile.seek(0); + if (translogFile.readInt() != -1) { + throw new ElasticSearchIllegalStateException("Wrote to snapshot file [" + f + "] but did not read..."); + } + for (Translog.Operation operation : 
translogSnapshot) { + writeTranslogOperation(translogFile, operation); + } + } catch (Exception e) { + try { + translogFile.close(); + } catch (IOException e1) { + // ignore + } + translogFile = null; + throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to snapshot translog", e); + } + } else if (translogSnapshot.size() > lastTranslogSize) { + translogDirty = true; + try { + for (Translog.Operation operation : translogSnapshot.skipTo(lastTranslogSize)) { + writeTranslogOperation(translogFile, operation); + } + } catch (Exception e) { + try { + translogFile.close(); + } catch (IOException e1) { + // ignore + } + translogFile = null; + throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to snapshot translog", e); + } + } + + // now write the segments file and update the translog header + try { + if (indexDirty) { + copyFromDirectory(snapshotIndexCommit.getDirectory(), snapshotIndexCommit.getSegmentsFileName(), + new File(locationIndex, snapshotIndexCommit.getSegmentsFileName())); + } + if (translogDirty) { + translogFile.seek(0); + translogFile.writeInt(translogSnapshot.size()); + translogFile.seek(translogFile.length()); + translogFile.getFD().sync(); + } + } catch (Exception e) { + try { + translogFile.close(); + } catch (IOException e1) { + // ignore + } + translogFile = null; + throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to finalize snapshot", e); + } + + // delete the old translog + if (lastTranslogId != translogSnapshot.translogId()) { + new File(locationTranslog, "translog-" + lastTranslogId).delete(); + } + + + lastIndexVersion = snapshotIndexCommit.getVersion(); + lastTranslogId = translogSnapshot.translogId(); + lastTranslogSize = translogSnapshot.size(); + } + + private RecoveryStatus.Index recoverIndex() throws IndexShardGatewayRecoveryException { + File[] files = locationIndex.listFiles(); + final CountDownLatch latch = new CountDownLatch(files.length); + final AtomicReference lastException = 
new AtomicReference(); + for (final File file : files) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + copyToDirectory(file, store.directory(), file.getName()); + } catch (Exception e) { + logger.debug("Failed to read [" + file + "] into [" + store + "]", e); + lastException.set(e); + } finally { + latch.countDown(); + } + } + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + lastException.set(e); + } + if (lastException.get() != null) { + throw new IndexShardGatewayRecoveryException(shardId(), "Failed to recover index files", lastException.get()); + } + long totalSize = 0; + for (File file : files) { + totalSize += file.length(); + } + return new RecoveryStatus.Index(files.length, new SizeValue(totalSize, SizeUnit.BYTES)); + } + + private RecoveryStatus.Translog recoverTranslog() throws IndexShardGatewayRecoveryException { + RandomAccessFile raf = null; + try { + long recoveryTranslogId = findLatestTranslogId(locationTranslog); + if (recoveryTranslogId == -1) { + // no recovery file found, start the shard and bail + indexShard.start(); + return new RecoveryStatus.Translog(0, new SizeValue(0, SizeUnit.BYTES)); + } + File recoveryTranslogFile = new File(locationTranslog, "translog-" + recoveryTranslogId); + raf = new RandomAccessFile(recoveryTranslogFile, "r"); + int numberOfOperations = raf.readInt(); + ArrayList operations = newArrayListWithExpectedSize(numberOfOperations); + for (int i = 0; i < numberOfOperations; i++) { + operations.add(readTranslogOperation(raf)); + } + indexShard.performRecovery(operations); + return new RecoveryStatus.Translog(operations.size(), new SizeValue(recoveryTranslogFile.length(), SizeUnit.BYTES)); + } catch (Exception e) { + throw new IndexShardGatewayRecoveryException(shardId(), "Failed to perform recovery of translog", e); + } finally { + if (raf != null) { + try { + raf.close(); + } catch (IOException e) { + // ignore + } + } + } + } + + private static long 
findLatestTranslogId(File location) { + File[] files = location.listFiles(new FilenameFilter() { + @Override public boolean accept(File dir, String name) { + return name.startsWith("translog-"); + } + }); + + long index = -1; + for (File file : files) { + String name = file.getName(); + try { + RandomAccessFile raf = new RandomAccessFile(file, "r"); + // if header is -1, then its not properly written, ignore it + if (raf.readInt() == -1) { + continue; + } + } catch (Exception e) { + // broken file, continue + continue; + } + long fileIndex = Long.parseLong(name.substring(name.indexOf('-') + 1)); + if (fileIndex >= index) { + index = fileIndex; + } + } + + return index; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java new file mode 100644 index 00000000000..8f0d74725e9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.gateway.none; + +import com.google.inject.Inject; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.gateway.IndexGateway; +import org.elasticsearch.index.gateway.IndexShardGateway; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class NoneIndexGateway extends AbstractIndexComponent implements IndexGateway { + + @Inject public NoneIndexGateway(Index index, @IndexSettings Settings indexSettings) { + super(index, indexSettings); + } + + @Override public Class shardGatewayClass() { + return NoneIndexShardGateway.class; + } + + @Override public String toString() { + return "none"; + } + + @Override public void delete() { + } + + @Override public void close() { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java new file mode 100644 index 00000000000..ceacef35dee --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.gateway.none; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.gateway.IndexGateway; + +/** + * @author kimchy (Shay Banon) + */ +public class NoneIndexGatewayModule extends AbstractModule { + + @Override protected void configure() { + bind(IndexGateway.class).to(NoneIndexGateway.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java new file mode 100644 index 00000000000..ce195865608 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.gateway.none; + +import com.google.inject.Inject; +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.gateway.IndexShardGateway; +import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException; +import org.elasticsearch.index.gateway.RecoveryStatus; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.InternalIndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class NoneIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway { + + private final InternalIndexShard indexShard; + + @Inject public NoneIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, IndexShard indexShard) { + super(shardId, indexSettings); + this.indexShard = (InternalIndexShard) indexShard; + } + + @Override public RecoveryStatus recover() throws IndexShardGatewayRecoveryException { + // in the none case, we simply start the shard + indexShard.start(); + return new RecoveryStatus(new RecoveryStatus.Index(0, new SizeValue(0, SizeUnit.BYTES)), new RecoveryStatus.Translog(0, new SizeValue(0, SizeUnit.BYTES))); + } + + @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) { + // nothing to do here + } + + @Override public boolean requiresSnapshotScheduling() { + return false; + } + + @Override public void close() { + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/BoostFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/BoostFieldMapper.java new file mode 100644 index 00000000000..e28f7d3391f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/BoostFieldMapper.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +/** + * A field mapper that allows to control the boosting of a parsed document. Can be treated as + * any other field mapper by being stored and analyzed, though, by default, it does neither. 
+ * + * @author kimchy (Shay Banon) + */ +public interface BoostFieldMapper extends FieldMapper { + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java new file mode 100644 index 00000000000..fedec4ff441 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.common.collect.UnmodifiableIterator; +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.index.analysis.FieldNameAnalyzer; +import org.elasticsearch.util.concurrent.Immutable; + +import java.util.Map; + +import static com.google.common.collect.Lists.*; +import static com.google.common.collect.Maps.*; + +/** + * @author kimchy (Shay Banon) + */ +@Immutable +public class DocumentFieldMappers implements Iterable { + + private final ImmutableList fieldMappers; + private final Map fullNameFieldMappers; + private final Map nameFieldMappers; + private final Map indexNameFieldMappers; + + private final FieldNameAnalyzer indexAnalyzer; + private final FieldNameAnalyzer searchAnalyzer; + + public DocumentFieldMappers(DocumentMapper docMapper, Iterable fieldMappers) { + final Map tempNameFieldMappers = newHashMap(); + final Map tempIndexNameFieldMappers = newHashMap(); + final Map tempFullNameFieldMappers = newHashMap(); + + final Map indexAnalyzers = newHashMap(); + final Map searchAnalyzers = newHashMap(); + + for (FieldMapper fieldMapper : fieldMappers) { + FieldMappers mappers = tempNameFieldMappers.get(fieldMapper.name()); + if (mappers == null) { + mappers = new FieldMappers(fieldMapper); + } else { + mappers = mappers.concat(fieldMapper); + } + tempNameFieldMappers.put(fieldMapper.name(), mappers); + + mappers = tempIndexNameFieldMappers.get(fieldMapper.indexName()); + if (mappers == null) { + mappers = new FieldMappers(fieldMapper); + } else { + mappers = mappers.concat(fieldMapper); + } + tempIndexNameFieldMappers.put(fieldMapper.indexName(), mappers); + + mappers = tempFullNameFieldMappers.get(fieldMapper.fullName()); + if (mappers == null) { + mappers = new FieldMappers(fieldMapper); + } else { + mappers = mappers.concat(fieldMapper); + } 
+ tempFullNameFieldMappers.put(fieldMapper.fullName(), mappers); + + if (fieldMapper.indexAnalyzer() != null) { + indexAnalyzers.put(fieldMapper.indexName(), fieldMapper.indexAnalyzer()); + } + if (fieldMapper.searchAnalyzer() != null) { + searchAnalyzers.put(fieldMapper.indexName(), fieldMapper.searchAnalyzer()); + } + } + this.fieldMappers = ImmutableList.copyOf(fieldMappers); + this.nameFieldMappers = ImmutableMap.copyOf(tempNameFieldMappers); + this.indexNameFieldMappers = ImmutableMap.copyOf(tempIndexNameFieldMappers); + this.fullNameFieldMappers = ImmutableMap.copyOf(tempFullNameFieldMappers); + + this.indexAnalyzer = new FieldNameAnalyzer(indexAnalyzers, docMapper.indexAnalyzer()); + this.searchAnalyzer = new FieldNameAnalyzer(searchAnalyzers, docMapper.searchAnalyzer()); + } + + @Override public UnmodifiableIterator iterator() { + return fieldMappers.iterator(); + } + + public FieldMappers name(String name) { + return nameFieldMappers.get(name); + } + + public FieldMappers indexName(String indexName) { + return indexNameFieldMappers.get(indexName); + } + + public FieldMappers fullName(String fullName) { + return fullNameFieldMappers.get(fullName); + } + + /** + * A smart analyzer used for indexing that takes into account specific analyzers configured + * per {@link FieldMapper}. + */ + public Analyzer indexAnalyzer() { + return this.indexAnalyzer; + } + + /** + * A smart analyzer used for searching that takes into account specific analyzers configured + * per {@link FieldMapper}. + */ + public Analyzer searchAnalyzer() { + return this.searchAnalyzer; + } + + public DocumentFieldMappers concat(DocumentMapper docMapper, FieldMapper... 
fieldMappers) { + return concat(docMapper, newArrayList(fieldMappers)); + } + + public DocumentFieldMappers concat(DocumentMapper docMapper, Iterable fieldMappers) { + return new DocumentFieldMappers(docMapper, Iterables.concat(this.fieldMappers, fieldMappers)); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java new file mode 100644 index 00000000000..74aabf1fe7a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.concurrent.ThreadSafe; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public interface DocumentMapper { + + String type(); + + /** + * When constructed by parsing a mapping definition, will return it. Otherwise, + * returns null. 
+ */ + String mappingSource(); + + UidFieldMapper uidMapper(); + + IdFieldMapper idMapper(); + + TypeFieldMapper typeMapper(); + + SourceFieldMapper sourceMapper(); + + BoostFieldMapper boostMapper(); + + DocumentFieldMappers mappers(); + + /** + * The default index analyzer to be used. Note, the {@link DocumentFieldMappers#indexAnalyzer()} should + * probably be used instead. + */ + Analyzer indexAnalyzer(); + + /** + * The default search analyzer to be used. Note, the {@link DocumentFieldMappers#searchAnalyzer()} should + * probably be used instead. + */ + Analyzer searchAnalyzer(); + + /** + * Parses the source into a parsed document. + *

+ *

Validates that the source has the provided id and type. Note, most times + * we will already have the id and the type even though they exist in the source as well. + */ + ParsedDocument parse(@Nullable String type, @Nullable String id, String source) throws MapperParsingException; + + /** + * Parses the source into the parsed document. + */ + ParsedDocument parse(String source) throws MapperParsingException; + + void addFieldMapperListener(FieldMapperListener fieldMapperListener, boolean includeExisting); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapperNotFoundException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapperNotFoundException.java new file mode 100644 index 00000000000..b729cfefbd1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapperNotFoundException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +/** + * @author kimchy (Shay Banon) + */ +public class DocumentMapperNotFoundException extends MapperException { + + public DocumentMapperNotFoundException(String message) { + super(message); + } + + public DocumentMapperNotFoundException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java new file mode 100644 index 00000000000..984a04b379a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +/** + * @author kimchy (Shay Banon) + */ +public interface DocumentMapperParser { + + /** + * Parses the source mapping definition into a document mapper with the specified + * type (overriding the one defined in the source mapping). 
+ */ + DocumentMapper parse(String type, String mappingSource) throws MapperParsingException; + + /** + * Parses the source mapping definition into a document mapper. + */ + DocumentMapper parse(String mappingSource) throws MapperParsingException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java new file mode 100644 index 00000000000..9b0319c7035 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.elasticsearch.util.concurrent.ThreadSafe; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public interface FieldMapper { + + /** + * The name of the field (this is not what we store in the index). + */ + String name(); + + /** + * The indexed name of the field. 
This is the name under which we will + * store it in the index. + */ + String indexName(); + + /** + * The full name of the field. If it is under a certain context (for example, + * in json it exists within an object with a given name), then the context + * will be included. Expected to end with the {@link #name()}. + */ + String fullName(); + + Field.Index index(); + + boolean indexed(); + + boolean analyzed(); + + Field.Store store(); + + boolean stored(); + + Field.TermVector termVector(); + + float boost(); + + boolean omitNorms(); + + boolean omitTermFreqAndPositions(); + + /** + * The analyzer that will be used to index the field. + */ + Analyzer indexAnalyzer(); + + /** + * The analyzer that will be used to search the field. + */ + Analyzer searchAnalyzer(); + + /** + * Returns the value that will be used as a result for search. Can be only of specific types... . + */ + Object valueForSearch(Fieldable field); + + /** + * Returns the actual value of the field. + */ + T value(Fieldable field); + + /** + * Returns the actual value of the field as string. + */ + String valueAsString(Fieldable field); + + /** + * Returns the indexed value. + */ + String indexedValue(String value); + + /** + * Returns the indexed value. + */ + String indexedValue(T value); + + Query fieldQuery(String value); + + Filter fieldFilter(String value); + + /** + * Constructs a range query based on the mapper. + */ + Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper); + + /** + * Constructs a range query filter based on the mapper. 
+ */ + Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper); + + int sortType(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java new file mode 100644 index 00000000000..4b11926f282 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +/** + * @author kimchy (Shay Banon) + */ +public interface FieldMapperListener { + + void fieldMapper(FieldMapper fieldMapper); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java new file mode 100644 index 00000000000..cb6b75f9c79 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterators; +import com.google.common.collect.UnmodifiableIterator; +import org.elasticsearch.util.concurrent.Immutable; + +/** + * A holder for several {@link FieldMapper}. + * + * @author kimchy (Shay Banon) + */ +@Immutable +public class FieldMappers implements Iterable { + + private final ImmutableList fieldMappers; + + public FieldMappers() { + this.fieldMappers = ImmutableList.of(); + } + + public FieldMappers(FieldMapper fieldMapper) { + this(new FieldMapper[]{fieldMapper}); + } + + public FieldMappers(FieldMapper[] fieldMappers) { + if (fieldMappers == null) { + fieldMappers = new FieldMapper[0]; + } + this.fieldMappers = ImmutableList.copyOf(Iterators.forArray(fieldMappers)); + } + + public FieldMappers(ImmutableList fieldMappers) { + this.fieldMappers = fieldMappers; + } + + public FieldMapper mapper() { + if (fieldMappers.isEmpty()) { + return null; + } + return fieldMappers.get(0); + } + + public ImmutableList mappers() { + return this.fieldMappers; + } + + @Override public UnmodifiableIterator iterator() { + return fieldMappers.iterator(); + } + + /** + * Concats and returns a new {@link FieldMappers}. 
+ */ + public FieldMappers concat(FieldMapper mapper) { + return new FieldMappers(new ImmutableList.Builder().addAll(fieldMappers).add(mapper).build()); + } + + /** + * Concats and returns a new {@link FieldMappers}. + */ + public FieldMappers concat(FieldMappers mappers) { + return new FieldMappers(new ImmutableList.Builder().addAll(fieldMappers).addAll(mappers).build()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMappersFieldSelector.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMappersFieldSelector.java new file mode 100644 index 00000000000..cb3d3bf29ba --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/FieldMappersFieldSelector.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorResult; + +import java.util.HashSet; + +/** + * @author kimchy (Shay Banon) + */ +public class FieldMappersFieldSelector implements FieldSelector { + + private final HashSet names = new HashSet(); + + public void add(FieldMappers fieldMappers) { + for (FieldMapper fieldMapper : fieldMappers) { + names.add(fieldMapper.indexName()); + } + } + + @Override public FieldSelectorResult accept(String fieldName) { + if (names.contains(fieldName)) { + return FieldSelectorResult.LOAD; + } + return FieldSelectorResult.NO_LOAD; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java new file mode 100644 index 00000000000..c4e2862e8fe --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.Document; + +/** + * @author kimchy (Shay Banon) + */ +public interface IdFieldMapper extends FieldMapper { + + String value(Document document); +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/InvalidTypeNameException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/InvalidTypeNameException.java new file mode 100644 index 00000000000..3c8698e9c5b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/InvalidTypeNameException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +/** + * @author kimchy (Shay Banon) + */ +public class InvalidTypeNameException extends MapperException { + + public InvalidTypeNameException(String message) { + super(message); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java new file mode 100644 index 00000000000..38ca7538fd1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +/** + * @author kimchy (Shay Banon) + */ +public class MapperCompressionException extends MapperException { + + public MapperCompressionException(String message) { + super(message); + } + + public MapperCompressionException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperException.java new file mode 100644 index 00000000000..d085d12a22f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class MapperException extends ElasticSearchException { + + public MapperException(String message) { + super(message); + } + + public MapperException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java new file mode 100644 index 00000000000..18e4661cfec --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +/** + * @author kimchy (Shay Banon) + */ +public class MapperParsingException extends MapperException { + + public MapperParsingException(String message) { + super(message); + } + + public MapperParsingException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperService.java new file mode 100644 index 00000000000..c54a1165b86 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -0,0 +1,407 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.UnmodifiableIterator; +import com.google.inject.Inject; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.FailedToResolveConfigException; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexLifecycle; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.mapper.json.JsonDocumentMapperParser; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.io.Streams; +import org.elasticsearch.util.settings.Settings; + +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.net.MalformedURLException; +import java.net.URL; + +import static org.elasticsearch.util.MapBuilder.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +@ThreadSafe +public class MapperService extends AbstractIndexComponent implements Iterable { + + /** + * Will create types automatically if they do not exist in the repo yet + */ + private final boolean dynamic; + + private final String dynamicMappingLocation; + + private final URL dynamicMappingUrl; + + private final ClassLoader indexClassLoader; + + private final String dynamicMappingSource; + + private volatile ImmutableMap mappers = ImmutableMap.of(); + + private final Object mutex = new Object(); + + private volatile ImmutableMap nameFieldMappers = ImmutableMap.of(); + private volatile ImmutableMap indexNameFieldMappers = ImmutableMap.of(); + private volatile ImmutableMap fullNameFieldMappers = ImmutableMap.of(); + private volatile FieldMappers idFieldMappers = new FieldMappers(); + private
volatile FieldMappers typeFieldMappers = new FieldMappers(); + private volatile FieldMappers uidFieldMappers = new FieldMappers(); + private volatile FieldMappers sourceFieldMappers = new FieldMappers(); + + // for now, just use the json one. Can work on it more to support custom ones + private final DocumentMapperParser documentParser; + + private final InternalFieldMapperListener fieldMapperListener = new InternalFieldMapperListener(); + + private final SmartIndexNameSearchAnalyzer searchAnalyzer; + + @Inject public MapperService(Index index, @IndexSettings Settings indexSettings, Environment environment, AnalysisService analysisService) { + super(index, indexSettings); + this.documentParser = new JsonDocumentMapperParser(analysisService); + this.searchAnalyzer = new SmartIndexNameSearchAnalyzer(analysisService.defaultSearchAnalyzer()); + this.indexClassLoader = indexSettings.getClassLoader(); + + this.dynamic = componentSettings.getAsBoolean("dynamic", true); + String dynamicMappingLocation = componentSettings.get("dynamicMappingLocation"); + URL dynamicMappingUrl; + if (dynamicMappingLocation == null) { + try { + dynamicMappingUrl = environment.resolveConfig("dynamic-mapping.json"); + } catch (FailedToResolveConfigException e) { + // not there, default to the built in one + dynamicMappingUrl = indexClassLoader.getResource("org/elasticsearch/index/mapper/json/dynamic-mapping.json"); + } + } else { + try { + dynamicMappingUrl = environment.resolveConfig(dynamicMappingLocation); + } catch (FailedToResolveConfigException e) { + // not there, default to the built in one + try { + dynamicMappingUrl = new File(dynamicMappingLocation).toURI().toURL(); + } catch (MalformedURLException e1) { + throw new FailedToResolveConfigException("Failed to resolve dynamic mapping location [" + dynamicMappingLocation + "]"); + } + } + } + this.dynamicMappingUrl = dynamicMappingUrl; + if (dynamicMappingLocation == null) { + this.dynamicMappingLocation = 
dynamicMappingUrl.toExternalForm(); + } else { + this.dynamicMappingLocation = dynamicMappingLocation; + } + + if (dynamic) { + try { + dynamicMappingSource = Streams.copyToString(new InputStreamReader(dynamicMappingUrl.openStream(), "UTF8")); + } catch (IOException e) { + throw new MapperException("Failed to load default mapping source from [" + dynamicMappingLocation + "]", e); + } + } else { + dynamicMappingSource = null; + } + logger.debug("Using dynamic [{}] with location [{}] and source [{}]", new Object[]{dynamic, dynamicMappingLocation, dynamicMappingSource}); + } + + @Override public UnmodifiableIterator iterator() { + return mappers.values().iterator(); + } + + public DocumentMapper type(String type) { + DocumentMapper mapper = mappers.get(type); + if (mapper != null) { + return mapper; + } + if (!dynamic) { + return null; + } + // go ahead and dynamically create it + synchronized (mutex) { + mapper = mappers.get(type); + if (mapper != null) { + return mapper; + } + add(type, dynamicMappingSource); + return mappers.get(type); + } + } + + public void add(String type, String mappingSource) { + add(documentParser.parse(type, mappingSource)); + } + + public void add(String mappingSource) throws MapperParsingException { + add(documentParser.parse(mappingSource)); + } + + /** + * Just parses and returns the mapper without adding it. 
+ */ + public DocumentMapper parse(String mappingType, String mappingSource) throws MapperParsingException { + return documentParser.parse(mappingType, mappingSource); + } + + public boolean hasMapping(String mappingType) { + return mappers.containsKey(mappingType); + } + + public DocumentMapper documentMapper(String type) { + return mappers.get(type); + } + + public FieldMappers idFieldMappers() { + return this.idFieldMappers; + } + + public FieldMappers typeFieldMappers() { + return this.typeFieldMappers; + } + + public FieldMappers sourceFieldMappers() { + return this.sourceFieldMappers; + } + + public FieldMappers uidFieldMappers() { + return this.uidFieldMappers; + } + + /** + * Returns {@link FieldMappers} for all the {@link FieldMapper}s that are registered + * under the given name across all the different {@link DocumentMapper} types. + * + * @param name The name to return all the {@link FieldMappers} for across all {@link DocumentMapper}s. + * @return All the {@link FieldMappers} across all {@link DocumentMapper}s + */ + public FieldMappers name(String name) { + return nameFieldMappers.get(name); + } + + /** + * Returns {@link FieldMappers} for all the {@link FieldMapper}s that are registered + * under the given indexName across all the different {@link DocumentMapper} types. + * + * @param indexName The indexName to return all the {@link FieldMappers} for across all {@link DocumentMapper}s. + * @return All the {@link FieldMappers} across all {@link DocumentMapper}s for the given indexName. + */ + public FieldMappers indexName(String indexName) { + return indexNameFieldMappers.get(indexName); + } + + /** + * Returns the {@link FieldMappers} of all the {@link FieldMapper}s that are + * registered under the given fullName ({@link FieldMapper#fullName()}) across + * all the different {@link DocumentMapper} types. + * + * @param fullName The full name + * @return All the {@link FieldMappers} across all the {@link DocumentMapper}s for the given fullName.
+ */ + public FieldMappers fullName(String fullName) { + return fullNameFieldMappers.get(fullName); + } + + /** + * Same as {@link #smartName(String)}, except it returns just the field mappers. + */ + public FieldMappers smartNameFieldMappers(String smartName) { + int dotIndex = smartName.indexOf('.'); + if (dotIndex != -1) { + String possibleType = smartName.substring(0, dotIndex); + DocumentMapper possibleDocMapper = mappers.get(possibleType); + if (possibleDocMapper != null) { + String possibleName = smartName.substring(dotIndex + 1); + FieldMappers mappers = possibleDocMapper.mappers().fullName(possibleName); + if (mappers != null) { + return mappers; + } + mappers = possibleDocMapper.mappers().indexName(possibleName); + if (mappers != null) { + return mappers; + } + } + } + FieldMappers mappers = fullName(smartName); + if (mappers != null) { + return mappers; + } + return indexName(smartName); + } + + public SmartNameFieldMappers smartName(String smartName) { + int dotIndex = smartName.indexOf('.'); + if (dotIndex != -1) { + String possibleType = smartName.substring(0, dotIndex); + DocumentMapper possibleDocMapper = mappers.get(possibleType); + if (possibleDocMapper != null) { + String possibleName = smartName.substring(dotIndex + 1); + FieldMappers mappers = possibleDocMapper.mappers().fullName(possibleName); + if (mappers != null) { + return new SmartNameFieldMappers(mappers, possibleDocMapper); + } + mappers = possibleDocMapper.mappers().indexName(possibleName); + if (mappers != null) { + return new SmartNameFieldMappers(mappers, possibleDocMapper); + } + } + } + FieldMappers fieldMappers = fullName(smartName); + if (fieldMappers != null) { + return new SmartNameFieldMappers(fieldMappers, null); + } + fieldMappers = indexName(smartName); + if (fieldMappers != null) { + return new SmartNameFieldMappers(fieldMappers, null); + } + return null; + } + + public void add(DocumentMapper mapper) { + synchronized (mutex) { + if (mapper.type().charAt(0) == '_') { + 
throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); + } + mappers = newMapBuilder(mappers).put(mapper.type(), mapper).immutableMap(); + mapper.addFieldMapperListener(fieldMapperListener, true); + } + } + + public Analyzer searchAnalyzer() { + return this.searchAnalyzer; + } + + public static class SmartNameFieldMappers { + private final FieldMappers fieldMappers; + private final DocumentMapper docMapper; + + public SmartNameFieldMappers(FieldMappers fieldMappers, @Nullable DocumentMapper docMapper) { + this.fieldMappers = fieldMappers; + this.docMapper = docMapper; + } + + public FieldMappers fieldMappers() { + return fieldMappers; + } + + public boolean hasDocMapper() { + return docMapper != null; + } + + public DocumentMapper docMapper() { + return docMapper; + } + } + + private class SmartIndexNameSearchAnalyzer extends Analyzer { + + private final Analyzer defaultAnalyzer; + + private SmartIndexNameSearchAnalyzer(Analyzer defaultAnalyzer) { + this.defaultAnalyzer = defaultAnalyzer; + } + + @Override public TokenStream tokenStream(String fieldName, Reader reader) { + int dotIndex = fieldName.indexOf('.'); + if (dotIndex != -1) { + String possibleType = fieldName.substring(0, dotIndex); + DocumentMapper possibleDocMapper = mappers.get(possibleType); + if (possibleDocMapper != null) { + return possibleDocMapper.mappers().searchAnalyzer().tokenStream(fieldName, reader); + } + } + FieldMappers mappers = indexNameFieldMappers.get(fieldName); + if (mappers != null && mappers.mapper() != null && mappers.mapper().searchAnalyzer() != null) { + return mappers.mapper().searchAnalyzer().tokenStream(fieldName, reader); + } + return defaultAnalyzer.tokenStream(fieldName, reader); + } + + @Override public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { + int dotIndex = fieldName.indexOf('.'); + if (dotIndex != -1) { + String possibleType = fieldName.substring(0, dotIndex); + DocumentMapper 
possibleDocMapper = mappers.get(possibleType); + if (possibleDocMapper != null) { + return possibleDocMapper.mappers().searchAnalyzer().reusableTokenStream(fieldName, reader); + } + } + FieldMappers mappers = indexNameFieldMappers.get(fieldName); + if (mappers != null && mappers.mapper() != null && mappers.mapper().searchAnalyzer() != null) { + return mappers.mapper().searchAnalyzer().reusableTokenStream(fieldName, reader); + } + return defaultAnalyzer.reusableTokenStream(fieldName, reader); + } + } + + private class InternalFieldMapperListener implements FieldMapperListener { + @Override public void fieldMapper(FieldMapper fieldMapper) { + synchronized (mutex) { + if (fieldMapper instanceof IdFieldMapper) { + idFieldMappers = idFieldMappers.concat(fieldMapper); + } + if (fieldMapper instanceof TypeFieldMapper) { + typeFieldMappers = typeFieldMappers.concat(fieldMapper); + } + if (fieldMapper instanceof SourceFieldMapper) { + sourceFieldMappers = sourceFieldMappers.concat(fieldMapper); + } + if (fieldMapper instanceof UidFieldMapper) { + uidFieldMappers = uidFieldMappers.concat(fieldMapper); + } + + + FieldMappers mappers = nameFieldMappers.get(fieldMapper.name()); + if (mappers == null) { + mappers = new FieldMappers(fieldMapper); + } else { + mappers = mappers.concat(fieldMapper); + } + + nameFieldMappers = newMapBuilder(nameFieldMappers).put(fieldMapper.name(), mappers).immutableMap(); + + mappers = indexNameFieldMappers.get(fieldMapper.indexName()); + if (mappers == null) { + mappers = new FieldMappers(fieldMapper); + } else { + mappers = mappers.concat(fieldMapper); + } + indexNameFieldMappers = newMapBuilder(indexNameFieldMappers).put(fieldMapper.indexName(), mappers).immutableMap(); + + mappers = fullNameFieldMappers.get(fieldMapper.indexName()); + if (mappers == null) { + mappers = new FieldMappers(fieldMapper); + } else { + mappers = mappers.concat(fieldMapper); + } + fullNameFieldMappers = newMapBuilder(fullNameFieldMappers).put(fieldMapper.fullName(), 
mappers).immutableMap(); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java new file mode 100644 index 00000000000..577e8e9e344 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import com.google.inject.AbstractModule; + +/** + * @author kimchy (Shay Banon) + */ +public class MapperServiceModule extends AbstractModule { + + @Override protected void configure() { + bind(MapperService.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java new file mode 100644 index 00000000000..58dd6aa0627 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.Document; + +/** + * @author kimchy (Shay Banon) + */ +public class ParsedDocument { + + private final String uid; + + private final String id; + + private final String type; + + private final Document document; + + private final String source; + + public ParsedDocument(String uid, String id, String type, Document document, String source) { + this.uid = uid; + this.id = id; + this.type = type; + this.document = document; + this.source = source; + } + + public String uid() { + return this.uid; + } + + public String id() { + return this.id; + } + + public String type() { + return this.type; + } + + public Document doc() { + return this.document; + } + + public String source() { + return this.source; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Document ").append("uid[").append(uid).append("] doc [").append(document).append("]"); + return sb.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java new file mode 100644 index 00000000000..6b0e09fbdbb --- /dev/null +++ 
package org.elasticsearch.index.mapper;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.elasticsearch.util.concurrent.ThreadSafe;

/**
 * A mapper that maps the actual source of a generated document.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public interface SourceFieldMapper extends FieldMapper {

    /**
     * Returns <tt>true</tt> if the source field mapper is enabled or not.
     */
    boolean enabled();

    /**
     * Extracts the source value from the given (already loaded) Lucene document.
     */
    String value(Document document);

    /**
     * A field selector that loads just the source field.
     */
    FieldSelector fieldSelector();
}
package org.elasticsearch.index.mapper;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.elasticsearch.util.concurrent.ThreadSafe;

/**
 * A mapper that maps the type of the resource into the document.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public interface TypeFieldMapper extends FieldMapper {

    /**
     * Extracts the type value from the given (already loaded) Lucene document.
     */
    String value(Document document);

    /**
     * Creates a Lucene term for the given type value, suitable for type-based
     * lookups/deletes against the index.
     */
    Term term(String value);
}
/**
 * An immutable (type, id) pair identifying a document, together with helpers
 * to encode/decode it to the single "uid" string form {@code type#id}.
 *
 * Decoding uses the LAST delimiter occurrence, so the type may itself contain
 * {@code '#'} but the id may not.
 *
 * @author kimchy (Shay Banon)
 */
public final class Uid {

    /** Separator placed between type and id in the encoded uid string. */
    public static final char DELIMITER = '#';

    private final String type;

    private final String id;

    public Uid(String type, String id) {
        this.type = type;
        this.id = id;
    }

    public String type() {
        return type;
    }

    public String id() {
        return id;
    }

    /**
     * Decodes an encoded uid string back into a {@link Uid}.
     *
     * @throws IllegalArgumentException if the string contains no delimiter
     *                                  (previously this surfaced as a raw
     *                                  StringIndexOutOfBoundsException)
     */
    public static Uid createUid(String uid) {
        int delimiterIndex = uid.lastIndexOf(DELIMITER);
        if (delimiterIndex == -1) {
            throw new IllegalArgumentException("uid [" + uid + "] does not contain the delimiter [" + DELIMITER + "]");
        }
        return new Uid(uid.substring(0, delimiterIndex), uid.substring(delimiterIndex + 1));
    }

    /**
     * Encodes the given type and id into the {@code type#id} uid form.
     */
    public static String createUid(String type, String id) {
        return createUid(new StringBuilder(), type, id);
    }

    /**
     * Encodes into the caller supplied builder (allows builder reuse by callers).
     */
    public static String createUid(StringBuilder sb, String type, String id) {
        return sb.append(type).append(DELIMITER).append(id).toString();
    }
}
package org.elasticsearch.index.mapper;

import org.apache.lucene.index.Term;
import org.elasticsearch.util.concurrent.ThreadSafe;

/**
 * A mapper for the internal "uid" field that uniquely identifies a document
 * by its combined type and id.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public interface UidFieldMapper extends FieldMapper {

    /**
     * The name of the uid field in the index.
     */
    String name();

    /**
     * Creates the uid term for the given type and id pair.
     */
    Term term(String type, String id);

    /**
     * Creates the uid term from an already-encoded uid string.
     */
    Term term(String uid);
}
package org.elasticsearch.index.mapper.json;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.codehaus.jackson.JsonToken;

import java.io.IOException;

/**
 * Maps a JSON value holding binary content. The binary payload is always
 * stored and never indexed; it has no string representation.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonBinaryFieldMapper extends JsonFieldMapper {

    public static class Builder extends JsonFieldMapper.Builder {

        public Builder(String name) {
            super(name);
            builder = this;
        }

        @Override public JsonBinaryFieldMapper build(BuilderContext context) {
            return new JsonBinaryFieldMapper(name, buildIndexName(context), buildFullName(context));
        }
    }

    protected JsonBinaryFieldMapper(String name, String indexName, String fullName) {
        // Binary content: stored, not indexed, no term vectors, no analysis.
        super(name, indexName, fullName, Field.Index.NO, Field.Store.YES, Field.TermVector.NO, 1.0f, true, true, null, null);
    }

    @Override public byte[] value(Fieldable field) {
        return field.getBinaryValue();
    }

    @Override public String valueAsString(Fieldable field) {
        // Binary content has no meaningful string form.
        return null;
    }

    @Override public String indexedValue(String value) {
        // Not indexed; pass through unchanged.
        return value;
    }

    @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException {
        // Explicit JSON null produces no field.
        if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) {
            return null;
        }
        byte[] binaryValue = jsonContext.jp().getBinaryValue();
        return binaryValue == null ? null : new Field(indexName, binaryValue, Field.Store.YES);
    }
}
package org.elasticsearch.index.mapper.json;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.util.lucene.Lucene;

import java.io.IOException;

/**
 * Maps a JSON boolean value. Booleans are indexed as the single-character
 * terms "T" / "F" (keyword analyzed), with an optional configured value to
 * substitute for explicit JSON nulls.
 *
 * @author kimchy (Shay Banon)
 */
// TODO this can be made better, maybe storing a byte for it?
public class JsonBooleanFieldMapper extends JsonFieldMapper {

    public static class Defaults extends JsonFieldMapper.Defaults {
        public static final boolean OMIT_NORMS = true;
        public static final Boolean NULL_VALUE = null;
    }

    public static class Builder extends JsonFieldMapper.Builder {

        private Boolean nullValue = Defaults.NULL_VALUE;

        public Builder(String name) {
            super(name);
            this.omitNorms = Defaults.OMIT_NORMS;
            this.builder = this;
        }

        public Builder nullValue(boolean nullValue) {
            this.nullValue = nullValue;
            return this;
        }

        @Override public JsonBooleanFieldMapper build(BuilderContext context) {
            return new JsonBooleanFieldMapper(name, buildIndexName(context), buildFullName(context), index, store,
                    termVector, boost, omitNorms, omitTermFreqAndPositions, nullValue);
        }
    }

    // Value to index when the JSON document holds an explicit null; null means
    // "index nothing" in that case.
    private Boolean nullValue;

    protected JsonBooleanFieldMapper(String name, String indexName, String fullName, Field.Index index, Field.Store store, Field.TermVector termVector,
                                     float boost, boolean omitNorms, boolean omitTermFreqAndPositions, Boolean nullValue) {
        super(name, indexName, fullName, index, store, termVector, boost, omitNorms, omitTermFreqAndPositions,
                Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER);
        this.nullValue = nullValue;
    }

    @Override public Boolean value(Fieldable field) {
        return Boolean.valueOf(valueAsString(field));
    }

    @Override public String valueAsString(Fieldable field) {
        // Stored form is "T" / "F"; translate back to the JSON literal.
        if (field.stringValue().charAt(0) == 'T') {
            return "true";
        }
        return "false";
    }

    @Override public String indexedValue(String value) {
        // Only the exact literal "true" maps to "T"; everything else
        // (including null / empty) is treated as false.
        if (value != null && value.equals("true")) {
            return "T";
        }
        return "F";
    }

    @Override public String indexedValue(Boolean value) {
        // null is treated as false, matching the String overload.
        return Boolean.TRUE.equals(value) ? "T" : "F";
    }

    @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException {
        String value;
        switch (jsonContext.jp().getCurrentToken()) {
            case VALUE_TRUE:
                value = "T";
                break;
            case VALUE_FALSE:
                value = "F";
                break;
            case VALUE_NULL:
                // Explicit null: use the configured substitute, or skip the field.
                value = nullValue == null ? null : (nullValue ? "T" : "F");
                break;
            default:
                // Non-boolean token: nothing to index.
                return null;
        }
        if (value == null) {
            return null;
        }
        return new Field(indexName, value, store, index, termVector);
    }
}
package org.elasticsearch.index.mapper.json;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.*;
import org.apache.lucene.util.NumericUtils;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.analysis.NumericFloatAnalyzer;
import org.elasticsearch.index.mapper.BoostFieldMapper;
import org.elasticsearch.util.Numbers;

import java.io.IOException;

/**
 * Maps the internal "_boost" field: a float value in the source document that
 * is applied as the Lucene document boost. By default it is neither indexed
 * nor stored; the boost is applied as a side effect during parsing.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonBoostFieldMapper extends JsonNumberFieldMapper implements BoostFieldMapper {

    public static class Defaults extends JsonNumberFieldMapper.Defaults {
        public static final String NAME = "_boost";
        public static final Float NULL_VALUE = null;
        // The boost value itself is not searchable/retrievable by default.
        public static final Field.Index INDEX = Field.Index.NO;
        public static final Field.Store STORE = Field.Store.NO;
    }

    public static class Builder extends JsonNumberFieldMapper.Builder {

        protected Float nullValue = Defaults.NULL_VALUE;

        public Builder(String name) {
            super(name);
            builder = this;
            index = Defaults.INDEX;
            store = Defaults.STORE;
        }

        public Builder nullValue(float nullValue) {
            this.nullValue = nullValue;
            return this;
        }

        @Override public JsonBoostFieldMapper build(BuilderContext context) {
            return new JsonBoostFieldMapper(name, buildIndexName(context),
                    precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, nullValue);
        }
    }


    // Boost substituted for an explicit JSON null; null means "no boost".
    private final Float nullValue;

    protected JsonBoostFieldMapper() {
        this(Defaults.NAME, Defaults.NAME);
    }

    protected JsonBoostFieldMapper(String name, String indexName) {
        this(name, indexName, Defaults.PRECISION_STEP, Defaults.INDEX, Defaults.STORE,
                Defaults.BOOST, Defaults.OMIT_NORMS, Defaults.OMIT_TERM_FREQ_AND_POSITIONS, Defaults.NULL_VALUE);
    }

    protected JsonBoostFieldMapper(String name, String indexName, int precisionStep, Field.Index index, Field.Store store,
                                   float boost, boolean omitNorms, boolean omitTermFreqAndPositions,
                                   Float nullValue) {
        // Search-time analyzer uses Integer.MAX_VALUE precision step so exact
        // values match a single term.
        super(name, indexName, name, precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions,
                new NumericFloatAnalyzer(precisionStep), new NumericFloatAnalyzer(Integer.MAX_VALUE));
        this.nullValue = nullValue;
    }

    @Override protected int maxPrecisionStep() {
        // float is 32 bits wide.
        return 32;
    }

    @Override public Float value(Fieldable field) {
        byte[] value = field.getBinaryValue();
        if (value == null) {
            // NaN doubles as the "no value" sentinel throughout this class.
            return Float.NaN;
        }
        return Numbers.bytesToFloat(value);
    }

    @Override public String indexedValue(String value) {
        return indexedValue(Float.parseFloat(value));
    }

    @Override public String indexedValue(Float value) {
        // Trie-encoded form used by NumericRangeQuery/Filter.
        return NumericUtils.floatToPrefixCoded(value);
    }

    @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return NumericRangeQuery.newFloatRange(indexName, precisionStep,
                lowerTerm == null ? null : Float.parseFloat(lowerTerm),
                upperTerm == null ? null : Float.parseFloat(upperTerm),
                includeLower, includeUpper);
    }

    @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return NumericRangeFilter.newFloatRange(indexName, precisionStep,
                lowerTerm == null ? null : Float.parseFloat(lowerTerm),
                upperTerm == null ? null : Float.parseFloat(upperTerm),
                includeLower, includeUpper);
    }

    @Override public void parse(JsonParseContext jsonContext) throws IOException {
        // we override parse since we want to handle cases where it is not indexed and not stored (the default)
        float value = parsedFloatValue(jsonContext);
        if (!Float.isNaN(value)) {
            jsonContext.doc().setBoost(value);
        }
        // NOTE(review): super.parse will reach parseCreateField below, which
        // sets the boost again when a field is actually created - harmless
        // but redundant; confirm against JsonNumberFieldMapper.parse.
        super.parse(jsonContext);
    }

    @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException {
        float value = parsedFloatValue(jsonContext);
        if (Float.isNaN(value)) {
            return null;
        }
        jsonContext.doc().setBoost(value);
        Field field = null;
        if (stored()) {
            field = new Field(indexName, Numbers.floatToBytes(value), store);
            if (indexed()) {
                field.setTokenStream(popCachedStream(precisionStep).setFloatValue(value));
            }
        } else if (indexed()) {
            field = new Field(indexName, popCachedStream(precisionStep).setFloatValue(value));
        }
        return field;
    }

    // Reads the current token as a float, substituting nullValue (or NaN to
    // signal "nothing to do") for an explicit JSON null.
    private float parsedFloatValue(JsonParseContext jsonContext) throws IOException {
        float value;
        if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) {
            if (nullValue == null) {
                return Float.NaN;
            }
            value = nullValue;
        } else {
            value = jsonContext.jp().getFloatValue();
        }
        return value;
    }

    @Override public int sortType() {
        return SortField.FLOAT;
    }
}
package org.elasticsearch.index.mapper.json;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.*;
import org.apache.lucene.util.NumericUtils;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.analysis.NumericDateAnalyzer;
import org.elasticsearch.util.Numbers;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;

import java.io.IOException;

/**
 * Maps a JSON date value. Dates are parsed with a Joda {@link DateTimeFormatter}
 * and indexed as trie-encoded long millis (epoch), enabling numeric range
 * queries/filters. Sorts as a long.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonDateFieldMapper extends JsonNumberFieldMapper {

    public static class Defaults extends JsonNumberFieldMapper.Defaults {
        // ISO date with optional time, interpreted in UTC.
        public static final DateTimeFormatter DATE_TIME_FORMATTER =
                ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);

        public static final String NULL_VALUE = null;
    }

    public static class Builder extends JsonNumberFieldMapper.Builder {

        protected String nullValue = Defaults.NULL_VALUE;

        protected DateTimeFormatter dateTimeFormatter = Defaults.DATE_TIME_FORMATTER;

        public Builder(String name) {
            super(name);
            builder = this;
        }

        public Builder nullValue(String nullValue) {
            this.nullValue = nullValue;
            return this;
        }

        public Builder dateTimeFormatter(DateTimeFormatter dateTimeFormatter) {
            this.dateTimeFormatter = dateTimeFormatter;
            return this;
        }

        @Override public JsonDateFieldMapper build(BuilderContext context) {
            return new JsonDateFieldMapper(name, buildIndexName(context), buildFullName(context), dateTimeFormatter,
                    precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, nullValue);
        }
    }


    // Joda formatters are thread-safe, so sharing this instance is fine.
    private final DateTimeFormatter dateTimeFormatter;

    // Date string substituted for an explicit JSON null; null means "skip".
    private final String nullValue;

    protected JsonDateFieldMapper(String name, String indexName, String fullName, DateTimeFormatter dateTimeFormatter, int precisionStep,
                                  Field.Index index, Field.Store store,
                                  float boost, boolean omitNorms, boolean omitTermFreqAndPositions,
                                  String nullValue) {
        // Search-time analyzer uses Integer.MAX_VALUE precision step so exact
        // values match a single term.
        super(name, indexName, fullName, precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions,
                new NumericDateAnalyzer(precisionStep, dateTimeFormatter), new NumericDateAnalyzer(Integer.MAX_VALUE, dateTimeFormatter));
        this.dateTimeFormatter = dateTimeFormatter;
        this.nullValue = nullValue;
    }

    @Override protected int maxPrecisionStep() {
        // millis are stored as a 64-bit long.
        return 64;
    }

    @Override public Long value(Fieldable field) {
        byte[] value = field.getBinaryValue();
        if (value == null) {
            // Long.MIN_VALUE doubles as the "no value" sentinel here.
            return Long.MIN_VALUE;
        }
        return Numbers.bytesToLong(value);
    }

    @Override public String valueAsString(Fieldable field) {
        // Render stored millis back through the configured format.
        return dateTimeFormatter.print(value(field));
    }

    @Override public String indexedValue(String value) {
        return NumericUtils.longToPrefixCoded(dateTimeFormatter.parseMillis(value));
    }

    @Override public String indexedValue(Long value) {
        return NumericUtils.longToPrefixCoded(value);
    }

    @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return NumericRangeQuery.newLongRange(indexName, precisionStep,
                lowerTerm == null ? null : dateTimeFormatter.parseMillis(lowerTerm),
                upperTerm == null ? null : dateTimeFormatter.parseMillis(upperTerm),
                includeLower, includeUpper);
    }

    @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return NumericRangeFilter.newLongRange(indexName, precisionStep,
                lowerTerm == null ? null : dateTimeFormatter.parseMillis(lowerTerm),
                upperTerm == null ? null : dateTimeFormatter.parseMillis(upperTerm),
                includeLower, includeUpper);
    }

    @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException {
        String dateAsString;
        if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) {
            dateAsString = nullValue;
        } else {
            dateAsString = jsonContext.jp().getText();
        }
        if (dateAsString == null) {
            // Explicit null and no configured substitute: index nothing.
            return null;
        }
        long value = dateTimeFormatter.parseMillis(dateAsString);
        Field field = null;
        if (stored()) {
            field = new Field(indexName, Numbers.longToBytes(value), store);
            if (indexed()) {
                field.setTokenStream(popCachedStream(precisionStep).setLongValue(value));
            }
        } else if (indexed()) {
            field = new Field(indexName, popCachedStream(precisionStep).setLongValue(value));
        }
        return field;
    }

    @Override public int sortType() {
        return SortField.LONG;
    }
}
package org.elasticsearch.index.mapper.json;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.Preconditions;
import org.elasticsearch.util.io.FastStringReader;
import org.elasticsearch.util.json.Jackson;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static com.google.common.collect.Lists.*;

/**
 * A {@link DocumentMapper} implementation that parses JSON sources into Lucene
 * documents. Built via its nested {@link Builder}; holds the internal field
 * mappers (uid/id/type/source/boost) plus the user-defined root object mapper,
 * and maintains a flattened, listener-observable view of all field mappers.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonDocumentMapper implements DocumentMapper {

    public static class Builder {

        private JsonUidFieldMapper uidFieldMapper = new JsonUidFieldMapper();

        private JsonIdFieldMapper idFieldMapper = new JsonIdFieldMapper();

        private JsonTypeFieldMapper typeFieldMapper = new JsonTypeFieldMapper();

        private JsonSourceFieldMapper sourceFieldMapper = new JsonSourceFieldMapper();

        private JsonBoostFieldMapper boostFieldMapper = new JsonBoostFieldMapper();

        private Analyzer indexAnalyzer;

        private Analyzer searchAnalyzer;

        private final JsonObjectMapper rootObjectMapper;

        private String mappingSource;

        // Shared builder context so nested mappers compute consistent full names.
        private JsonMapper.BuilderContext builderContext = new JsonMapper.BuilderContext(new JsonPath(1));

        public Builder(JsonObjectMapper.Builder builder) {
            this.rootObjectMapper = builder.build(builderContext);
        }

        public Builder sourceField(JsonSourceFieldMapper.Builder builder) {
            this.sourceFieldMapper = builder.build(builderContext);
            return this;
        }

        public Builder idField(JsonIdFieldMapper.Builder builder) {
            this.idFieldMapper = builder.build(builderContext);
            return this;
        }

        public Builder uidField(JsonUidFieldMapper.Builder builder) {
            this.uidFieldMapper = builder.build(builderContext);
            return this;
        }

        public Builder typeField(JsonTypeFieldMapper.Builder builder) {
            this.typeFieldMapper = builder.build(builderContext);
            return this;
        }

        public Builder boostField(JsonBoostFieldMapper.Builder builder) {
            this.boostFieldMapper = builder.build(builderContext);
            return this;
        }

        public Builder mappingSource(String mappingSource) {
            this.mappingSource = mappingSource;
            return this;
        }

        public Builder indexAnalyzer(Analyzer indexAnalyzer) {
            this.indexAnalyzer = indexAnalyzer;
            return this;
        }

        public boolean hasIndexAnalyzer() {
            return indexAnalyzer != null;
        }

        public Builder searchAnalyzer(Analyzer searchAnalyzer) {
            this.searchAnalyzer = searchAnalyzer;
            return this;
        }

        public boolean hasSearchAnalyzer() {
            return searchAnalyzer != null;
        }

        public JsonDocumentMapper build() {
            Preconditions.checkNotNull(rootObjectMapper, "Json mapper builder must have the root object mapper set");
            return new JsonDocumentMapper(rootObjectMapper, uidFieldMapper, idFieldMapper, typeFieldMapper,
                    sourceFieldMapper, indexAnalyzer, searchAnalyzer, boostFieldMapper, mappingSource);
        }
    }


    // Per-thread reusable parse context, so concurrent parses do not contend
    // and per-parse allocations are avoided.
    private ThreadLocal cache = new ThreadLocal() {
        @Override protected JsonParseContext initialValue() {
            return new JsonParseContext(JsonDocumentMapper.this, new JsonPath(0));
        }
    };

    private final JsonFactory jsonFactory = Jackson.defaultJsonFactory();

    // The mapping type name; taken from the root object mapper's name.
    private final String type;

    private final String mappingSource;

    private final JsonUidFieldMapper uidFieldMapper;

    private final JsonIdFieldMapper idFieldMapper;

    private final JsonTypeFieldMapper typeFieldMapper;

    private final JsonSourceFieldMapper sourceFieldMapper;

    private final JsonBoostFieldMapper boostFieldMapper;

    private final JsonObjectMapper rootObjectMapper;

    private final Analyzer indexAnalyzer;

    private final Analyzer searchAnalyzer;

    // Replaced wholesale (copy-on-write style) under `mutex` in addFieldMapper;
    // volatile so readers always see a consistent snapshot.
    private volatile DocumentFieldMappers fieldMappers;

    private final List fieldMapperListeners = newArrayList();

    // Guards fieldMappers replacement and listener registration/notification.
    private final Object mutex = new Object();

    public JsonDocumentMapper(JsonObjectMapper rootObjectMapper,
                              JsonUidFieldMapper uidFieldMapper,
                              JsonIdFieldMapper idFieldMapper,
                              JsonTypeFieldMapper typeFieldMapper,
                              JsonSourceFieldMapper sourceFieldMapper,
                              Analyzer indexAnalyzer, Analyzer searchAnalyzer,
                              @Nullable JsonBoostFieldMapper boostFieldMapper,
                              @Nullable String mappingSource) {
        this.type = rootObjectMapper.name();
        this.mappingSource = mappingSource;
        this.rootObjectMapper = rootObjectMapper;
        this.uidFieldMapper = uidFieldMapper;
        this.idFieldMapper = idFieldMapper;
        this.typeFieldMapper = typeFieldMapper;
        this.sourceFieldMapper = sourceFieldMapper;
        this.boostFieldMapper = boostFieldMapper;

        this.indexAnalyzer = indexAnalyzer;
        this.searchAnalyzer = searchAnalyzer;

        // id (and boost, when present) are parsed as part of the root object.
        rootObjectMapper.putMapper(idFieldMapper);
        if (boostFieldMapper != null) {
            rootObjectMapper.putMapper(boostFieldMapper);
        }

        final List tempFieldMappers = new ArrayList();
        // add the basic ones
        tempFieldMappers.add(typeFieldMapper);
        tempFieldMappers.add(sourceFieldMapper);
        tempFieldMappers.add(uidFieldMapper);
        if (boostFieldMapper != null) {
            tempFieldMappers.add(boostFieldMapper);
        }
        // now traverse and get all the statically defined ones
        rootObjectMapper.traverse(new FieldMapperListener() {
            @Override public void fieldMapper(FieldMapper fieldMapper) {
                tempFieldMappers.add(fieldMapper);
            }
        });

        this.fieldMappers = new DocumentFieldMappers(this, tempFieldMappers);
    }

    @Override public String type() {
        return this.type;
    }

    @Override public String mappingSource() {
        return this.mappingSource;
    }

    @Override public UidFieldMapper uidMapper() {
        return this.uidFieldMapper;
    }

    @Override public IdFieldMapper idMapper() {
        return this.idFieldMapper;
    }

    @Override public TypeFieldMapper typeMapper() {
        return this.typeFieldMapper;
    }

    @Override public SourceFieldMapper sourceMapper() {
        return this.sourceFieldMapper;
    }

    @Override public BoostFieldMapper boostMapper() {
        return this.boostFieldMapper;
    }

    @Override public Analyzer indexAnalyzer() {
        return this.indexAnalyzer;
    }

    @Override public Analyzer searchAnalyzer() {
        return this.searchAnalyzer;
    }

    @Override public DocumentFieldMappers mappers() {
        return this.fieldMappers;
    }

    @Override public ParsedDocument parse(String source) {
        return parse(null, null, source);
    }

    /**
     * Parses the JSON source into a {@link ParsedDocument}. Both type and id
     * may be null; the id can alternatively be provided inside the source.
     *
     * @throws MapperParsingException on type mismatch or any parse failure
     */
    @Override public ParsedDocument parse(String type, String id, String source) {
        JsonParseContext jsonContext = cache.get();

        if (type != null && !type.equals(this.type)) {
            throw new MapperParsingException("Type mismatch, provide type [" + type + "] but mapper is of type [" + this.type + "]");
        }
        type = this.type;

        try {
            JsonParser jp = jsonFactory.createJsonParser(new FastStringReader(source));
            jsonContext.reset(jp, new Document(), type, source);

            // will result in JsonToken.START_OBJECT
            JsonToken token = jp.nextToken();
            if (token != JsonToken.START_OBJECT) {
                throw new MapperException("Malformed json, must start with an object");
            }
            token = jp.nextToken();
            if (token != JsonToken.FIELD_NAME) {
                throw new MapperException("Malformed json, after first object, the type name must exists");
            }
            if (!jp.getCurrentName().equals(type)) {
                // NOTE(review): `type` was just assigned this.type above, so this
                // null check looks unreachable / the message inverted - confirm
                // intended behavior when the outer field name differs from the type.
                if (type == null) {
                    throw new MapperException("Json content type [" + jp.getCurrentName() + "] does not match the type of the mapper [" + type + "]");
                }
                // continue
            } else {
                // now move to the actual content, which is the start object
                token = jp.nextToken();
                if (token != JsonToken.START_OBJECT) {
                    throw new MapperException("Malformed json, after type is must start with an object");
                }
            }

            if (sourceFieldMapper.enabled()) {
                sourceFieldMapper.parse(jsonContext);
            }
            // set the id if we have it so we can validate it later on, also, add the uid if we can
            if (id != null) {
                jsonContext.id(id);
                uidFieldMapper.parse(jsonContext);
            }
            typeFieldMapper.parse(jsonContext);

            rootObjectMapper.parse(jsonContext);

            // if we did not get the id, we need to parse the uid into the document now, after it was added
            if (id == null) {
                uidFieldMapper.parse(jsonContext);
            }
            if (jsonContext.parsedIdState() != JsonParseContext.ParsedIdState.PARSED) {
                // mark it as external, so we can parse it
                jsonContext.parsedId(JsonParseContext.ParsedIdState.EXTERNAL);
                idFieldMapper.parse(jsonContext);
            }
        } catch (IOException e) {
            throw new MapperParsingException("Failed to parse", e);
        }
        return new ParsedDocument(jsonContext.uid(), jsonContext.id(), jsonContext.type(), jsonContext.doc(), source);
    }

    // Called when a new field mapper is discovered dynamically during parsing;
    // swaps in an extended snapshot and notifies registered listeners.
    void addFieldMapper(FieldMapper fieldMapper) {
        synchronized (mutex) {
            fieldMappers = fieldMappers.concat(this, fieldMapper);
            for (FieldMapperListener listener : fieldMapperListeners) {
                listener.fieldMapper(fieldMapper);
            }
        }
    }

    @Override public void addFieldMapperListener(FieldMapperListener fieldMapperListener, boolean includeExisting) {
        synchronized (mutex) {
            fieldMapperListeners.add(fieldMapperListener);
            if (includeExisting) {
                // Replay the internal mappers first, then everything under root.
                fieldMapperListener.fieldMapper(sourceFieldMapper);
                fieldMapperListener.fieldMapper(typeFieldMapper);
                fieldMapperListener.fieldMapper(idFieldMapper);
                fieldMapperListener.fieldMapper(uidFieldMapper);
                rootObjectMapper.traverse(fieldMapperListener);
            }
        }
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.json;

import org.apache.lucene.document.Field;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ArrayNode;
import org.codehaus.jackson.node.ObjectNode;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.util.io.FastStringReader;
import org.elasticsearch.util.io.compression.GZIPCompressor;
import org.elasticsearch.util.io.compression.LzfCompressor;
import org.elasticsearch.util.io.compression.ZipCompressor;
import org.elasticsearch.util.joda.Joda;
import org.elasticsearch.util.json.Jackson;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormatter;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.*;
import static org.elasticsearch.index.mapper.json.JsonMapperBuilders.*;

/**
 * Parses a JSON mapping definition into a {@link JsonDocumentMapper}.
 *
 * <p>NOTE(review): generic type arguments stripped by the diff extraction
 * (e.g. {@code Iterator<Map.Entry<String, JsonNode>>}) have been restored,
 * and two error-message typos ("registed", "objet") fixed. Parsing logic is
 * otherwise unchanged.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonDocumentMapperParser implements DocumentMapperParser {

    // ObjectMapper is thread-safe once configured; cached for reuse.
    private final ObjectMapper objectMapper = Jackson.newObjectMapper();

    private final AnalysisService analysisService;

    public JsonDocumentMapperParser(AnalysisService analysisService) {
        this.analysisService = analysisService;
    }

    @Override public DocumentMapper parse(String source) throws MapperParsingException {
        return parse(null, source);
    }

    /**
     * Parses {@code source} into a document mapper. When {@code type} is null
     * the first top-level field name is taken as the type.
     *
     * @throws MapperParsingException when the JSON cannot be read or a
     *                                mapping element is malformed
     */
    @Override public DocumentMapper parse(String type, String source) throws MapperParsingException {
        JsonNode root;
        try {
            root = objectMapper.readValue(new FastStringReader(source), JsonNode.class);
        } catch (IOException e) {
            throw new MapperParsingException("Failed to parse json mapping definition", e);
        }
        // NOTE(review): throws NoSuchElementException on an empty top-level
        // object — presumably callers always pass a non-empty mapping; confirm.
        String rootName = root.getFieldNames().next();
        ObjectNode rootObj;
        if (type == null) {
            // we have no type, we assume the first node is the type
            rootObj = (ObjectNode) root.get(rootName);
            type = rootName;
        } else {
            // we have a type, check if the top level one is the type as well
            // if it is, then the root is that node, if not then the root is the master node
            if (type.equals(rootName)) {
                JsonNode tmpNode = root.get(type);
                if (!tmpNode.isObject()) {
                    throw new MapperParsingException("Expected root node name [" + rootName + "] to be of json object type, but its not");
                }
                rootObj = (ObjectNode) tmpNode;
            } else {
                rootObj = (ObjectNode) root;
            }
        }

        JsonDocumentMapper.Builder docBuilder = JsonMapperBuilders.doc(parseObject(type, rootObj));

        // top-level, document-wide settings (internal fields and analyzers)
        for (Iterator<Map.Entry<String, JsonNode>> fieldsIt = rootObj.getFields(); fieldsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = fieldsIt.next();
            String fieldName = entry.getKey();
            JsonNode fieldNode = entry.getValue();

            if ("sourceField".equals(fieldName)) {
                docBuilder.sourceField(parseSourceField((ObjectNode) fieldNode));
            } else if ("idField".equals(fieldName)) {
                docBuilder.idField(parseIdField((ObjectNode) fieldNode));
            } else if ("typeField".equals(fieldName)) {
                docBuilder.typeField(parseTypeField((ObjectNode) fieldNode));
            } else if ("uidField".equals(fieldName)) {
                docBuilder.uidField(parseUidField((ObjectNode) fieldNode));
            } else if ("boostField".equals(fieldName)) {
                docBuilder.boostField(parseBoostField((ObjectNode) fieldNode));
            } else if ("indexAnalyzer".equals(fieldName)) {
                docBuilder.indexAnalyzer(analysisService.analyzer(fieldNode.getTextValue()));
            } else if ("searchAnalyzer".equals(fieldName)) {
                docBuilder.searchAnalyzer(analysisService.analyzer(fieldNode.getTextValue()));
            } else if ("analyzer".equals(fieldName)) {
                // shorthand setting both index and search analyzers
                docBuilder.indexAnalyzer(analysisService.analyzer(fieldNode.getTextValue()));
                docBuilder.searchAnalyzer(analysisService.analyzer(fieldNode.getTextValue()));
            }
        }

        // fall back to the service defaults when not set explicitly
        if (!docBuilder.hasIndexAnalyzer()) {
            docBuilder.indexAnalyzer(analysisService.defaultIndexAnalyzer());
        }
        if (!docBuilder.hasSearchAnalyzer()) {
            docBuilder.searchAnalyzer(analysisService.defaultSearchAnalyzer());
        }

        docBuilder.mappingSource(source);

        return docBuilder.build();
    }

    private JsonUidFieldMapper.Builder parseUidField(ObjectNode uidNode) {
        String name = uidNode.get("name") == null ? JsonUidFieldMapper.Defaults.NAME : uidNode.get("name").getTextValue();
        JsonUidFieldMapper.Builder builder = uid(name);
        for (Iterator<Map.Entry<String, JsonNode>> fieldsIt = uidNode.getFields(); fieldsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = fieldsIt.next();
            String fieldName = entry.getKey();
            JsonNode fieldNode = entry.getValue();

            if ("indexName".equals(fieldName)) {
                builder.indexName(fieldNode.getTextValue());
            }
        }
        return builder;
    }

    private JsonBoostFieldMapper.Builder parseBoostField(ObjectNode boostNode) {
        String name = boostNode.get("name") == null ? JsonBoostFieldMapper.Defaults.NAME : boostNode.get("name").getTextValue();
        JsonBoostFieldMapper.Builder builder = boost(name);
        parseNumberField(builder, name, boostNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = boostNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getNumberValue().floatValue());
            }
        }
        return builder;
    }

    private JsonTypeFieldMapper.Builder parseTypeField(ObjectNode typeNode) {
        String name = typeNode.get("name") == null ? JsonTypeFieldMapper.Defaults.NAME : typeNode.get("name").getTextValue();
        JsonTypeFieldMapper.Builder builder = type(name);
        parseJsonField(builder, name, typeNode);
        return builder;
    }

    private JsonIdFieldMapper.Builder parseIdField(ObjectNode idNode) {
        String name = idNode.get("name") == null ? JsonIdFieldMapper.Defaults.NAME : idNode.get("name").getTextValue();
        JsonIdFieldMapper.Builder builder = id(name);
        parseJsonField(builder, name, idNode);
        return builder;
    }

    private JsonSourceFieldMapper.Builder parseSourceField(ObjectNode sourceNode) {
        String name = sourceNode.get("name") == null ? JsonSourceFieldMapper.Defaults.NAME : sourceNode.get("name").getTextValue();
        JsonSourceFieldMapper.Builder builder = source(name);
        for (Iterator<Map.Entry<String, JsonNode>> fieldsIt = sourceNode.getFields(); fieldsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = fieldsIt.next();
            String fieldName = entry.getKey();
            JsonNode fieldNode = entry.getValue();
            if (fieldName.equals("compressionThreshold")) {
                builder.compressionThreshold(fieldNode.getNumberValue().intValue());
            } else if (fieldName.equals("compressionType")) {
                String compressionType = fieldNode.getTextValue();
                if ("zip".equals(compressionType)) {
                    builder.compressor(new ZipCompressor());
                } else if ("gzip".equals(compressionType)) {
                    builder.compressor(new GZIPCompressor());
                } else if ("lzf".equals(compressionType)) {
                    builder.compressor(new LzfCompressor());
                } else {
                    throw new MapperParsingException("No compressor registered under [" + compressionType + "]");
                }
            }
        }
        return builder;
    }

    /** Parses an object (inner document) mapping node, recursing into properties. */
    private JsonObjectMapper.Builder parseObject(String name, ObjectNode node) {
        JsonObjectMapper.Builder builder = object(name);
        for (Iterator<Map.Entry<String, JsonNode>> fieldsIt = node.getFields(); fieldsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = fieldsIt.next();
            String fieldName = entry.getKey();
            JsonNode fieldNode = entry.getValue();
            if (fieldName.equals("dynamic")) {
                builder.dynamic(fieldNode.getBooleanValue());
            } else if (fieldName.equals("type")) {
                String type = fieldNode.getTextValue();
                if (!type.equals("object")) {
                    throw new MapperParsingException("Trying to parse an object but has a different type [" + type + "] for [" + name + "]");
                }
            } else if (fieldName.equals("dateFormats")) {
                // either an array of formats, the literal "none", or a single format
                List<DateTimeFormatter> dateTimeFormatters = newArrayList();
                if (fieldNode.isArray()) {
                    for (JsonNode node1 : (ArrayNode) fieldNode) {
                        dateTimeFormatters.add(parseDateTimeFormatter(fieldName, node1));
                    }
                } else if ("none".equals(fieldNode.getValueAsText())) {
                    dateTimeFormatters = null;
                } else {
                    dateTimeFormatters.add(parseDateTimeFormatter(fieldName, fieldNode));
                }
                if (dateTimeFormatters == null) {
                    builder.noDateTimeFormatter();
                } else {
                    builder.dateTimeFormatter(dateTimeFormatters);
                }
            } else if (fieldName.equals("enabled")) {
                builder.enabled(fieldNode.getBooleanValue());
            } else if (fieldName.equals("pathType")) {
                builder.pathType(parsePathType(name, fieldNode.getValueAsText()));
            } else if (fieldName.equals("properties")) {
                parseProperties(builder, (ObjectNode) fieldNode);
            }
        }
        return builder;
    }

    private JsonPath.Type parsePathType(String name, String path) throws MapperParsingException {
        if ("justName".equals(path)) {
            return JsonPath.Type.JUST_NAME;
        } else if ("full".equals(path)) {
            return JsonPath.Type.FULL;
        } else {
            throw new MapperParsingException("Wrong value for pathType [" + path + "] for object [" + name + "]");
        }
    }

    /** Dispatches each property node to the builder for its declared (or derived) type. */
    private void parseProperties(JsonObjectMapper.Builder objBuilder, ObjectNode propsNode) {
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = propsNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();

            String type;
            JsonNode typeNode = propNode.get("type");
            if (typeNode != null) {
                type = typeNode.getTextValue();
            } else {
                // lets see if we can derive this...
                if (propNode.isObject() && propNode.get("properties") != null) {
                    type = "object";
                } else {
                    throw new MapperParsingException("No type specified for property [" + propName + "]");
                }
            }
            if (type.equals("string")) {
                objBuilder.add(parseString(propName, (ObjectNode) propNode));
            } else if (type.equals("date")) {
                objBuilder.add(parseDate(propName, (ObjectNode) propNode));
            } else if (type.equals("integer")) {
                objBuilder.add(parseInteger(propName, (ObjectNode) propNode));
            } else if (type.equals("long")) {
                objBuilder.add(parseLong(propName, (ObjectNode) propNode));
            } else if (type.equals("float")) {
                objBuilder.add(parseFloat(propName, (ObjectNode) propNode));
            } else if (type.equals("double")) {
                objBuilder.add(parseDouble(propName, (ObjectNode) propNode));
            } else if (type.equals("boolean")) {
                objBuilder.add(parseBoolean(propName, (ObjectNode) propNode));
            } else if (type.equals("object")) {
                objBuilder.add(parseObject(propName, (ObjectNode) propNode));
            } else if (type.equals("binary")) {
                objBuilder.add(parseBinary(propName, (ObjectNode) propNode));
            }
        }
    }

    private JsonDateFieldMapper.Builder parseDate(String name, ObjectNode dateNode) {
        JsonDateFieldMapper.Builder builder = dateField(name);
        parseNumberField(builder, name, dateNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = dateNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getValueAsText());
            } else if (propName.equals("format")) {
                builder.dateTimeFormatter(parseDateTimeFormatter(propName, propNode));
            }
        }
        return builder;
    }

    private JsonIntegerFieldMapper.Builder parseInteger(String name, ObjectNode integerNode) {
        JsonIntegerFieldMapper.Builder builder = integerField(name);
        parseNumberField(builder, name, integerNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = integerNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getNumberValue().intValue());
            }
        }
        return builder;
    }

    private JsonLongFieldMapper.Builder parseLong(String name, ObjectNode longNode) {
        JsonLongFieldMapper.Builder builder = longField(name);
        parseNumberField(builder, name, longNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = longNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getNumberValue().longValue());
            }
        }
        return builder;
    }

    private JsonFloatFieldMapper.Builder parseFloat(String name, ObjectNode floatNode) {
        JsonFloatFieldMapper.Builder builder = floatField(name);
        parseNumberField(builder, name, floatNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = floatNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getNumberValue().floatValue());
            }
        }
        return builder;
    }

    private JsonDoubleFieldMapper.Builder parseDouble(String name, ObjectNode doubleNode) {
        JsonDoubleFieldMapper.Builder builder = doubleField(name);
        parseNumberField(builder, name, doubleNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = doubleNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getNumberValue().doubleValue());
            }
        }
        return builder;
    }

    private JsonStringFieldMapper.Builder parseString(String name, ObjectNode stringNode) {
        JsonStringFieldMapper.Builder builder = stringField(name);
        parseJsonField(builder, name, stringNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = stringNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getValueAsText());
            }
        }
        return builder;
    }

    private JsonBinaryFieldMapper.Builder parseBinary(String name, ObjectNode binaryNode) {
        JsonBinaryFieldMapper.Builder builder = binaryField(name);
        parseJsonField(builder, name, binaryNode);
        return builder;
    }

    private JsonBooleanFieldMapper.Builder parseBoolean(String name, ObjectNode booleanNode) {
        JsonBooleanFieldMapper.Builder builder = booleanField(name);
        parseJsonField(builder, name, booleanNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = booleanNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("nullValue")) {
                builder.nullValue(propNode.getBooleanValue());
            }
        }
        return builder;
    }

    /** Common settings shared by all numeric field mappers. */
    private void parseNumberField(JsonNumberFieldMapper.Builder builder, String name, ObjectNode numberNode) {
        parseJsonField(builder, name, numberNode);
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = numberNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("precisionStep")) {
                builder.precisionStep(propNode.getNumberValue().intValue());
            }
        }
    }

    /** Common settings shared by all field mappers (store/index/analyzers/...). */
    private void parseJsonField(JsonFieldMapper.Builder builder, String name, ObjectNode fieldNode) {
        for (Iterator<Map.Entry<String, JsonNode>> propsIt = fieldNode.getFields(); propsIt.hasNext();) {
            Map.Entry<String, JsonNode> entry = propsIt.next();
            String propName = entry.getKey();
            JsonNode propNode = entry.getValue();
            if (propName.equals("indexName")) {
                builder.indexName(propNode.getValueAsText());
            } else if (propName.equals("store")) {
                builder.store(parseStore(name, propNode.getTextValue()));
            } else if (propName.equals("index")) {
                builder.index(parseIndex(name, propNode.getTextValue()));
            } else if (propName.equals("termVector")) {
                builder.termVector(parseTermVector(name, propNode.getTextValue()));
            } else if (propName.equals("boost")) {
                builder.boost(propNode.getNumberValue().floatValue());
            } else if (propName.equals("omitNorms")) {
                builder.omitNorms(propNode.getBooleanValue());
            } else if (propName.equals("omitTermFreqAndPositions")) {
                builder.omitTermFreqAndPositions(propNode.getBooleanValue());
            } else if (propName.equals("indexAnalyzer")) {
                builder.indexAnalyzer(analysisService.analyzer(propNode.getTextValue()));
            } else if (propName.equals("searchAnalyzer")) {
                builder.searchAnalyzer(analysisService.analyzer(propNode.getTextValue()));
            } else if (propName.equals("analyzer")) {
                builder.indexAnalyzer(analysisService.analyzer(propNode.getTextValue()));
                builder.searchAnalyzer(analysisService.analyzer(propNode.getTextValue()));
            }
        }
    }

    private DateTimeFormatter parseDateTimeFormatter(String fieldName, JsonNode node) {
        if (node.isTextual()) {
            // all date parsing/printing is pinned to UTC
            return Joda.forPattern(node.getTextValue()).withZone(DateTimeZone.UTC);
        } else {
            // TODO support more complex configuration...
            throw new MapperParsingException("Wrong node to use to parse date formatters [" + fieldName + "]");
        }
    }

    private Field.TermVector parseTermVector(String fieldName, String termVector) throws MapperParsingException {
        if ("no".equals(termVector)) {
            return Field.TermVector.NO;
        } else if ("yes".equals(termVector)) {
            return Field.TermVector.YES;
        } else if ("with_offsets".equals(termVector)) {
            return Field.TermVector.WITH_OFFSETS;
        } else if ("with_positions".equals(termVector)) {
            return Field.TermVector.WITH_POSITIONS;
        } else if ("with_positions_offsets".equals(termVector)) {
            return Field.TermVector.WITH_POSITIONS_OFFSETS;
        } else {
            throw new MapperParsingException("Wrong value for termVector [" + termVector + "] for field [" + fieldName + "]");
        }
    }

    private Field.Index parseIndex(String fieldName, String index) throws MapperParsingException {
        if ("no".equals(index)) {
            return Field.Index.NO;
        } else if ("not_analyzed".equals(index)) {
            return Field.Index.NOT_ANALYZED;
        } else if ("analyzed".equals(index)) {
            return Field.Index.ANALYZED;
        } else {
            throw new MapperParsingException("Wrong value for index [" + index + "] for field [" + fieldName + "]");
        }
    }

    private Field.Store parseStore(String fieldName, String store) throws MapperParsingException {
        if ("no".equals(store)) {
            return Field.Store.NO;
        } else if ("yes".equals(store)) {
            return Field.Store.YES;
        } else {
            throw new MapperParsingException("Wrong value for store [" + store + "] for field [" + fieldName + "]");
        }
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.json;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.*;
import org.apache.lucene.util.NumericUtils;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.analysis.NumericDoubleAnalyzer;
import org.elasticsearch.util.Numbers;

import java.io.IOException;

/**
 * Field mapper for JSON {@code double} values, indexed as Lucene numeric
 * trie fields. NOTE(review): generic type arguments stripped by the diff
 * extraction have been reconstructed — confirm against the sibling numeric
 * mappers.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonDoubleFieldMapper extends JsonNumberFieldMapper<Double> {

    public static class Defaults extends JsonNumberFieldMapper.Defaults {
        // no substitute is applied for JSON null unless configured
        public static final Double NULL_VALUE = null;
    }

    public static class Builder extends JsonNumberFieldMapper.Builder<Builder, JsonDoubleFieldMapper> {

        protected Double nullValue = Defaults.NULL_VALUE;

        public Builder(String name) {
            super(name);
            builder = this;
        }

        /** Value indexed in place of an explicit JSON null. */
        public Builder nullValue(double nullValue) {
            this.nullValue = nullValue;
            return this;
        }

        @Override public JsonDoubleFieldMapper build(BuilderContext context) {
            return new JsonDoubleFieldMapper(name, buildIndexName(context), buildFullName(context),
                    precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, nullValue);
        }
    }

    private final Double nullValue;

    protected JsonDoubleFieldMapper(String name, String indexName, String fullName, int precisionStep,
                                    Field.Index index, Field.Store store,
                                    float boost, boolean omitNorms, boolean omitTermFreqAndPositions,
                                    Double nullValue) {
        // search-time analyzer uses Integer.MAX_VALUE precision step so queries
        // match the exact (un-split) value
        super(name, indexName, fullName, precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions,
                new NumericDoubleAnalyzer(precisionStep), new NumericDoubleAnalyzer(Integer.MAX_VALUE));
        this.nullValue = nullValue;
    }

    @Override protected int maxPrecisionStep() {
        return 64;
    }

    /** Decodes the stored binary value; returns NaN when the field has no binary value. */
    @Override public Double value(Fieldable field) {
        byte[] value = field.getBinaryValue();
        if (value == null) {
            return Double.NaN;
        }
        return Numbers.bytesToDouble(value);
    }

    @Override public String indexedValue(String value) {
        return indexedValue(Double.parseDouble(value));
    }

    @Override public String indexedValue(Double value) {
        return NumericUtils.doubleToPrefixCoded(value);
    }

    @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return NumericRangeQuery.newDoubleRange(indexName, precisionStep,
                lowerTerm == null ? null : Double.parseDouble(lowerTerm),
                upperTerm == null ? null : Double.parseDouble(upperTerm),
                includeLower, includeUpper);
    }

    @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return NumericRangeFilter.newDoubleRange(indexName, precisionStep,
                lowerTerm == null ? null : Double.parseDouble(lowerTerm),
                upperTerm == null ? null : Double.parseDouble(upperTerm),
                includeLower, includeUpper);
    }

    /**
     * Creates the Lucene field for the current JSON token; returns null for a
     * JSON null with no configured nullValue (field is skipped).
     */
    @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException {
        double value;
        if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) {
            if (nullValue == null) {
                return null;
            }
            value = nullValue;
        } else {
            value = jsonContext.jp().getDoubleValue();
        }
        Field field = null;
        if (stored()) {
            // stored as raw bytes; the numeric token stream is attached for indexing
            field = new Field(indexName, Numbers.doubleToBytes(value), store);
            if (indexed()) {
                field.setTokenStream(popCachedStream(precisionStep).setDoubleValue(value));
            }
        } else if (indexed()) {
            field = new Field(indexName, popCachedStream(precisionStep).setDoubleValue(value));
        }
        return field;
    }

    @Override public int sortType() {
        return SortField.DOUBLE;
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.json;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMapperListener;
import org.elasticsearch.util.lucene.search.TermFilter;

import java.io.IOException;

/**
 * Base class for all JSON field mappers: holds the common indexing options
 * (store / index / term vector / boost / norms / analyzers) and implements the
 * shared query, filter and parse plumbing.
 *
 * <p>NOTE(review): the generic type parameters on this class and its Builder
 * were stripped by the diff extraction and have been reconstructed from the
 * usage of {@code T} below — confirm against the FieldMapper/JsonMapper
 * declarations.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class JsonFieldMapper<T> implements FieldMapper<T>, JsonMapper {

    /** Default indexing options applied when the mapping does not override them. */
    public static class Defaults {
        public static final Field.Index INDEX = Field.Index.ANALYZED;
        public static final Field.Store STORE = Field.Store.NO;
        public static final Field.TermVector TERM_VECTOR = Field.TermVector.NO;
        public static final float BOOST = 1.0f;
        public static final boolean OMIT_NORMS = false;
        public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = false;
    }

    public abstract static class Builder<T extends Builder, Y extends JsonFieldMapper> extends JsonMapper.Builder<T, Y> {

        protected Field.Index index = Defaults.INDEX;

        protected Field.Store store = Defaults.STORE;

        protected Field.TermVector termVector = Defaults.TERM_VECTOR;

        protected float boost = Defaults.BOOST;

        protected boolean omitNorms = Defaults.OMIT_NORMS;

        protected boolean omitTermFreqAndPositions = Defaults.OMIT_TERM_FREQ_AND_POSITIONS;

        protected String indexName;

        protected Analyzer indexAnalyzer;

        protected Analyzer searchAnalyzer;

        public Builder(String name) {
            super(name);
            // index name defaults to the logical name unless overridden
            indexName = name;
        }

        public T index(Field.Index index) {
            this.index = index;
            return builder;
        }

        public T store(Field.Store store) {
            this.store = store;
            return builder;
        }

        public T termVector(Field.TermVector termVector) {
            this.termVector = termVector;
            return builder;
        }

        public T boost(float boost) {
            this.boost = boost;
            return builder;
        }

        public T omitNorms(boolean omitNorms) {
            this.omitNorms = omitNorms;
            return builder;
        }

        public T omitTermFreqAndPositions(boolean omitTermFreqAndPositions) {
            this.omitTermFreqAndPositions = omitTermFreqAndPositions;
            return builder;
        }

        public T indexName(String indexName) {
            this.indexName = indexName;
            return builder;
        }

        /** Sets the index analyzer; also used for search unless one is set explicitly. */
        public T indexAnalyzer(Analyzer indexAnalyzer) {
            this.indexAnalyzer = indexAnalyzer;
            if (this.searchAnalyzer == null) {
                this.searchAnalyzer = indexAnalyzer;
            }
            return builder;
        }

        public T searchAnalyzer(Analyzer searchAnalyzer) {
            this.searchAnalyzer = searchAnalyzer;
            return builder;
        }

        /** Full index name including the current object path. */
        protected String buildIndexName(BuilderContext context) {
            String actualIndexName = indexName == null ? name : indexName;
            return context.path().pathAsText(actualIndexName);
        }

        protected String buildFullName(BuilderContext context) {
            return context.path().fullPathAsText(name);
        }
    }

    protected final String name;

    protected final String indexName;

    protected final String fullName;

    protected final Field.Index index;

    protected final Field.Store store;

    protected final Field.TermVector termVector;

    protected final float boost;

    protected final boolean omitNorms;

    protected final boolean omitTermFreqAndPositions;

    protected final Analyzer indexAnalyzer;

    protected final Analyzer searchAnalyzer;

    protected JsonFieldMapper(String name, String indexName, String fullName, Field.Index index, Field.Store store, Field.TermVector termVector,
                              float boost, boolean omitNorms, boolean omitTermFreqAndPositions, Analyzer indexAnalyzer, Analyzer searchAnalyzer) {
        this.name = name;
        this.indexName = indexName;
        this.fullName = fullName;
        this.index = index;
        this.store = store;
        this.termVector = termVector;
        this.boost = boost;
        this.omitNorms = omitNorms;
        this.omitTermFreqAndPositions = omitTermFreqAndPositions;
        this.indexAnalyzer = indexAnalyzer;
        this.searchAnalyzer = searchAnalyzer;
    }

    @Override public String name() {
        return this.name;
    }

    @Override public String indexName() {
        return this.indexName;
    }

    @Override public String fullName() {
        return this.fullName;
    }

    @Override public Field.Index index() {
        return this.index;
    }

    @Override public Field.Store store() {
        return this.store;
    }

    @Override public boolean stored() {
        return store == Field.Store.YES;
    }

    @Override public boolean indexed() {
        return index != Field.Index.NO;
    }

    @Override public boolean analyzed() {
        return index == Field.Index.ANALYZED;
    }

    @Override public Field.TermVector termVector() {
        return this.termVector;
    }

    @Override public float boost() {
        return this.boost;
    }

    @Override public boolean omitNorms() {
        return this.omitNorms;
    }

    @Override public boolean omitTermFreqAndPositions() {
        return this.omitTermFreqAndPositions;
    }

    @Override public Analyzer indexAnalyzer() {
        return this.indexAnalyzer;
    }

    @Override public Analyzer searchAnalyzer() {
        return this.searchAnalyzer;
    }

    /**
     * Parses the current JSON token into a Lucene field (via the subclass's
     * {@link #parseCreateField}) and adds it to the document. No-op when the
     * field is neither indexed nor stored, or when the subclass returns null.
     */
    @Override public void parse(JsonParseContext jsonContext) throws IOException {
        if (!indexed() && !stored()) {
            return;
        }
        Field field = parseCreateField(jsonContext);
        if (field == null) {
            return;
        }
        field.setOmitNorms(omitNorms);
        field.setOmitTermFreqAndPositions(omitTermFreqAndPositions);
        field.setBoost(boost);
        jsonContext.doc().add(field);
    }

    /** Subclass hook: build the Lucene field for the current token, or null to skip. */
    protected abstract Field parseCreateField(JsonParseContext jsonContext) throws IOException;

    @Override public void traverse(FieldMapperListener fieldMapperListener) {
        fieldMapperListener.fieldMapper(this);
    }

    @Override public Object valueForSearch(Fieldable field) {
        return valueAsString(field);
    }

    @Override public String indexedValue(String value) {
        return value;
    }

    @Override public String indexedValue(T value) {
        return value.toString();
    }

    @Override public Query fieldQuery(String value) {
        return new TermQuery(new Term(indexName, indexedValue(value)));
    }

    @Override public Filter fieldFilter(String value) {
        return new TermFilter(new Term(indexName, indexedValue(value)));
    }

    @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return new TermRangeQuery(indexName,
                lowerTerm == null ? null : indexedValue(lowerTerm),
                upperTerm == null ? null : indexedValue(upperTerm),
                includeLower, includeUpper);
    }

    @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
        return new TermRangeFilter(indexName,
                lowerTerm == null ? null : indexedValue(lowerTerm),
                upperTerm == null ? null : indexedValue(upperTerm),
                includeLower, includeUpper);
    }

    @Override public int sortType() {
        return SortField.STRING;
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.search.*; +import org.apache.lucene.util.NumericUtils; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.analysis.NumericFloatAnalyzer; +import org.elasticsearch.util.Numbers; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonFloatFieldMapper extends JsonNumberFieldMapper { + + public static class Defaults extends JsonNumberFieldMapper.Defaults { + public static final Float NULL_VALUE = null; + } + + public static class Builder extends JsonNumberFieldMapper.Builder { + + protected Float nullValue = Defaults.NULL_VALUE; + + public Builder(String name) { + super(name); + builder = this; + } + + public Builder nullValue(float nullValue) { + this.nullValue = nullValue; + return this; + } + + @Override public JsonFloatFieldMapper build(BuilderContext context) { + return new JsonFloatFieldMapper(name, buildIndexName(context), buildFullName(context), + precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, nullValue); + } + } + + + private final Float nullValue; + + protected JsonFloatFieldMapper(String name, String indexName, String fullName, int precisionStep, Field.Index index, Field.Store store, + float boost, boolean omitNorms, boolean omitTermFreqAndPositions, + Float nullValue) { + super(name, indexName, fullName, precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, + new 
NumericFloatAnalyzer(precisionStep), new NumericFloatAnalyzer(Integer.MAX_VALUE)); + this.nullValue = nullValue; + } + + @Override protected int maxPrecisionStep() { + return 32; + } + + @Override public Float value(Fieldable field) { + byte[] value = field.getBinaryValue(); + if (value == null) { + return Float.NaN; + } + return Numbers.bytesToFloat(value); + } + + @Override public String indexedValue(String value) { + return indexedValue(Float.parseFloat(value)); + } + + @Override public String indexedValue(Float value) { + return NumericUtils.floatToPrefixCoded(value); + } + + @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { + return NumericRangeQuery.newFloatRange(indexName, precisionStep, + lowerTerm == null ? null : Float.parseFloat(lowerTerm), + upperTerm == null ? null : Float.parseFloat(upperTerm), + includeLower, includeUpper); + } + + @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { + return NumericRangeFilter.newFloatRange(indexName, precisionStep, + lowerTerm == null ? null : Float.parseFloat(lowerTerm), + upperTerm == null ? 
null : Float.parseFloat(upperTerm), + includeLower, includeUpper); + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + float value; + if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) { + if (nullValue == null) { + return null; + } + value = nullValue; + } else { + value = jsonContext.jp().getFloatValue(); + } + Field field = null; + if (stored()) { + field = new Field(indexName, Numbers.floatToBytes(value), store); + if (indexed()) { + field.setTokenStream(popCachedStream(precisionStep).setFloatValue(value)); + } + } else if (indexed()) { + field = new Field(indexName, popCachedStream(precisionStep).setFloatValue(value)); + } + return field; + } + + @Override public int sortType() { + return SortField.FLOAT; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonIdFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonIdFieldMapper.java new file mode 100644 index 00000000000..5723ad0a2f8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonIdFieldMapper.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.elasticsearch.index.mapper.FieldMapperListener; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.util.lucene.Lucene; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonIdFieldMapper extends JsonFieldMapper implements IdFieldMapper { + + public static class Defaults extends JsonFieldMapper.Defaults { + public static final String NAME = "_id"; + public static final String INDEX_NAME = "_id"; + public static final Field.Index INDEX = Field.Index.NOT_ANALYZED; + public static final Field.Store STORE = Field.Store.NO; + public static final boolean OMIT_NORMS = true; + public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = true; + } + + public static class Builder extends JsonFieldMapper.Builder { + + public Builder(String name) { + super(name); + indexName = Defaults.INDEX_NAME; + store = Defaults.STORE; + index = Defaults.INDEX; + omitNorms = Defaults.OMIT_NORMS; + omitTermFreqAndPositions = Defaults.OMIT_TERM_FREQ_AND_POSITIONS; + } + + @Override public JsonIdFieldMapper build(BuilderContext context) { + return new JsonIdFieldMapper(name, indexName, store, termVector, boost, omitNorms, omitTermFreqAndPositions); + } + } + + protected JsonIdFieldMapper() { + this(Defaults.NAME, Defaults.INDEX_NAME); + } + + protected JsonIdFieldMapper(String name, String indexName) { + this(name, indexName, Defaults.STORE, Defaults.TERM_VECTOR, Defaults.BOOST, + Defaults.OMIT_NORMS, Defaults.OMIT_TERM_FREQ_AND_POSITIONS); + } + + public JsonIdFieldMapper(String name, String indexName, Field.Store store, Field.TermVector termVector, + float boost, 
boolean omitNorms, boolean omitTermFreqAndPositions) { + super(name, indexName, name, Defaults.INDEX, store, termVector, boost, omitNorms, omitTermFreqAndPositions, + Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER); + } + + @Override public String value(Document document) { + Fieldable field = document.getFieldable(indexName); + return field == null ? null : value(field); + } + + @Override public String value(Fieldable field) { + return field.stringValue(); + } + + @Override public String valueAsString(Fieldable field) { + return value(field); + } + + @Override public String indexedValue(String value) { + return value; + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + if (jsonContext.parsedIdState() == JsonParseContext.ParsedIdState.NO) { + String id = jsonContext.jp().getText(); + if (jsonContext.id() != null && !jsonContext.id().equals(id)) { + throw new MapperParsingException("Provided id [" + jsonContext.id() + "] does not match the json one [" + id + "]"); + } + jsonContext.id(id); + jsonContext.parsedId(JsonParseContext.ParsedIdState.PARSED); + return new Field(indexName, jsonContext.id(), store, index); + } else if (jsonContext.parsedIdState() == JsonParseContext.ParsedIdState.EXTERNAL) { + if (jsonContext.id() == null) { + throw new MapperParsingException("No id mapping with [" + name() + "] found in the json, and not explicitly set"); + } + return new Field(indexName, jsonContext.id(), store, index); + } else { + throw new MapperParsingException("Illegal parsed id state"); + } + } + + @Override public void traverse(FieldMapperListener fieldMapperListener) { + fieldMapperListener.fieldMapper(this); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonIntegerFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonIntegerFieldMapper.java new file mode 100644 index 00000000000..baab15b6e52 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonIntegerFieldMapper.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.search.*; +import org.apache.lucene.util.NumericUtils; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.analysis.NumericIntegerAnalyzer; +import org.elasticsearch.util.Numbers; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonIntegerFieldMapper extends JsonNumberFieldMapper { + + public static class Defaults extends JsonNumberFieldMapper.Defaults { + public static final Integer NULL_VALUE = null; + } + + public static class Builder extends JsonNumberFieldMapper.Builder { + + protected Integer nullValue = Defaults.NULL_VALUE; + + public Builder(String name) { + super(name); + builder = this; + } + + public Builder nullValue(int nullValue) { + this.nullValue = nullValue; + return this; + } + + @Override public JsonIntegerFieldMapper build(BuilderContext context) { + return new JsonIntegerFieldMapper(name, 
buildIndexName(context), buildFullName(context), + precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, nullValue); + } + } + + private final Integer nullValue; + + protected JsonIntegerFieldMapper(String name, String indexName, String fullName, int precisionStep, Field.Index index, Field.Store store, + float boost, boolean omitNorms, boolean omitTermFreqAndPositions, + Integer nullValue) { + super(name, indexName, fullName, precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, + new NumericIntegerAnalyzer(precisionStep), new NumericIntegerAnalyzer(Integer.MAX_VALUE)); + this.nullValue = nullValue; + } + + @Override protected int maxPrecisionStep() { + return 32; + } + + @Override public Integer value(Fieldable field) { + byte[] value = field.getBinaryValue(); + if (value == null) { + return Integer.MIN_VALUE; + } + return Numbers.bytesToInt(value); + } + + @Override public String indexedValue(String value) { + return indexedValue(Integer.parseInt(value)); + } + + @Override public String indexedValue(Integer value) { + return NumericUtils.intToPrefixCoded(value); + } + + @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { + return NumericRangeQuery.newIntRange(indexName, precisionStep, + lowerTerm == null ? null : Integer.parseInt(lowerTerm), + upperTerm == null ? null : Integer.parseInt(upperTerm), + includeLower, includeUpper); + } + + @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { + return NumericRangeFilter.newIntRange(indexName, precisionStep, + lowerTerm == null ? null : Integer.parseInt(lowerTerm), + upperTerm == null ? 
null : Integer.parseInt(upperTerm), + includeLower, includeUpper); + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + int value; + if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) { + if (nullValue == null) { + return null; + } + value = nullValue; + } else { + value = jsonContext.jp().getIntValue(); + } + Field field = null; + if (stored()) { + field = new Field(indexName, Numbers.intToBytes(value), store); + if (indexed()) { + field.setTokenStream(popCachedStream(precisionStep).setIntValue(value)); + } + } else if (indexed()) { + field = new Field(indexName, popCachedStream(precisionStep).setIntValue(value)); + } + return field; + } + + @Override public int sortType() { + return SortField.INT; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonLongFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonLongFieldMapper.java new file mode 100644 index 00000000000..a7e6aa1b6c2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonLongFieldMapper.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.search.*; +import org.apache.lucene.util.NumericUtils; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.analysis.NumericLongAnalyzer; +import org.elasticsearch.util.Numbers; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonLongFieldMapper extends JsonNumberFieldMapper { + + public static class Defaults extends JsonNumberFieldMapper.Defaults { + public static final Long NULL_VALUE = null; + } + + public static class Builder extends JsonNumberFieldMapper.Builder { + + protected Long nullValue = Defaults.NULL_VALUE; + + public Builder(String name) { + super(name); + builder = this; + } + + public Builder nullValue(long nullValue) { + this.nullValue = nullValue; + return this; + } + + @Override public JsonLongFieldMapper build(BuilderContext context) { + return new JsonLongFieldMapper(name, buildIndexName(context), buildFullName(context), + precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, nullValue); + } + } + + private final Long nullValue; + + protected JsonLongFieldMapper(String name, String indexName, String fullName, int precisionStep, Field.Index index, Field.Store store, + float boost, boolean omitNorms, boolean omitTermFreqAndPositions, + Long nullValue) { + super(name, indexName, fullName, precisionStep, index, store, boost, omitNorms, omitTermFreqAndPositions, + new NumericLongAnalyzer(precisionStep), new NumericLongAnalyzer(Integer.MAX_VALUE)); + this.nullValue = nullValue; + } + + @Override protected int maxPrecisionStep() { + return 64; + } + + @Override public Long value(Fieldable field) { + byte[] value = field.getBinaryValue(); + if (value == null) { + return Long.MIN_VALUE; + } + return Numbers.bytesToLong(value); + } + + @Override public String indexedValue(String value) { + return 
indexedValue(Long.parseLong(value)); + } + + @Override public String indexedValue(Long value) { + return NumericUtils.longToPrefixCoded(value); + } + + @Override public Query rangeQuery(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { + return NumericRangeQuery.newLongRange(indexName, precisionStep, + lowerTerm == null ? null : Long.parseLong(lowerTerm), + upperTerm == null ? null : Long.parseLong(upperTerm), + includeLower, includeUpper); + } + + @Override public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { + return NumericRangeFilter.newLongRange(indexName, precisionStep, + lowerTerm == null ? null : Long.parseLong(lowerTerm), + upperTerm == null ? null : Long.parseLong(upperTerm), + includeLower, includeUpper); + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + long value; + if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) { + if (nullValue == null) { + return null; + } + value = nullValue; + } else { + value = jsonContext.jp().getLongValue(); + } + Field field = null; + if (stored()) { + field = new Field(indexName, Numbers.longToBytes(value), store); + if (indexed()) { + field.setTokenStream(popCachedStream(precisionStep).setLongValue(value)); + } + } else if (indexed()) { + field = new Field(indexName, popCachedStream(precisionStep).setLongValue(value)); + } + return field; + } + + @Override public int sortType() { + return SortField.LONG; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonMapper.java new file mode 100644 index 00000000000..287222c5fd4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonMapper.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.elasticsearch.index.mapper.FieldMapperListener; +import org.elasticsearch.util.concurrent.NotThreadSafe; +import org.elasticsearch.util.concurrent.ThreadSafe; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public interface JsonMapper { + + @NotThreadSafe + public static class BuilderContext { + private final JsonPath jsonPath; + + public BuilderContext(JsonPath jsonPath) { + this.jsonPath = jsonPath; + } + + public JsonPath path() { + return this.jsonPath; + } + } + + @NotThreadSafe + public static abstract class Builder { + + protected final String name; + + protected T builder; + + public Builder(String name) { + this.name = name; + } + + public abstract Y build(BuilderContext context); + } + + String name(); + + void parse(JsonParseContext jsonContext) throws IOException; + + void traverse(FieldMapperListener fieldMapperListener); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonMapperBuilders.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonMapperBuilders.java new file mode 100644 index 00000000000..1056aae90eb --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonMapperBuilders.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +/** + * @author kimchy (Shay Banon) + */ +public final class JsonMapperBuilders { + + private JsonMapperBuilders() { + + } + + public static JsonDocumentMapper.Builder doc(JsonObjectMapper.Builder objectBuilder) { + return new JsonDocumentMapper.Builder(objectBuilder); + } + + public static JsonSourceFieldMapper.Builder source(String name) { + return new JsonSourceFieldMapper.Builder(name); + } + + public static JsonIdFieldMapper.Builder id(String name) { + return new JsonIdFieldMapper.Builder(name); + } + + public static JsonUidFieldMapper.Builder uid(String name) { + return new JsonUidFieldMapper.Builder(name); + } + + public static JsonTypeFieldMapper.Builder type(String name) { + return new JsonTypeFieldMapper.Builder(name); + } + + public static JsonBoostFieldMapper.Builder boost(String name) { + return new JsonBoostFieldMapper.Builder(name); + } + + public static JsonObjectMapper.Builder object(String name) { + return new JsonObjectMapper.Builder(name); + } + + public static 
JsonBooleanFieldMapper.Builder booleanField(String name) { + return new JsonBooleanFieldMapper.Builder(name); + } + + public static JsonStringFieldMapper.Builder stringField(String name) { + return new JsonStringFieldMapper.Builder(name); + } + + public static JsonBinaryFieldMapper.Builder binaryField(String name) { + return new JsonBinaryFieldMapper.Builder(name); + } + + public static JsonDateFieldMapper.Builder dateField(String name) { + return new JsonDateFieldMapper.Builder(name); + } + + public static JsonIntegerFieldMapper.Builder integerField(String name) { + return new JsonIntegerFieldMapper.Builder(name); + } + + public static JsonLongFieldMapper.Builder longField(String name) { + return new JsonLongFieldMapper.Builder(name); + } + + public static JsonFloatFieldMapper.Builder floatField(String name) { + return new JsonFloatFieldMapper.Builder(name); + } + + public static JsonDoubleFieldMapper.Builder doubleField(String name) { + return new JsonDoubleFieldMapper.Builder(name); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonNumberFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonNumberFieldMapper.java new file mode 100644 index 00000000000..0f2957e66d6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonNumberFieldMapper.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.util.gnu.trove.TIntObjectHashMap; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.Deque; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class JsonNumberFieldMapper extends JsonFieldMapper { + + public static class Defaults extends JsonFieldMapper.Defaults { + public static final int PRECISION_STEP = NumericUtils.PRECISION_STEP_DEFAULT; + public static final Field.Index INDEX = Field.Index.NOT_ANALYZED; + public static final boolean OMIT_NORMS = true; + public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = true; + } + + public abstract static class Builder extends JsonFieldMapper.Builder { + + protected int precisionStep = Defaults.PRECISION_STEP; + + public Builder(String name) { + super(name); + this.index = Defaults.INDEX; + this.omitNorms = Defaults.OMIT_NORMS; + this.omitTermFreqAndPositions = Defaults.OMIT_TERM_FREQ_AND_POSITIONS; + } + + public T precisionStep(int precisionStep) { + this.precisionStep = precisionStep; + return builder; + } + } + + private static final ThreadLocal>> cachedStreams = new ThreadLocal>>() { + @Override protected TIntObjectHashMap> initialValue() { + return new TIntObjectHashMap>(); + } + }; + + protected final 
int precisionStep; + + protected JsonNumberFieldMapper(String name, String indexName, String fullName, int precisionStep, + Field.Index index, Field.Store store, + float boost, boolean omitNorms, boolean omitTermFreqAndPositions, + Analyzer indexAnalyzer, Analyzer searchAnalyzer) { + super(name, indexName, fullName, index, store, Field.TermVector.NO, boost, omitNorms, omitTermFreqAndPositions, indexAnalyzer, searchAnalyzer); + if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) { + this.precisionStep = Integer.MAX_VALUE; + } else { + this.precisionStep = precisionStep; + } + } + + protected abstract int maxPrecisionStep(); + + public int precisionStep() { + return this.precisionStep; + } + + /** + * Override the defualt behavior (to return the string, and reutrn the actual Number instance). + */ + @Override public Object valueForSearch(Fieldable field) { + return value(field); + } + + @Override public String valueAsString(Fieldable field) { + return value(field).toString(); + } + + @Override public abstract int sortType(); + + /** + * Removes a cached numeric token stream. The stream will be returned to the cahed once it is used + * sicne it implements the end method. + */ + protected CachedNumericTokenStream popCachedStream(int precisionStep) { + Deque deque = cachedStreams.get().get(precisionStep); + if (deque == null) { + deque = new ArrayDeque(); + cachedStreams.get().put(precisionStep, deque); + deque.add(new CachedNumericTokenStream(new NumericTokenStream(precisionStep), precisionStep)); + } + if (deque.isEmpty()) { + deque.add(new CachedNumericTokenStream(new NumericTokenStream(precisionStep), precisionStep)); + } + return deque.pollFirst(); + } + + /** + * A wrapper around a numeric stream allowing to reuse it by implementing the end method which returns + * this stream back to the thread local cache. 
+ */ + protected static final class CachedNumericTokenStream extends TokenStream { + + private final int precisionStep; + + private final NumericTokenStream numericTokenStream; + + public CachedNumericTokenStream(NumericTokenStream numericTokenStream, int precisionStep) { + super(numericTokenStream); + this.numericTokenStream = numericTokenStream; + this.precisionStep = precisionStep; + } + + public void end() throws IOException { + numericTokenStream.end(); + } + + /** + * Close the input TokenStream. + */ + public void close() throws IOException { + numericTokenStream.close(); + cachedStreams.get().get(precisionStep).add(this); + } + + /** + * Reset the filter as well as the input TokenStream. + */ + public void reset() throws IOException { + numericTokenStream.reset(); + } + + @Override public boolean incrementToken() throws IOException { + return numericTokenStream.incrementToken(); + } + + public CachedNumericTokenStream setIntValue(int value) { + numericTokenStream.setIntValue(value); + return this; + } + + public CachedNumericTokenStream setLongValue(long value) { + numericTokenStream.setLongValue(value); + return this; + } + + public CachedNumericTokenStream setFloatValue(float value) { + numericTokenStream.setFloatValue(value); + return this; + } + + public CachedNumericTokenStream setDoubleValue(double value) { + numericTokenStream.setDoubleValue(value); + return this; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonObjectMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonObjectMapper.java new file mode 100644 index 00000000000..c62a360494c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonObjectMapper.java @@ -0,0 +1,335 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import com.google.common.collect.ImmutableMap; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.FieldMapperListener; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.joda.time.format.DateTimeFormatter; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.ImmutableMap.*; +import static com.google.common.collect.Lists.*; +import static org.elasticsearch.index.mapper.json.JsonMapperBuilders.*; +import static org.elasticsearch.util.MapBuilder.*; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class JsonObjectMapper implements JsonMapper { + + public static class Defaults { + public static final boolean ENABLED = true; + public static final boolean DYNAMIC = true; + public static final JsonPath.Type PATH_TYPE = JsonPath.Type.FULL; + public static final DateTimeFormatter[] DATE_TIME_FORMATTERS = new DateTimeFormatter[]{JsonDateFieldMapper.Defaults.DATE_TIME_FORMATTER}; + } + + public static class Builder 
extends JsonMapper.Builder { + + private boolean enabled = Defaults.ENABLED; + + private boolean dynamic = Defaults.DYNAMIC; + + private JsonPath.Type pathType = Defaults.PATH_TYPE; + + private List dateTimeFormatters = newArrayList(); + + private final List mappersBuilders = newArrayList(); + + public Builder(String name) { + super(name); + } + + public Builder enabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + public Builder dynamic(boolean dynamic) { + this.dynamic = dynamic; + return this; + } + + public Builder pathType(JsonPath.Type pathType) { + this.pathType = pathType; + return this; + } + + public Builder noDateTimeFormatter() { + this.dateTimeFormatters = null; + return this; + } + + public Builder dateTimeFormatter(Iterable dateTimeFormatters) { + for (DateTimeFormatter dateTimeFormatter : dateTimeFormatters) { + this.dateTimeFormatters.add(dateTimeFormatter); + } + return this; + } + + public Builder dateTimeFormatter(DateTimeFormatter[] dateTimeFormatters) { + this.dateTimeFormatters.addAll(newArrayList(dateTimeFormatters)); + return this; + } + + public Builder dateTimeFormatter(DateTimeFormatter dateTimeFormatter) { + this.dateTimeFormatters.add(dateTimeFormatter); + return this; + } + + public Builder add(JsonMapper.Builder builder) { + mappersBuilders.add(builder); + return this; + } + + @Override public JsonObjectMapper build(BuilderContext context) { + if (dateTimeFormatters == null) { + dateTimeFormatters = newArrayList(); + } else if (dateTimeFormatters.isEmpty()) { + // add the default one + dateTimeFormatters.addAll(newArrayList(Defaults.DATE_TIME_FORMATTERS)); + } + JsonPath.Type origPathType = context.path().pathType(); + context.path().pathType(pathType); + context.path().add(name); + + Map mappers = new HashMap(); + for (JsonMapper.Builder builder : mappersBuilders) { + JsonMapper mapper = builder.build(context); + mappers.put(mapper.name(), mapper); + } + JsonObjectMapper objectMapper = new 
JsonObjectMapper(name, enabled, dynamic, pathType, + dateTimeFormatters.toArray(new DateTimeFormatter[dateTimeFormatters.size()]), + mappers); + + context.path().pathType(origPathType); + context.path().remove(); + + return objectMapper; + } + } + + private final String name; + + private final boolean enabled; + + private final boolean dynamic; + + private final JsonPath.Type pathType; + + private final DateTimeFormatter[] dateTimeFormatters; + + private volatile ImmutableMap mappers = ImmutableMap.of(); + + private final Object mutex = new Object(); + + protected JsonObjectMapper(String name) { + this(name, Defaults.ENABLED, Defaults.DYNAMIC, Defaults.PATH_TYPE); + } + + protected JsonObjectMapper(String name, boolean enabled, boolean dynamic, JsonPath.Type pathType) { + this(name, enabled, dynamic, pathType, Defaults.DATE_TIME_FORMATTERS); + } + + protected JsonObjectMapper(String name, boolean enabled, boolean dynamic, JsonPath.Type pathType, + DateTimeFormatter[] dateTimeFormatters) { + this(name, enabled, dynamic, pathType, dateTimeFormatters, null); + } + + JsonObjectMapper(String name, boolean enabled, boolean dynamic, JsonPath.Type pathType, + DateTimeFormatter[] dateTimeFormatters, Map mappers) { + this.name = name; + this.enabled = enabled; + this.dynamic = dynamic; + this.pathType = pathType; + this.dateTimeFormatters = dateTimeFormatters; + if (mappers != null) { + this.mappers = copyOf(mappers); + } + } + + @Override public String name() { + return this.name; + } + + public JsonObjectMapper putMapper(JsonMapper mapper) { + synchronized (mutex) { + mappers = newMapBuilder(mappers).put(mapper.name(), mapper).immutableMap(); + } + return this; + } + + @Override public void traverse(FieldMapperListener fieldMapperListener) { + for (JsonMapper mapper : mappers.values()) { + mapper.traverse(fieldMapperListener); + } + } + + public void parse(JsonParseContext jsonContext) throws IOException { + if (!enabled) { + jsonContext.jp().skipChildren(); + return; + } 
+ JsonParser jp = jsonContext.jp(); + + JsonPath.Type origPathType = jsonContext.path().pathType(); + jsonContext.path().pathType(pathType); + + String currentFieldName = jp.getCurrentName(); + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.START_OBJECT) { + serializeObject(jsonContext, currentFieldName); + } else if (token == JsonToken.START_ARRAY) { + serializeArray(jsonContext, currentFieldName); + } else if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else if (token == JsonToken.VALUE_NULL) { + serializeNullValue(jsonContext, currentFieldName); + } else { + serializeValue(jsonContext, currentFieldName, token); + } + } + // restore the enable path flag + jsonContext.path().pathType(origPathType); + } + + private void serializeNullValue(JsonParseContext jsonContext, String lastFieldName) throws IOException { + // we can only handle null values if we have mappings for them + JsonMapper mapper = mappers.get(lastFieldName); + if (mapper != null) { + mapper.parse(jsonContext); + } + } + + private void serializeObject(JsonParseContext jsonContext, String currentFieldName) throws IOException { + jsonContext.path().add(currentFieldName); + + JsonMapper objectMapper = mappers.get(currentFieldName); + if (objectMapper != null) { + objectMapper.parse(jsonContext); + } else { + if (dynamic) { + // we sync here just so we won't add it twice. 
Its not the end of the world + // to sync here since next operations will get it before + synchronized (mutex) { + objectMapper = mappers.get(currentFieldName); + if (objectMapper != null) { + objectMapper.parse(jsonContext); + } + + BuilderContext builderContext = new BuilderContext(jsonContext.path()); + objectMapper = JsonMapperBuilders.object(currentFieldName).enabled(true) + .dynamic(dynamic).pathType(pathType).dateTimeFormatter(dateTimeFormatters).build(builderContext); + putMapper(objectMapper); + objectMapper.parse(jsonContext); + } + } else { + // not dynamic, read everything up to end object + jsonContext.jp().skipChildren(); + } + } + + jsonContext.path().remove(); + } + + private void serializeArray(JsonParseContext jsonContext, String lastFieldName) throws IOException { + JsonParser jp = jsonContext.jp(); + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_ARRAY) { + if (token == JsonToken.START_OBJECT) { + serializeObject(jsonContext, lastFieldName); + } else if (token == JsonToken.START_ARRAY) { + serializeArray(jsonContext, lastFieldName); + } else if (token == JsonToken.FIELD_NAME) { + lastFieldName = jp.getCurrentName(); + } else if (token == JsonToken.VALUE_NULL) { + serializeNullValue(jsonContext, lastFieldName); + } else { + serializeValue(jsonContext, lastFieldName, token); + } + } + } + + private void serializeValue(JsonParseContext jsonContext, String currentFieldName, JsonToken token) throws IOException { + JsonMapper mapper = mappers.get(currentFieldName); + if (mapper != null) { + mapper.parse(jsonContext); + return; + } + if (!dynamic) { + return; + } + // we sync here since we don't want to add this field twice to the document mapper + // its not the end of the world, since we add it to the mappers once we create it + // so next time we won't even get here for this field + synchronized (mutex) { + mapper = mappers.get(currentFieldName); + if (mapper != null) { + mapper.parse(jsonContext); + return; + } + + 
BuilderContext builderContext = new BuilderContext(jsonContext.path()); + if (token == JsonToken.VALUE_STRING) { + // check if it fits one of the date formats + boolean isDate = false; + for (DateTimeFormatter dateTimeFormatter : dateTimeFormatters) { + try { + dateTimeFormatter.parseMillis(jsonContext.jp().getText()); + mapper = dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter).build(builderContext); + isDate = true; + break; + } catch (Exception e) { + // failure to parse this, continue + } + } + if (!isDate) { + mapper = stringField(currentFieldName).build(builderContext); + } + } else if (token == JsonToken.VALUE_NUMBER_INT) { + mapper = longField(currentFieldName).build(builderContext); + } else if (token == JsonToken.VALUE_NUMBER_FLOAT) { + mapper = doubleField(currentFieldName).build(builderContext); + } else if (token == JsonToken.VALUE_TRUE) { + mapper = booleanField(currentFieldName).build(builderContext); + } else if (token == JsonToken.VALUE_FALSE) { + mapper = booleanField(currentFieldName).build(builderContext); + } else { + // TODO how do we identify dynamically that its a binary value? + throw new ElasticSearchIllegalStateException("Can't handle serializing a dynamic type with json token [" + token + "] and field name [" + currentFieldName + "]"); + } + putMapper(mapper); + jsonContext.docMapper().addFieldMapper((FieldMapper) mapper); + + mapper.parse(jsonContext); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonParseContext.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonParseContext.java new file mode 100644 index 00000000000..047cbd05206 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonParseContext.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Document; +import org.codehaus.jackson.JsonParser; +import org.elasticsearch.util.concurrent.NotThreadSafe; + +/** + * @author kimchy (Shay Banon) + */ +@NotThreadSafe +public class JsonParseContext { + + private final JsonDocumentMapper docMapper; + + private final JsonPath path; + + private JsonParser jsonParser; + + private Document document; + + private String type; + + private String source; + + private String id; + + private String uid; + + private StringBuilder stringBuiler = new StringBuilder(); + + private ParsedIdState parsedIdState; + + public JsonParseContext(JsonDocumentMapper docMapper, JsonPath path) { + this.docMapper = docMapper; + this.path = path; + } + + public void reset(JsonParser jsonParser, Document document, String type, String source) { + this.jsonParser = jsonParser; + this.document = document; + this.type = type; + this.source = source; + this.path.reset(); + this.parsedIdState = ParsedIdState.NO; + } + + public String type() { + return this.type; + } + + public String source() { + return this.source; + } + + public JsonPath path() { + return this.path; + } + + public JsonParser jp() { + return this.jsonParser; + } + + public Document doc() { + return 
this.document; + } + + public JsonDocumentMapper docMapper() { + return this.docMapper; + } + + public String id() { + return id; + } + + public void parsedId(ParsedIdState parsedIdState) { + this.parsedIdState = parsedIdState; + } + + public ParsedIdState parsedIdState() { + return this.parsedIdState; + } + + /** + * Really, just the id mapper should set this. + */ + public void id(String id) { + this.id = id; + } + + public String uid() { + return this.uid; + } + + /** + * Really, just the uid mapper should set this. + */ + public void uid(String uid) { + this.uid = uid; + } + + /** + * A string builder that can be used to construct complex names for example. + * Its better to reuse the. + */ + public StringBuilder stringBuilder() { + stringBuiler.setLength(0); + return this.stringBuiler; + } + + public static enum ParsedIdState { + NO, + PARSED, + EXTERNAL + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonPath.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonPath.java new file mode 100644 index 00000000000..b1aecfe7e22 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonPath.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.elasticsearch.util.concurrent.NotThreadSafe; + +/** + * @author kimchy (Shay Banon) + */ +@NotThreadSafe +public class JsonPath { + + public static enum Type { + JUST_NAME, + FULL, + } + + private Type pathType; + + private final char delimiter; + + private final StringBuilder sb; + + private final int offset; + + private int index = 0; + + private String[] path = new String[10]; + + public JsonPath() { + this(0); + } + + /** + * Constructs a json path with an offset. The offset will result an offset + * number of path elements to not be included in {@link #pathAsText(String)}. + */ + public JsonPath(int offset) { + this.delimiter = '.'; + this.sb = new StringBuilder(); + this.offset = offset; + reset(); + } + + public void reset() { + this.index = 0; + } + + public void add(String name) { + if (index == path.length) { // resize + String[] newPath = new String[path.length + 10]; + System.arraycopy(path, 0, newPath, 0, path.length); + path = newPath; + } + path[index++] = name; + } + + public void remove() { + path[index--] = null; + } + + public String pathAsText(String name) { + if (pathType == Type.JUST_NAME) { + return name; + } + return fullPathAsText(name); + } + + public String fullPathAsText(String name) { + sb.setLength(0); + for (int i = offset; i < index; i++) { + sb.append(path[i]).append(delimiter); + } + sb.append(name); + return sb.toString(); + } + + public Type pathType() { + return pathType; + } + + public void pathType(Type type) { + this.pathType = type; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonSourceFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonSourceFieldMapper.java new file mode 100644 index 00000000000..4ec610b9042 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonSourceFieldMapper.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.*; +import org.elasticsearch.index.mapper.MapperCompressionException; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.util.io.compression.Compressor; +import org.elasticsearch.util.io.compression.ZipCompressor; +import org.elasticsearch.util.lucene.Lucene; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonSourceFieldMapper extends JsonFieldMapper implements SourceFieldMapper { + + public static class Defaults extends JsonFieldMapper.Defaults { + public static final String NAME = "_source"; + public static final boolean ENABLED = true; + public static final Field.Index INDEX = Field.Index.NO; + public static final Field.Store STORE = Field.Store.YES; + public static final boolean OMIT_NORMS = true; + public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = true; + public static final Compressor COMPRESSOR = new ZipCompressor(); + public static final int NO_COMPRESSION = -1; + } + + 
public static class Builder extends JsonMapper.Builder { + + private boolean enabled = Defaults.ENABLED; + + private Compressor compressor = Defaults.COMPRESSOR; + + private int compressionThreshold = Defaults.NO_COMPRESSION; + + public Builder(String name) { + super(name); + } + + public Builder enabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + + public Builder compressionThreshold(int compressionThreshold) { + this.compressionThreshold = compressionThreshold; + return this; + } + + @Override public JsonSourceFieldMapper build(BuilderContext context) { + return new JsonSourceFieldMapper(name, enabled, compressionThreshold, compressor); + } + } + + private final boolean enabled; + + private final Compressor compressor; + + // the size of the source file that we will perform compression for + private final int compressionThreshold; + + private final SourceFieldSelector fieldSelector; + + protected JsonSourceFieldMapper() { + this(Defaults.NAME, Defaults.ENABLED); + } + + protected JsonSourceFieldMapper(String name, boolean enabled) { + this(name, enabled, Defaults.NO_COMPRESSION, Defaults.COMPRESSOR); + } + + protected JsonSourceFieldMapper(String name, boolean enabled, int compressionThreshold, Compressor compressor) { + super(name, name, name, Defaults.INDEX, Defaults.STORE, Defaults.TERM_VECTOR, Defaults.BOOST, + Defaults.OMIT_NORMS, Defaults.OMIT_TERM_FREQ_AND_POSITIONS, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER); + this.enabled = enabled; + this.compressionThreshold = compressionThreshold; + this.compressor = compressor; + this.fieldSelector = new SourceFieldSelector(indexName); + } + + public boolean enabled() { + return this.enabled; + } + + public FieldSelector fieldSelector() { + return this.fieldSelector; + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + if (!enabled) 
{ + return null; + } + Field sourceField; + if (compressionThreshold == Defaults.NO_COMPRESSION || jsonContext.source().length() < compressionThreshold) { + sourceField = new Field(name, jsonContext.source(), store, index); + } else { + try { + sourceField = new Field(name, compressor.compressString(jsonContext.source()), store); + } catch (IOException e) { + throw new MapperCompressionException("Failed to compress data", e); + } + } + return sourceField; + } + + @Override public String value(Document document) { + Fieldable field = document.getFieldable(indexName); + return field == null ? null : value(field); + } + + @Override public String value(Fieldable field) { + if (field.stringValue() != null) { + return field.stringValue(); + } + byte[] compressed = field.getBinaryValue(); + if (compressed == null) { + return null; + } + try { + return compressor.decompressString(compressed); + } catch (IOException e) { + throw new MapperCompressionException("Failed to decompress data", e); + } + } + + @Override public String valueAsString(Fieldable field) { + return value(field); + } + + @Override public String indexedValue(String value) { + return value; + } + + private static class SourceFieldSelector implements FieldSelector { + + private final String name; + + private SourceFieldSelector(String name) { + this.name = name; + } + + @Override public FieldSelectorResult accept(String fieldName) { + if (fieldName.equals(name)) { + return FieldSelectorResult.LOAD_AND_BREAK; + } + return FieldSelectorResult.NO_LOAD; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonStringFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonStringFieldMapper.java new file mode 100644 index 00000000000..797389159bd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonStringFieldMapper.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elastic Search and Shay Banon under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.codehaus.jackson.JsonToken; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonStringFieldMapper extends JsonFieldMapper { + + public static class Defaults extends JsonFieldMapper.Defaults { + // NOTE, when adding defaults here, make sure you add them in the builder + public static final String NULL_VALUE = null; + } + + public static class Builder extends JsonFieldMapper.Builder { + + protected String nullValue = Defaults.NULL_VALUE; + + public Builder(String name) { + super(name); + builder = this; + } + + public Builder nullValue(String nullValue) { + this.nullValue = nullValue; + return this; + } + + @Override public JsonStringFieldMapper build(BuilderContext context) { + return new JsonStringFieldMapper(name, buildIndexName(context), buildFullName(context), + index, store, termVector, boost, omitNorms, omitTermFreqAndPositions, nullValue, + indexAnalyzer, searchAnalyzer); + } + } + + private final String nullValue; + + protected JsonStringFieldMapper(String name, String 
indexName, String fullName, Field.Index index, Field.Store store, Field.TermVector termVector, + float boost, boolean omitNorms, boolean omitTermFreqAndPositions, + String nullValue, Analyzer indexAnalyzer, Analyzer searchAnalyzer) { + super(name, indexName, fullName, index, store, termVector, boost, omitNorms, omitTermFreqAndPositions, indexAnalyzer, searchAnalyzer); + this.nullValue = nullValue; + } + + @Override public String value(Fieldable field) { + return field.stringValue(); + } + + @Override public String valueAsString(Fieldable field) { + return value(field); + } + + @Override public String indexedValue(String value) { + return value; + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + String value; + if (jsonContext.jp().getCurrentToken() == JsonToken.VALUE_NULL) { + value = nullValue; + } else { + value = jsonContext.jp().getText(); + } + if (value == null) { + return null; + } + return new Field(indexName, value, store, index, termVector); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonTypeFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonTypeFieldMapper.java new file mode 100644 index 00000000000..7b680174522 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonTypeFieldMapper.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.Term; +import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.util.lucene.Lucene; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonTypeFieldMapper extends JsonFieldMapper implements TypeFieldMapper { + + public static class Defaults extends JsonFieldMapper.Defaults { + public static final String NAME = "_type"; + public static final String INDEX_NAME = "_type"; + public static final Field.Index INDEX = Field.Index.NOT_ANALYZED; + public static final Field.Store STORE = Field.Store.NO; + public static final boolean OMIT_NORMS = true; + public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = true; + } + + public static class Builder extends JsonFieldMapper.Builder { + + public Builder(String name) { + super(name); + indexName = Defaults.INDEX_NAME; + index = Defaults.INDEX; + store = Defaults.STORE; + omitNorms = Defaults.OMIT_NORMS; + omitTermFreqAndPositions = Defaults.OMIT_TERM_FREQ_AND_POSITIONS; + } + + @Override public JsonTypeFieldMapper build(BuilderContext context) { + return new JsonTypeFieldMapper(name, indexName, store, termVector, boost, omitNorms, omitTermFreqAndPositions); + } + } + + protected JsonTypeFieldMapper() { + this(Defaults.NAME, Defaults.INDEX_NAME); + } + + protected JsonTypeFieldMapper(String name, String indexName) { + this(name, 
indexName, Defaults.STORE, Defaults.TERM_VECTOR, Defaults.BOOST, + Defaults.OMIT_NORMS, Defaults.OMIT_TERM_FREQ_AND_POSITIONS); + } + + public JsonTypeFieldMapper(String name, String indexName, Field.Store store, Field.TermVector termVector, + float boost, boolean omitNorms, boolean omitTermFreqAndPositions) { + super(name, indexName, name, Defaults.INDEX, store, termVector, boost, omitNorms, omitTermFreqAndPositions, + Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER); + } + + @Override public String value(Document document) { + Fieldable field = document.getFieldable(indexName); + return field == null ? null : value(field); + } + + @Override public String value(Fieldable field) { + return field.stringValue(); + } + + @Override public String valueAsString(Fieldable field) { + return value(field); + } + + @Override public String indexedValue(String value) { + return value; + } + + @Override public Term term(String value) { + return new Term(indexName, value); + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + return new Field(indexName, jsonContext.type(), store, index); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonUidFieldMapper.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonUidFieldMapper.java new file mode 100644 index 00000000000..4affeacf96d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/JsonUidFieldMapper.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.Term; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.util.lucene.Lucene; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonUidFieldMapper extends JsonFieldMapper implements UidFieldMapper { + + public static class Defaults extends JsonFieldMapper.Defaults { + public static final String NAME = "_uid"; + public static final Field.Index INDEX = Field.Index.NOT_ANALYZED; + public static final boolean OMIT_NORMS = true; + public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = true; + } + + public static class Builder extends JsonMapper.Builder { + + protected String indexName; + + public Builder(String name) { + super(name); + this.indexName = name; + } + + public Builder indexName(String indexName) { + this.indexName = indexName; + return this; + } + + @Override public JsonUidFieldMapper build(BuilderContext context) { + return new JsonUidFieldMapper(name, indexName); + } + } + + protected JsonUidFieldMapper() { + this(Defaults.NAME); + } + + protected JsonUidFieldMapper(String name) { + this(name, name); + } + + protected JsonUidFieldMapper(String name, String indexName) { + super(name, indexName, name, Defaults.INDEX, Field.Store.YES, Defaults.TERM_VECTOR, Defaults.BOOST, + Defaults.OMIT_NORMS, 
Defaults.OMIT_TERM_FREQ_AND_POSITIONS, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER); + } + + @Override public String name() { + return this.name; + } + + @Override protected Field parseCreateField(JsonParseContext jsonContext) throws IOException { + if (jsonContext.id() == null) { + throw new MapperParsingException("No id found while parsing the json source"); + } + jsonContext.uid(Uid.createUid(jsonContext.stringBuilder(), jsonContext.type(), jsonContext.id())); + return new Field(name, jsonContext.uid(), store, index); + } + + @Override public Uid value(Fieldable field) { + return Uid.createUid(field.stringValue()); + } + + @Override public String valueAsString(Fieldable field) { + return field.stringValue(); + } + + @Override public String indexedValue(String value) { + return value; + } + + @Override public Term term(String type, String id) { + return term(Uid.createUid(type, id)); + } + + @Override public Term term(String uid) { + return new Term(indexName, uid); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/dynamic-mapping.json b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/dynamic-mapping.json new file mode 100644 index 00000000000..16fd0ec6f7a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/mapper/json/dynamic-mapping.json @@ -0,0 +1,4 @@ +{ + _default_ : { + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/BalancedSegmentMergePolicy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/BalancedSegmentMergePolicy.java new file mode 100644 index 00000000000..84320211d5a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/BalancedSegmentMergePolicy.java @@ -0,0 +1,449 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.policy; + +import org.apache.lucene.index.*; + +import java.io.IOException; +import java.util.Set; + +/** + * Merge policy that tries to balance not doing large + * segment merges with not accumulating too many segments in + * the index, to provide for better performance in near + * real-time setting. + *

+ * <p>This is based on code from zoie, described in more detail + * at http://code.google.com/p/zoie/wiki/ZoieMergePolicy.</p> + * <p/> + * <p>See: https://issues.apache.org/jira/browse/LUCENE-1924</p>

+ */ +// TODO monitor against Lucene 3.0 trunk, once we move to 3.0 remove this. +public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy { + public static final int DEFAULT_NUM_LARGE_SEGMENTS = 10; + + private boolean _partialExpunge = false; + private int _numLargeSegments = DEFAULT_NUM_LARGE_SEGMENTS; + private int _maxSmallSegments = 2 * LogMergePolicy.DEFAULT_MERGE_FACTOR; + private int _maxSegments = _numLargeSegments + _maxSmallSegments; + + public BalancedSegmentMergePolicy(IndexWriter writer) { + super(writer); + } + + public void setMergePolicyParams(MergePolicyParams params) { + if (params != null) { + setPartialExpunge(params._doPartialExpunge); + setNumLargeSegments(params._numLargeSegments); + setMaxSmallSegments(params._maxSmallSegments); + setMergeFactor(params._mergeFactor); + setUseCompoundFile(params._useCompoundFile); + setMaxMergeDocs(params._maxMergeDocs); + } + } + + @Override + protected long size(SegmentInfo info) throws IOException { + long byteSize = info.sizeInBytes(); + float delRatio = (info.docCount <= 0 ? 0.0f : ((float) info.getDelCount() / (float) info.docCount)); + return (info.docCount <= 0 ?
byteSize : (long) ((float) byteSize * (1.0f - delRatio))); + } + + public void setPartialExpunge(boolean doPartialExpunge) { + _partialExpunge = doPartialExpunge; + } + + public boolean getPartialExpunge() { + return _partialExpunge; + } + + public void setNumLargeSegments(int numLargeSegments) { + if (numLargeSegments < 2) { + throw new IllegalArgumentException("numLargeSegments cannot be less than 2"); + } + + _numLargeSegments = numLargeSegments; + _maxSegments = _numLargeSegments + 2 * getMergeFactor(); + } + + public int getNumLargeSegments() { + return _numLargeSegments; + } + + public void setMaxSmallSegments(int maxSmallSegments) { + if (maxSmallSegments < getMergeFactor()) { + throw new IllegalArgumentException("maxSmallSegments cannot be less than mergeFactor"); + } + _maxSmallSegments = maxSmallSegments; + _maxSegments = _numLargeSegments + _maxSmallSegments; + } + + public int getMaxSmallSegments() { + return _maxSmallSegments; + } + + @Override + public void setMergeFactor(int mergeFactor) { + super.setMergeFactor(mergeFactor); + if (_maxSmallSegments < getMergeFactor()) { + _maxSmallSegments = getMergeFactor(); + _maxSegments = _numLargeSegments + _maxSmallSegments; + } + } + + private boolean isOptimized(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Set segmentsToOptimize) throws IOException { + final int numSegments = infos.size(); + int numToOptimize = 0; + SegmentInfo optimizeInfo = null; + for (int i = 0; i < numSegments && numToOptimize <= maxNumSegments; i++) { + final SegmentInfo info = infos.info(i); + if (segmentsToOptimize.contains(info)) { + numToOptimize++; + optimizeInfo = info; + } + } + + return numToOptimize <= maxNumSegments && + (numToOptimize != 1 || isOptimized(writer, optimizeInfo)); + } + + private boolean isOptimized(IndexWriter writer, SegmentInfo info) + throws IOException { + return !info.hasDeletions() && + !info.hasSeparateNorms() && + info.dir == writer.getDirectory() && + info.getUseCompoundFile() == 
getUseCompoundFile(); + } + + @Override + public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Set segmentsToOptimize) throws IOException { + + assert maxNumSegments > 0; + + MergeSpecification spec = null; + + if (!isOptimized(infos, writer, maxNumSegments, segmentsToOptimize)) { + + // Find the newest (rightmost) segment that needs to + // be optimized (other segments may have been flushed + // since optimize started): + int last = infos.size(); + while (last > 0) { + + final SegmentInfo info = infos.info(--last); + if (segmentsToOptimize.contains(info)) { + + last++; + break; + } + } + + if (last > 0) { + + if (maxNumSegments == 1) { + + // Since we must optimize down to 1 segment, the + // choice is simple: + boolean useCompoundFile = getUseCompoundFile(); + if (last > 1 || !isOptimized(writer, infos.info(0))) { + + spec = new MergeSpecification(); + spec.add(new OneMerge(infos.range(0, last), useCompoundFile)); + } + } else if (last > maxNumSegments) { + + // find most balanced merges + spec = findBalancedMerges(infos, last, maxNumSegments, _partialExpunge); + } + } + } + return spec; + } + + private MergeSpecification findBalancedMerges(SegmentInfos infos, int infoLen, int maxNumSegments, boolean partialExpunge) + throws IOException { + if (infoLen <= maxNumSegments) return null; + + MergeSpecification spec = new MergeSpecification(); + boolean useCompoundFile = getUseCompoundFile(); + + // use Viterbi algorithm to find the best segmentation. + // we will try to minimize the size variance of resulting segments. 
+ + double[][] variance = createVarianceTable(infos, infoLen, maxNumSegments); + + final int maxMergeSegments = infoLen - maxNumSegments + 1; + double[] sumVariance = new double[maxMergeSegments]; + int[][] backLink = new int[maxNumSegments][maxMergeSegments]; + + for (int i = (maxMergeSegments - 1); i >= 0; i--) { + sumVariance[i] = variance[0][i]; + backLink[0][i] = 0; + } + for (int i = 1; i < maxNumSegments; i++) { + for (int j = (maxMergeSegments - 1); j >= 0; j--) { + double minV = Double.MAX_VALUE; + int minK = 0; + for (int k = j; k >= 0; k--) { + double v = sumVariance[k] + variance[i + k][j - k]; + if (v < minV) { + minV = v; + minK = k; + } + } + sumVariance[j] = minV; + backLink[i][j] = minK; + } + } + + // now, trace back the back links to find all merges, + // also find a candidate for partial expunge if requested + int mergeEnd = infoLen; + int prev = maxMergeSegments - 1; + int expungeCandidate = -1; + int maxDelCount = 0; + for (int i = maxNumSegments - 1; i >= 0; i--) { + prev = backLink[i][prev]; + int mergeStart = i + prev; + if ((mergeEnd - mergeStart) > 1) { + spec.add(new OneMerge(infos.range(mergeStart, mergeEnd), useCompoundFile)); + } else { + if (partialExpunge) { + SegmentInfo info = infos.info(mergeStart); + int delCount = info.getDelCount(); + if (delCount > maxDelCount) { + expungeCandidate = mergeStart; + maxDelCount = delCount; + } + } + } + mergeEnd = mergeStart; + } + + if (partialExpunge && maxDelCount > 0) { + // expunge deletes + spec.add(new OneMerge(infos.range(expungeCandidate, expungeCandidate + 1), useCompoundFile)); + } + + return spec; + } + + private double[][] createVarianceTable(SegmentInfos infos, int last, int maxNumSegments) throws IOException { + int maxMergeSegments = last - maxNumSegments + 1; + double[][] variance = new double[last][maxMergeSegments]; + + // compute the optimal segment size + long optSize = 0; + long[] sizeArr = new long[last]; + for (int i = 0; i < sizeArr.length; i++) { + sizeArr[i] = 
size(infos.info(i)); + optSize += sizeArr[i]; + } + optSize = (optSize / maxNumSegments); + + for (int i = 0; i < last; i++) { + long size = 0; + for (int j = 0; j < maxMergeSegments; j++) { + if ((i + j) < last) { + size += sizeArr[i + j]; + double residual = ((double) size / (double) optSize) - 1.0d; + variance[i][j] = residual * residual; + } else { + variance[i][j] = Double.NaN; + } + } + } + return variance; + } + + @Override + public MergeSpecification findMergesToExpungeDeletes(SegmentInfos infos) + throws CorruptIndexException, IOException { + final int numSegs = infos.size(); + final int numLargeSegs = (numSegs < _numLargeSegments ? numSegs : _numLargeSegments); + MergeSpecification spec = null; + + if (numLargeSegs < numSegs) { + SegmentInfos smallSegments = infos.range(numLargeSegs, numSegs); + spec = super.findMergesToExpungeDeletes(smallSegments); + } + + if (spec == null) spec = new MergeSpecification(); + for (int i = 0; i < numLargeSegs; i++) { + SegmentInfo info = infos.info(i); + if (info.hasDeletions()) { + spec.add(new OneMerge(infos.range(i, i + 1), getUseCompoundFile())); + } + } + return spec; + } + + @Override + public MergeSpecification findMerges(SegmentInfos infos) throws IOException { + final int numSegs = infos.size(); + final int numLargeSegs = _numLargeSegments; + + if (numSegs <= numLargeSegs) { + return null; + } + + long totalLargeSegSize = 0; + long totalSmallSegSize = 0; + SegmentInfo info; + + // compute the total size of large segments + for (int i = 0; i < numLargeSegs; i++) { + info = infos.info(i); + totalLargeSegSize += size(info); + } + // compute the total size of small segments + for (int i = numLargeSegs; i < numSegs; i++) { + info = infos.info(i); + totalSmallSegSize += size(info); + } + + long targetSegSize = (totalLargeSegSize / (numLargeSegs - 1)); + if (targetSegSize <= totalSmallSegSize) { + // the total size of small segments is big enough, + // promote the small segments to a large segment and do balanced merge, 
+ + if (totalSmallSegSize < targetSegSize * 2) { + MergeSpecification spec = findBalancedMerges(infos, numLargeSegs, (numLargeSegs - 1), _partialExpunge); + if (spec == null) spec = new MergeSpecification(); // should not happen + spec.add(new OneMerge(infos.range(numLargeSegs, numSegs), getUseCompoundFile())); + return spec; + } else { + return findBalancedMerges(infos, numSegs, numLargeSegs, _partialExpunge); + } + } else if (_maxSegments < numSegs) { + // we have more than _maxSegments, merge small segments smaller than targetSegSize/4 + MergeSpecification spec = new MergeSpecification(); + int startSeg = numLargeSegs; + long sizeThreshold = (targetSegSize / 4); + while (startSeg < numSegs) { + info = infos.info(startSeg); + if (size(info) < sizeThreshold) break; + startSeg++; + } + spec.add(new OneMerge(infos.range(startSeg, numSegs), getUseCompoundFile())); + return spec; + } else { + // apply the log merge policy to small segments. + SegmentInfos smallSegments = infos.range(numLargeSegs, numSegs); + MergeSpecification spec = super.findMerges(smallSegments); + + if (_partialExpunge) { + OneMerge expunge = findOneSegmentToExpunge(infos, numLargeSegs); + if (expunge != null) { + if (spec == null) spec = new MergeSpecification(); + spec.add(expunge); + } + } + return spec; + } + } + + private OneMerge findOneSegmentToExpunge(SegmentInfos infos, int maxNumSegments) throws IOException { + int expungeCandidate = -1; + int maxDelCount = 0; + + for (int i = maxNumSegments - 1; i >= 0; i--) { + SegmentInfo info = infos.info(i); + int delCount = info.getDelCount(); + if (delCount > maxDelCount) { + expungeCandidate = i; + maxDelCount = delCount; + } + } + if (maxDelCount > 0) { + return new OneMerge(infos.range(expungeCandidate, expungeCandidate + 1), getUseCompoundFile()); + } + return null; + } + + + public static class MergePolicyParams { + private int _numLargeSegments; + private int _maxSmallSegments; + private boolean _doPartialExpunge; + private int _mergeFactor; 
+ private boolean _useCompoundFile; + private int _maxMergeDocs; + + public MergePolicyParams() { + _useCompoundFile = true; + _doPartialExpunge = false; + _numLargeSegments = DEFAULT_NUM_LARGE_SEGMENTS; + _maxSmallSegments = 2 * LogMergePolicy.DEFAULT_MERGE_FACTOR; + _mergeFactor = LogMergePolicy.DEFAULT_MERGE_FACTOR; + _maxMergeDocs = LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; + } + + public void setNumLargeSegments(int numLargeSegments) { + _numLargeSegments = numLargeSegments; + } + + public int getNumLargeSegments() { + return _numLargeSegments; + } + + public void setMaxSmallSegments(int maxSmallSegments) { + _maxSmallSegments = maxSmallSegments; + } + + public int getMaxSmallSegments() { + return _maxSmallSegments; + } + + public void setPartialExpunge(boolean doPartialExpunge) { + _doPartialExpunge = doPartialExpunge; + } + + public boolean getPartialExpunge() { + return _doPartialExpunge; + } + + public void setMergeFactor(int mergeFactor) { + _mergeFactor = mergeFactor; + } + + public int getMergeFactor() { + return _mergeFactor; + } + + public void setMaxMergeDocs(int maxMergeDocs) { + _maxMergeDocs = maxMergeDocs; + } + + public int getMaxMergeDocs() { + return _maxMergeDocs; + } + + public void setUseCompoundFile(boolean useCompoundFile) { + _useCompoundFile = useCompoundFile; + } + + public boolean isUseCompoundFile() { + return _useCompoundFile; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/BalancedSegmentMergePolicyProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/BalancedSegmentMergePolicyProvider.java new file mode 100644 index 00000000000..a28970c67d4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/BalancedSegmentMergePolicyProvider.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.policy; + +import com.google.inject.Inject; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.LogMergePolicy; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.util.Preconditions; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class BalancedSegmentMergePolicyProvider extends AbstractIndexShardComponent implements MergePolicyProvider { + + private final SizeValue minMergeSize; + private final SizeValue maxMergeSize; + private final int mergeFactor; + private final int maxMergeDocs; + private final int numLargeSegments; + private final int maxSmallSegments; + private final Boolean useCompoundFile; + + @Inject public BalancedSegmentMergePolicyProvider(Store store) { + super(store.shardId(), store.indexSettings()); + Preconditions.checkNotNull(store, "Store must be provided to merge policy"); + + this.minMergeSize = componentSettings.getAsSize("minMergeSize", new SizeValue((long) 
(LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024), SizeUnit.BYTES)); + this.maxMergeSize = componentSettings.getAsSize("maxMergeSize", new SizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, SizeUnit.MB)); + this.mergeFactor = componentSettings.getAsInt("mergeFactor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR); + this.maxMergeDocs = componentSettings.getAsInt("maxMergeDocs", LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS); + this.numLargeSegments = componentSettings.getAsInt("numLargeSegments", BalancedSegmentMergePolicy.DEFAULT_NUM_LARGE_SEGMENTS); + this.maxSmallSegments = componentSettings.getAsInt("maxSmallSegments", 2 * LogMergePolicy.DEFAULT_MERGE_FACTOR); + + this.useCompoundFile = componentSettings.getAsBoolean("useCompoundFile", store == null || store.suggestUseCompoundFile()); + + logger.debug("Using [Balanced] merge policy with mergeFactor[{}], minMergeSize[{}], maxMergeSize[{}], maxMergeDocs[{}] useCompoundFile[{}]", + new Object[]{mergeFactor, minMergeSize, maxMergeSize, maxMergeDocs, useCompoundFile}); + } + + @Override public BalancedSegmentMergePolicy newMergePolicy(IndexWriter indexWriter) { + BalancedSegmentMergePolicy mergePolicy = new BalancedSegmentMergePolicy(indexWriter); + mergePolicy.setMinMergeMB(minMergeSize.mbFrac()); + mergePolicy.setMaxMergeMB(maxMergeSize.mbFrac()); + mergePolicy.setMergeFactor(mergeFactor); + mergePolicy.setMaxMergeDocs(maxMergeDocs); + mergePolicy.setUseCompoundFile(useCompoundFile); + mergePolicy.setUseCompoundDocStore(useCompoundFile); + + mergePolicy.setMaxSmallSegments(maxSmallSegments); + mergePolicy.setNumLargeSegments(numLargeSegments); + return mergePolicy; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java new file mode 100644 index 00000000000..0d8990798a9 --- +++
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.policy; + +import com.google.inject.Inject; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.util.Preconditions; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class LogByteSizeMergePolicyProvider extends AbstractIndexShardComponent implements MergePolicyProvider { + + private final SizeValue minMergeSize; + private final SizeValue maxMergeSize; + private final int mergeFactor; + private final int maxMergeDocs; + private final Boolean useCompoundFile; + + @Inject public LogByteSizeMergePolicyProvider(Store store) { + super(store.shardId(), store.indexSettings()); + Preconditions.checkNotNull(store, "Store must be provided to merge policy"); + 
+ this.minMergeSize = componentSettings.getAsSize("minMergeSize", new SizeValue((long) LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024, SizeUnit.BYTES)); + this.maxMergeSize = componentSettings.getAsSize("maxMergeSize", new SizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, SizeUnit.MB)); + this.mergeFactor = componentSettings.getAsInt("mergeFactor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR); + this.maxMergeDocs = componentSettings.getAsInt("maxMergeDocs", LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS); + this.useCompoundFile = componentSettings.getAsBoolean("useCompoundFile", store == null || store.suggestUseCompoundFile()); + logger.debug("Using [LogByteSize] merge policy with mergeFactor[{}], minMergeSize[{}], maxMergeSize[{}], maxMergeDocs[{}] useCompoundFile[{}]", + new Object[]{mergeFactor, minMergeSize, maxMergeSize, maxMergeDocs, useCompoundFile}); + } + + @Override public LogByteSizeMergePolicy newMergePolicy(IndexWriter indexWriter) { + LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy(indexWriter); + mergePolicy.setMinMergeMB(minMergeSize.mbFrac()); + mergePolicy.setMaxMergeMB(maxMergeSize.mbFrac()); + mergePolicy.setMergeFactor(mergeFactor); + mergePolicy.setMaxMergeDocs(maxMergeDocs); + mergePolicy.setUseCompoundFile(useCompoundFile); + mergePolicy.setUseCompoundDocStore(useCompoundFile); + return mergePolicy; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java new file mode 100644 index 00000000000..e4bf7b9cbfa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.policy; + +import com.google.inject.Inject; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LogDocMergePolicy; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.util.Preconditions; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class LogDocMergePolicyProvider extends AbstractIndexShardComponent implements MergePolicyProvider { + + private final int minMergeDocs; + private final int maxMergeDocs; + private final int mergeFactor; + private final Boolean useCompoundFile; + + @Inject public LogDocMergePolicyProvider(Store store) { + super(store.shardId(), store.indexSettings()); + Preconditions.checkNotNull(store, "Store must be provided to merge policy"); + + this.minMergeDocs = componentSettings.getAsInt("minMergeDocs", LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS); + this.maxMergeDocs = componentSettings.getAsInt("maxMergeDocs", LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS); + this.mergeFactor = componentSettings.getAsInt("mergeFactor", LogDocMergePolicy.DEFAULT_MERGE_FACTOR); + this.useCompoundFile = 
componentSettings.getAsBoolean("useCompoundFile", store == null || store.suggestUseCompoundFile()); + logger.debug("Using [LogDoc] merge policy with mergeFactor[{}] minMergeDocs[{}], maxMergeDocs[{}], useCompoundFile[{}]", + new Object[]{mergeFactor, minMergeDocs, maxMergeDocs, useCompoundFile}); + } + + @Override public LogDocMergePolicy newMergePolicy(IndexWriter indexWriter) { + LogDocMergePolicy mergePolicy = new LogDocMergePolicy(indexWriter); + mergePolicy.setMinMergeDocs(minMergeDocs); + mergePolicy.setMaxMergeDocs(maxMergeDocs); + mergePolicy.setMergeFactor(mergeFactor); + mergePolicy.setUseCompoundFile(useCompoundFile); + mergePolicy.setUseCompoundDocStore(useCompoundFile); + return mergePolicy; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/MergeFactor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/MergeFactor.java new file mode 100644 index 00000000000..e8e9a53825e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/MergeFactor.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.merge.policy;

import com.google.inject.BindingAnnotation;
import org.elasticsearch.index.shard.IndexShardLifecycle;

import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;

import static java.lang.annotation.ElementType.*;
import static java.lang.annotation.RetentionPolicy.*;

/**
 * Guice {@link BindingAnnotation} marking the injected {@code Integer} that carries
 * the merge factor configured for the shard. The value is bound by
 * {@code MergePolicyModule} from the {@code index.merge.policy.mergeFactor} setting.
 *
 * @author kimchy (Shay Banon)
 */
@BindingAnnotation
@Target({FIELD, PARAMETER})
@Retention(RUNTIME)
@Documented
@IndexShardLifecycle
public @interface MergeFactor {
}
+ */ + +package org.elasticsearch.index.merge.policy; + +import com.google.inject.AbstractModule; +import org.apache.lucene.index.LogMergePolicy; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class MergePolicyModule extends AbstractModule { + + private final Settings settings; + + public MergePolicyModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(Integer.class) + .annotatedWith(MergeFactor.class) + .toInstance(settings.getAsInt("index.merge.policy.mergeFactor", LogMergePolicy.DEFAULT_MERGE_FACTOR)); + + // TODO consider moving to BalancedSegmentMergePolicyProvider as the default + // Note, when using the index jmeter benchmark, it seams like the balanced merger keeps on merging ... + // don't have time to look at it now... + bind(MergePolicyProvider.class) + .to(settings.getAsClass("index.merge.policy.type", LogByteSizeMergePolicyProvider.class)) + .asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java new file mode 100644 index 00000000000..58ee4d78895 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.policy; + +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.MergePolicy; +import org.elasticsearch.index.shard.IndexShardComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface MergePolicyProvider extends IndexShardComponent { + + T newMergePolicy(IndexWriter indexWriter); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java new file mode 100644 index 00000000000..4c58a8b8086 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.scheduler; + +import com.google.inject.Inject; +import org.apache.lucene.index.ConcurrentMergeScheduler; +import org.apache.lucene.index.MergeScheduler; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class ConcurrentMergeSchedulerProvider extends AbstractIndexShardComponent implements MergeSchedulerProvider { + + private final int maxThreadCount; + + @Inject public ConcurrentMergeSchedulerProvider(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + + this.maxThreadCount = componentSettings.getAsInt("maxThreadCount", 1); + logger.debug("Using [concurrent] merge scheduler with maxThreadCount[{}]", maxThreadCount); + } + + @Override public MergeScheduler newMergeScheduler() { + ConcurrentMergeScheduler concurrentMergeScheduler = new ConcurrentMergeScheduler(); + concurrentMergeScheduler.setMaxThreadCount(maxThreadCount); + return concurrentMergeScheduler; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java new file mode 100644 index 00000000000..8cf38acdf91 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.scheduler; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.index.merge.scheduler.MergeSchedulerModule.MergeSchedulerSettings.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class MergeSchedulerModule extends AbstractModule { + + public static class MergeSchedulerSettings { + public static final String TYPE = "index.merge.scheduler.type"; + } + + private final Settings settings; + + public MergeSchedulerModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(MergeSchedulerProvider.class) + .to(settings.getAsClass(TYPE, ConcurrentMergeSchedulerProvider.class)) + .asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java new file mode 100644 index 00000000000..d07e6f21d47 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.merge.scheduler; + +import org.apache.lucene.index.MergeScheduler; +import org.elasticsearch.index.shard.IndexShardComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface MergeSchedulerProvider extends IndexShardComponent { + + T newMergeScheduler(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java new file mode 100644 index 00000000000..8d34cb50fba --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.merge.scheduler;

import com.google.inject.Inject;
import org.apache.lucene.index.MergeScheduler;
import org.apache.lucene.index.SerialMergeScheduler;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.IndexShardLifecycle;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.settings.Settings;

/**
 * Provides Lucene {@link SerialMergeScheduler} instances, which run merges on the
 * calling thread. No configuration settings are read.
 *
 * @author kimchy (Shay Banon)
 */
@IndexShardLifecycle
public class SerialMergeSchedulerProvider extends AbstractIndexShardComponent implements MergeSchedulerProvider {

    @Inject public SerialMergeSchedulerProvider(ShardId shardId, @IndexSettings Settings indexSettings) {
        super(shardId, indexSettings);
        logger.trace("Using [serial] merge scheduler");
    }

    /**
     * Returns a new scheduler that performs merges sequentially on the caller's thread.
     */
    @Override public MergeScheduler newMergeScheduler() {
        return new SerialMergeScheduler();
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.index.IndexComponent;

/**
 * A named, index-scoped parser that turns query sources into Lucene
 * {@link Query} objects. Implementations are registered under a name (see
 * {@code IndexQueryParserService}).
 *
 * @author kimchy (Shay Banon)
 */
public interface IndexQueryParser extends IndexComponent {

    /**
     * The registration name of this parser.
     */
    String name();

    /**
     * Parses a raw query source (presumably JSON for the default implementation —
     * format is implementation-defined) into a Lucene query.
     *
     * @throws ElasticSearchException if the source cannot be parsed
     */
    Query parse(String source) throws ElasticSearchException;

    /**
     * Parses the source produced by the given builder into a Lucene query.
     *
     * @throws ElasticSearchException if the built source cannot be parsed
     */
    Query parse(QueryBuilder queryBuilder) throws ElasticSearchException;
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.elasticsearch.util.settings.Settings;

/**
 * Factory contract for creating named {@link IndexQueryParser} instances; bound
 * via Guice assisted injection in {@code IndexQueryParserModule}.
 *
 * @author kimchy (Shay Banon)
 */
public interface IndexQueryParserFactory {

    /**
     * Creates a parser registered under {@code name}, configured with the given
     * settings (may be null when no settings group exists for the name — see
     * IndexQueryParserService).
     */
    IndexQueryParser create(String name, Settings settings);
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.elasticsearch.ElasticSearchException;

/**
 * Thrown when a query parser is requested by name but no parser is registered
 * under that name.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexQueryParserMissingException extends ElasticSearchException {

    /**
     * @param name the missing parser's registration name (included in the message)
     */
    public IndexQueryParserMissingException(String name) {
        super("Index Query Parser [" + name + "] missing");
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.google.inject.assistedinject.FactoryProvider;
import com.google.inject.multibindings.MapBinder;
import org.elasticsearch.index.query.json.*;
import org.elasticsearch.util.settings.Settings;

import java.util.Map;

/**
 * Guice module registering named query-parser factories: top-level
 * {@link IndexQueryParserFactory} bindings (with a JSON-based "default"), plus
 * named JSON query and JSON filter parser factories, all driven by settings groups.
 *
 * <p>FIX: restored the generic parameterizations that had been stripped from this
 * file (raw {@code Map.Entry} loops did not compile), and reuse the already
 * resolved "type" class instead of calling {@code getAsClass} a second time.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexQueryParserModule extends AbstractModule {

    private final Settings settings;

    public IndexQueryParserModule(Settings settings) {
        this.settings = settings;
    }

    @Override protected void configure() {

        // handle IndexQueryParsers: one binding per "index.queryparser.types" group,
        // falling back to a JSON-based "default" parser when none is configured
        MapBinder<String, IndexQueryParserFactory> qbinder
                = MapBinder.newMapBinder(binder(), String.class, IndexQueryParserFactory.class);

        Map<String, Settings> queryParserGroupSettings = settings.getGroups(IndexQueryParserService.Defaults.PREFIX);
        for (Map.Entry<String, Settings> entry : queryParserGroupSettings.entrySet()) {
            String qName = entry.getKey();
            Settings qSettings = entry.getValue();
            qbinder.addBinding(qName).toProvider(FactoryProvider.newFactory(IndexQueryParserFactory.class,
                    qSettings.getAsClass("type", JsonIndexQueryParser.class))).in(Scopes.SINGLETON);
        }
        if (!queryParserGroupSettings.containsKey(IndexQueryParserService.Defaults.DEFAULT)) {
            qbinder.addBinding(IndexQueryParserService.Defaults.DEFAULT).toProvider(FactoryProvider.newFactory(IndexQueryParserFactory.class,
                    JsonIndexQueryParser.class)).in(Scopes.SINGLETON);
        }

        // handle JsonQueryParsers: a "type" is mandatory for each configured group
        MapBinder<String, JsonQueryParserFactory> jsonQueryBinder
                = MapBinder.newMapBinder(binder(), String.class, JsonQueryParserFactory.class);
        Map<String, Settings> jsonQueryParserGroups = settings.getGroups(JsonIndexQueryParser.Defaults.JSON_QUERY_PREFIX);
        for (Map.Entry<String, Settings> entry : jsonQueryParserGroups.entrySet()) {
            String qName = entry.getKey();
            Settings qSettings = entry.getValue();
            Class type = qSettings.getAsClass("type", null);
            if (type == null) {
                throw new IllegalArgumentException("Json Query Parser [" + qName + "] must be provided with a type");
            }
            // reuse the resolved class; the original re-ran getAsClass("type", null) here
            jsonQueryBinder.addBinding(qName).toProvider(FactoryProvider.newFactory(JsonQueryParserFactory.class,
                    type)).in(Scopes.SINGLETON);
        }

        // handle JsonFilterParsers: same mandatory-"type" contract as above
        MapBinder<String, JsonFilterParserFactory> jsonFilterBinder
                = MapBinder.newMapBinder(binder(), String.class, JsonFilterParserFactory.class);
        Map<String, Settings> jsonFilterParserGroups = settings.getGroups(JsonIndexQueryParser.Defaults.JSON_FILTER_PREFIX);
        for (Map.Entry<String, Settings> entry : jsonFilterParserGroups.entrySet()) {
            String fName = entry.getKey();
            Settings fSettings = entry.getValue();
            Class type = fSettings.getAsClass("type", null);
            if (type == null) {
                throw new IllegalArgumentException("Json Filter Parser [" + fName + "] must be provided with a type");
            }
            // reuse the resolved class; the original re-ran getAsClass("type", null) here
            jsonFilterBinder.addBinding(fName).toProvider(FactoryProvider.newFactory(JsonFilterParserFactory.class,
                    type)).in(Scopes.SINGLETON);
        }

        bind(IndexQueryParserService.class).asEagerSingleton();
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import com.google.common.collect.ImmutableMap;
import com.google.inject.Inject;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.json.JsonIndexQueryParser;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.settings.ImmutableSettings;
import org.elasticsearch.util.settings.Settings;

import java.util.Map;

import static com.google.common.collect.Maps.*;

/**
 * Index-level registry of named {@link IndexQueryParser}s. Parsers are created
 * from the injected factories (configured under "index.queryparser.types"); a
 * JSON-based "default" parser is added when none was configured.
 *
 * <p>FIX: restored the generic parameterizations that had been stripped from this
 * file (the raw {@code Map.Entry} loop did not compile).
 *
 * @author kimchy (Shay Banon)
 */
public class IndexQueryParserService extends AbstractIndexComponent {

    public static final class Defaults {
        public static final String DEFAULT = "default";
        public static final String PREFIX = "index.queryparser.types";
    }

    // resolved once at construction: the parser registered under "default"
    private final IndexQueryParser defaultIndexQueryParser;

    // immutable snapshot of all parsers, keyed by registration name
    private final Map<String, IndexQueryParser> indexQueryParsers;

    /**
     * Convenience constructor using empty settings and no configured factories
     * (only the built-in default parser is registered).
     */
    public IndexQueryParserService(Index index, MapperService mapperService, FilterCache filterCache, AnalysisService analysisService) {
        this(index, ImmutableSettings.Builder.EMPTY_SETTINGS, mapperService, filterCache, analysisService, null);
    }

    @Inject public IndexQueryParserService(Index index, @IndexSettings Settings indexSettings,
                                           MapperService mapperService, FilterCache filterCache,
                                           AnalysisService analysisService,
                                           @Nullable Map<String, IndexQueryParserFactory> indexQueryParsersFactories) {
        super(index, indexSettings);
        Map<String, Settings> queryParserGroupSettings;
        if (indexSettings != null) {
            queryParserGroupSettings = indexSettings.getGroups(Defaults.PREFIX);
        } else {
            queryParserGroupSettings = newHashMap();
        }
        Map<String, IndexQueryParser> qparsers = newHashMap();
        if (indexQueryParsersFactories != null) {
            for (Map.Entry<String, IndexQueryParserFactory> entry : indexQueryParsersFactories.entrySet()) {
                String qparserName = entry.getKey();
                // NOTE(review): may be null when no settings group exists for this
                // parser name; factories are expected to cope with null settings.
                Settings qparserSettings = queryParserGroupSettings.get(qparserName);
                qparsers.put(qparserName, entry.getValue().create(qparserName, qparserSettings));
            }
        }
        if (!qparsers.containsKey(Defaults.DEFAULT)) {
            IndexQueryParser defaultQueryParser = new JsonIndexQueryParser(index, indexSettings, mapperService, filterCache, analysisService, null, null, Defaults.DEFAULT, null);
            qparsers.put(Defaults.DEFAULT, defaultQueryParser);
        }

        indexQueryParsers = ImmutableMap.copyOf(qparsers);

        defaultIndexQueryParser = indexQueryParser(Defaults.DEFAULT);
    }

    /**
     * Returns the parser registered under {@code name}, or {@code null} if none.
     */
    public IndexQueryParser indexQueryParser(String name) {
        return indexQueryParsers.get(name);
    }

    /**
     * Returns the parser registered under {@link Defaults#DEFAULT}.
     */
    public IndexQueryParser defaultIndexQueryParser() {
        return defaultIndexQueryParser;
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +/** + * @author kimchy (Shay Banon) + */ +public interface QueryBuilder { + + String build() throws QueryBuilderException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/QueryBuilderException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/QueryBuilderException.java new file mode 100644 index 00000000000..0e324cff649 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/QueryBuilderException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryBuilderException extends ElasticSearchException { + + public QueryBuilderException(String msg) { + super(msg); + } + + public QueryBuilderException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/QueryParsingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/QueryParsingException.java new file mode 100644 index 00000000000..81a96ea9a79 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/QueryParsingException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryParsingException extends IndexException { + + public QueryParsingException(Index index, String msg) { + super(index, msg); + } + + public QueryParsingException(Index index, String msg, Throwable cause) { + super(index, msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BaseJsonFilterBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BaseJsonFilterBuilder.java new file mode 100644 index 00000000000..7ceb4b3f7ca --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BaseJsonFilterBuilder.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class BaseJsonFilterBuilder implements JsonFilterBuilder { + + @Override public void toJson(JsonBuilder builder) throws IOException { + builder.startObject(); + doJson(builder); + builder.endObject(); + } + + protected abstract void doJson(JsonBuilder builder) throws IOException; +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BaseJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BaseJsonQueryBuilder.java new file mode 100644 index 00000000000..7bc415b9196 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BaseJsonQueryBuilder.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.index.query.QueryBuilderException; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class BaseJsonQueryBuilder implements JsonQueryBuilder { + + @Override public String build() throws QueryBuilderException { + try { + JsonBuilder builder = JsonBuilder.cached(); + toJson(builder); + return builder.string(); + } catch (Exception e) { + throw new QueryBuilderException("Failed to build query", e); + } + } + + @Override public void toJson(JsonBuilder builder) throws IOException { + builder.startObject(); + doJson(builder); + builder.endObject(); + } + + protected abstract void doJson(JsonBuilder builder) throws IOException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonFilterParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonFilterParser.java new file mode 100644 index 00000000000..cdd3b1ea28b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonFilterParser.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanFilter; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.FilterClause; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class BoolJsonFilterParser extends AbstractIndexComponent implements JsonFilterParser { + + @Inject public BoolJsonFilterParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return "bool"; + } + + @Override public Filter parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + List clauses = newArrayList(); + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else if (token == JsonToken.START_OBJECT) { + if ("must".equals(currentFieldName)) { + clauses.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.MUST)); + } else if ("mustNot".equals(currentFieldName)) { + clauses.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.MUST_NOT)); + } else if ("should".equals(currentFieldName)) { + clauses.add(new FilterClause(parseContext.parseInnerFilter(), 
BooleanClause.Occur.SHOULD)); + } + } + } + + BooleanFilter booleanFilter = new BooleanFilter(); + for (FilterClause filterClause : clauses) { + booleanFilter.add(filterClause); + } + // no need to cache this one, inner queries will be cached and thats good enough (I think...) + return booleanFilter; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonQueryBuilder.java new file mode 100644 index 00000000000..c371616045a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonQueryBuilder.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.apache.lucene.search.BooleanClause; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * @author kimchy (Shay Banon) + */ +public class BoolJsonQueryBuilder extends BaseJsonQueryBuilder { + + private ArrayList clauses = new ArrayList(); + + private float boost = -1; + + private Boolean disableCoord; + + private int minimumNumberShouldMatch = -1; + + public BoolJsonQueryBuilder must(JsonQueryBuilder queryBuilder) { + clauses.add(new Clause(queryBuilder, BooleanClause.Occur.MUST)); + return this; + } + + public BoolJsonQueryBuilder mustNot(JsonQueryBuilder queryBuilder) { + clauses.add(new Clause(queryBuilder, BooleanClause.Occur.MUST_NOT)); + return this; + } + + public BoolJsonQueryBuilder should(JsonQueryBuilder queryBuilder) { + clauses.add(new Clause(queryBuilder, BooleanClause.Occur.SHOULD)); + return this; + } + + public BoolJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + public BoolJsonQueryBuilder disableCoord(boolean disableCoord) { + this.disableCoord = disableCoord; + return this; + } + + public BoolJsonQueryBuilder minimumNumberShouldMatch(int minimumNumberShouldMatch) { + this.minimumNumberShouldMatch = minimumNumberShouldMatch; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject("bool"); + for (Clause clause : clauses) { + if (clause.occur == BooleanClause.Occur.MUST) { + builder.field("must"); + clause.queryBuilder.toJson(builder); + } else if (clause.occur == BooleanClause.Occur.MUST_NOT) { + builder.field("mustNot"); + clause.queryBuilder.toJson(builder); + } else if (clause.occur == BooleanClause.Occur.SHOULD) { + builder.field("should"); + clause.queryBuilder.toJson(builder); + } + } + if (boost != -1) { + builder.field("boost", boost); + } + if (disableCoord != null) { + builder.field("disableCoord", disableCoord); 
+ } + if (minimumNumberShouldMatch != -1) { + builder.field("minimumNumberShouldMatch", minimumNumberShouldMatch); + } + builder.endObject(); + } + + private static class Clause { + final JsonQueryBuilder queryBuilder; + final BooleanClause.Occur occur; + + private Clause(JsonQueryBuilder queryBuilder, BooleanClause.Occur occur) { + this.queryBuilder = queryBuilder; + this.occur = occur; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonQueryParser.java new file mode 100644 index 00000000000..7d13299ce5e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/BoolJsonQueryParser.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class BoolJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + @Inject public BoolJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return "bool"; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + boolean disableCoord = false; + float boost = 1.0f; + int minimumNumberShouldMatch = -1; + + List clauses = newArrayList(); + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else if (token == JsonToken.START_OBJECT) { + if ("must".equals(currentFieldName)) { + clauses.add(new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.MUST)); + } else if ("mustNot".equals(currentFieldName)) { + clauses.add(new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.MUST_NOT)); + } else if ("should".equals(currentFieldName)) { + clauses.add(new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.SHOULD)); + } + } else if (token == JsonToken.VALUE_TRUE || token == 
JsonToken.VALUE_FALSE) { + if ("disableCoord".equals(currentFieldName)) { + disableCoord = token == JsonToken.VALUE_TRUE; + } + } else { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } else if ("minimumNumberShouldMatch".equals(currentFieldName)) { + minimumNumberShouldMatch = jp.getIntValue(); + } + } + } + + BooleanQuery query = new BooleanQuery(disableCoord); + for (BooleanClause clause : clauses) { + query.add(clause); + } + query.setBoost(boost); + if (minimumNumberShouldMatch != -1) { + query.setMinimumNumberShouldMatch(minimumNumberShouldMatch); + } + return query; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/ConstantScoreQueryJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/ConstantScoreQueryJsonQueryBuilder.java new file mode 100644 index 00000000000..b8639885985 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/ConstantScoreQueryJsonQueryBuilder.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ConstantScoreQueryJsonQueryBuilder extends BaseJsonQueryBuilder { + + private final JsonFilterBuilder filterBuilder; + + private float boost = -1; + + public ConstantScoreQueryJsonQueryBuilder(JsonFilterBuilder filterBuilder) { + this.filterBuilder = filterBuilder; + } + + public ConstantScoreQueryJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject(ConstantScoreQueryJsonQueryParser.NAME); + builder.field("filter"); + filterBuilder.toJson(builder); + if (boost != -1) { + builder.field("boost", boost); + } + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/ConstantScoreQueryJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/ConstantScoreQueryJsonQueryParser.java new file mode 100644 index 00000000000..9eb9c5a40ed --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/ConstantScoreQueryJsonQueryParser.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class ConstantScoreQueryJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "constantScore"; + + @Inject public ConstantScoreQueryJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + Filter filter = null; + float boost = 1.0f; + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else if (token == JsonToken.START_OBJECT) { + if ("filter".equals(currentFieldName)) { + filter = parseContext.parseInnerFilter(); + } + } else if (token == JsonToken.VALUE_NUMBER_INT 
|| token == JsonToken.VALUE_NUMBER_FLOAT) { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } + } + } + if (filter == null) { + throw new QueryParsingException(index, "[constantScore] requires 'filter' element"); + } + // we don't cache the filter, we assume it is already cached in the filter parsers... + ConstantScoreQuery query = new ConstantScoreQuery(filter); + query.setBoost(boost); + return query; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/DisMaxJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/DisMaxJsonQueryBuilder.java new file mode 100644 index 00000000000..7ff802910ab --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/DisMaxJsonQueryBuilder.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; +import java.util.ArrayList; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DisMaxJsonQueryBuilder extends BaseJsonQueryBuilder { + + private ArrayList queries = newArrayList(); + + private float boost = -1; + + private float tieBreakerMultiplier = -1; + + public DisMaxJsonQueryBuilder add(JsonQueryBuilder queryBuilder) { + queries.add(queryBuilder); + return this; + } + + public DisMaxJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + public DisMaxJsonQueryBuilder tieBreakerMultiplier(float tieBreakerMultiplier) { + this.tieBreakerMultiplier = tieBreakerMultiplier; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject("disMax"); + if (tieBreakerMultiplier != -1) { + builder.field("tieBreakerMultiplier", tieBreakerMultiplier); + } + if (boost != -1) { + builder.field("boost", boost); + } + builder.startArray("queries"); + for (JsonQueryBuilder queryBuilder : queries) { + queryBuilder.toJson(builder); + } + builder.endArray(); + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/DisMaxJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/DisMaxJsonQueryParser.java new file mode 100644 index 00000000000..ba099ff975c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/DisMaxJsonQueryParser.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DisMaxJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + @Inject public DisMaxJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return "disMax"; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + float boost = 1.0f; + float tieBreakerMultiplier = 0.0f; + + List queries = newArrayList(); + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } 
else if (token == JsonToken.START_OBJECT) { + if ("queries".equals(currentFieldName)) { + queries.add(parseContext.parseInnerQuery()); + } + } else if (token == JsonToken.START_ARRAY) { + if ("queries".equals(currentFieldName)) { + while (token != JsonToken.END_ARRAY) { + queries.add(parseContext.parseInnerQuery()); + token = jp.nextToken(); + } + } + } else { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } else if ("tieBreakerMultiplier".equals(currentFieldName)) { + tieBreakerMultiplier = jp.getFloatValue(); + } + } + } + + DisjunctionMaxQuery query = new DisjunctionMaxQuery(queries, tieBreakerMultiplier); + query.setBoost(boost); + return query; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/FilteredQueryJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/FilteredQueryJsonQueryBuilder.java new file mode 100644 index 00000000000..657eb1a2b7f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/FilteredQueryJsonQueryBuilder.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.index.query.json;

import com.google.inject.Inject;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.Query;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

/**
 * Parses the <tt>filteredQuery</tt> json element into a Lucene {@link FilteredQuery}.
 *
 * <p>Requires both a <tt>query</tt> and a <tt>filter</tt> inner element; an optional
 * numeric <tt>boost</tt> is applied to the resulting query.
 *
 * @author kimchy (Shay Banon)
 */
public class FilteredQueryJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser {

    public static final String NAME = "filteredQuery";

    @Inject public FilteredQueryJsonQueryParser(Index index, @IndexSettings Settings settings) {
        super(index, settings);
    }

    @Override public String name() {
        return NAME;
    }

    /**
     * Parses from the current position (inside the <tt>filteredQuery</tt> object) through
     * its END_OBJECT, delegating inner elements to the parse context.
     *
     * @throws QueryParsingException if either the query or the filter element is missing
     */
    @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException {
        JsonParser parser = parseContext.jp();

        Query innerQuery = null;
        Filter innerFilter = null;
        float boost = 1.0f;

        String fieldName = null;
        for (JsonToken t = parser.nextToken(); t != JsonToken.END_OBJECT; t = parser.nextToken()) {
            if (t == JsonToken.FIELD_NAME) {
                fieldName = parser.getCurrentName();
            } else if (t == JsonToken.START_OBJECT) {
                if ("query".equals(fieldName)) {
                    innerQuery = parseContext.parseInnerQuery();
                } else if ("filter".equals(fieldName)) {
                    innerFilter = parseContext.parseInnerFilter();
                }
            } else if (t == JsonToken.VALUE_NUMBER_INT || t == JsonToken.VALUE_NUMBER_FLOAT) {
                if ("boost".equals(fieldName)) {
                    boost = parser.getFloatValue();
                }
            }
        }

        if (innerQuery == null) {
            throw new QueryParsingException(index, "[filteredQuery] requires 'query' element");
        }
        if (innerFilter == null) {
            throw new QueryParsingException(index, "[filteredQuery] requires 'filter' element");
        }

        // we don't cache the filter, we assume it is already cached in the filter parsers...
        FilteredQuery filteredQuery = new FilteredQuery(innerQuery, innerFilter);
        filteredQuery.setBoost(boost);
        return filteredQuery;
    }
}
package org.elasticsearch.index.query.json;

/**
 * Static factory methods for json filter builders. Non-instantiable holder class;
 * intended to be used via static import.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class JsonFilterBuilders {

    /** A filter matching documents where field {@code name} contains the exact term {@code value}. */
    public static TermJsonFilterBuilder termFilter(String name, String value) {
        return new TermJsonFilterBuilder(name, value);
    }

    /** A term filter for an int value. */
    public static TermJsonFilterBuilder termFilter(String name, int value) {
        return new TermJsonFilterBuilder(name, value);
    }

    /** A term filter for a long value. */
    public static TermJsonFilterBuilder termFilter(String name, long value) {
        return new TermJsonFilterBuilder(name, value);
    }

    /** A term filter for a float value. */
    public static TermJsonFilterBuilder termFilter(String name, float value) {
        return new TermJsonFilterBuilder(name, value);
    }

    /** A term filter for a double value. */
    public static TermJsonFilterBuilder termFilter(String name, double value) {
        return new TermJsonFilterBuilder(name, value);
    }

    /** A filter matching documents where field {@code name} contains terms with the given prefix. */
    public static PrefixJsonFilterBuilder prefixFilter(String name, String value) {
        return new PrefixJsonFilterBuilder(name, value);
    }

    /** A range filter on field {@code name}; bounds are set on the returned builder. */
    public static RangeJsonFilterBuilder rangeFilter(String name) {
        return new RangeJsonFilterBuilder(name);
    }

    /** Wraps an arbitrary query as a filter. */
    public static QueryJsonFilterBuilder queryFilter(JsonQueryBuilder queryBuilder) {
        return new QueryJsonFilterBuilder(queryBuilder);
    }

    // no instances
    private JsonFilterBuilders() {

    }
}
package org.elasticsearch.index.query.json;

import org.elasticsearch.util.settings.Settings;

/**
 * Factory for named {@link JsonFilterParser} instances, allowing filter parsers to be
 * registered and constructed with per-parser settings (used for assisted injection).
 *
 * @author kimchy (Shay Banon)
 */
public interface JsonFilterParserFactory {

    /**
     * Creates a filter parser registered under {@code name}, configured with
     * {@code settings} (may carry parser-specific configuration).
     */
    JsonFilterParser create(String name, Settings settings);
}
package org.elasticsearch.index.query.json;

import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
import org.apache.lucene.search.Query;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonParser;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.io.FastStringReader;
import org.elasticsearch.util.json.Jackson;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.*;

/**
 * Json based {@link IndexQueryParser}: builds a registry of named query and filter
 * parsers (each configured from the index settings groups below) and parses json
 * query sources into Lucene {@link Query} instances.
 *
 * <p>Parse contexts are held in a {@link ThreadLocal} so each thread reuses its own
 * mutable {@link JsonQueryParseContext}.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonIndexQueryParser extends AbstractIndexComponent implements IndexQueryParser {

    public static final class Defaults {
        /** Settings group prefix for per-query-parser configuration. */
        public static final String JSON_QUERY_PREFIX = "index.queryparser.json.query";
        /** Settings group prefix for per-filter-parser configuration. */
        public static final String JSON_FILTER_PREFIX = "index.queryparser.json.filter";
    }

    // per-thread reusable parse context; initialValue runs lazily on first get(),
    // after the constructor has assigned queryParserRegistry
    private final ThreadLocal<JsonQueryParseContext> cache = new ThreadLocal<JsonQueryParseContext>() {
        @Override protected JsonQueryParseContext initialValue() {
            return new JsonQueryParseContext(index, queryParserRegistry, mapperService, filterCache);
        }
    };

    private final JsonFactory jsonFactory = Jackson.defaultJsonFactory();

    private final String name;

    private final MapperService mapperService;

    private final FilterCache filterCache;

    private final JsonQueryParserRegistry queryParserRegistry;

    /**
     * @param jsonQueryParsers  optional factories for named query parsers; each is created
     *                          with its settings group under {@link Defaults#JSON_QUERY_PREFIX}
     * @param jsonFilterParsers optional factories for named filter parsers; each is created
     *                          with its settings group under {@link Defaults#JSON_FILTER_PREFIX}
     */
    @Inject public JsonIndexQueryParser(Index index,
                                       @IndexSettings Settings indexSettings,
                                       MapperService mapperService, FilterCache filterCache,
                                       AnalysisService analysisService,
                                       @Nullable Map<String, JsonQueryParserFactory> jsonQueryParsers,
                                       @Nullable Map<String, JsonFilterParserFactory> jsonFilterParsers,
                                       @Assisted String name, @Assisted @Nullable Settings settings) {
        super(index, indexSettings);
        this.name = name;
        this.mapperService = mapperService;
        this.filterCache = filterCache;

        List<JsonQueryParser> queryParsers = newArrayList();
        if (jsonQueryParsers != null) {
            Map<String, Settings> jsonQueryParserGroups = indexSettings.getGroups(JsonIndexQueryParser.Defaults.JSON_QUERY_PREFIX);
            for (Map.Entry<String, JsonQueryParserFactory> entry : jsonQueryParsers.entrySet()) {
                String queryParserName = entry.getKey();
                JsonQueryParserFactory queryParserFactory = entry.getValue();
                Settings queryParserSettings = jsonQueryParserGroups.get(queryParserName);

                queryParsers.add(queryParserFactory.create(queryParserName, queryParserSettings));
            }
        }

        List<JsonFilterParser> filterParsers = newArrayList();
        if (jsonFilterParsers != null) {
            Map<String, Settings> jsonFilterParserGroups = indexSettings.getGroups(JsonIndexQueryParser.Defaults.JSON_FILTER_PREFIX);
            for (Map.Entry<String, JsonFilterParserFactory> entry : jsonFilterParsers.entrySet()) {
                String filterParserName = entry.getKey();
                JsonFilterParserFactory filterParserFactory = entry.getValue();
                Settings filterParserSettings = jsonFilterParserGroups.get(filterParserName);

                filterParsers.add(filterParserFactory.create(filterParserName, filterParserSettings));
            }
        }

        this.queryParserRegistry = new JsonQueryParserRegistry(index, indexSettings, analysisService, queryParsers, filterParsers);
    }

    @Override public String name() {
        return this.name;
    }

    public JsonQueryParserRegistry queryParserRegistry() {
        return this.queryParserRegistry;
    }

    @Override public Query parse(QueryBuilder queryBuilder) throws ElasticSearchException {
        return parse(queryBuilder.build());
    }

    /**
     * Parses the json {@code source} into a query.
     *
     * @throws QueryParsingException on any parse failure (other exceptions are wrapped,
     *                               with the offending source in the message)
     */
    @Override public Query parse(String source) throws QueryParsingException {
        try {
            return parse(cache.get(), source, jsonFactory.createJsonParser(new FastStringReader(source)));
        } catch (QueryParsingException e) {
            throw e;
        } catch (Exception e) {
            throw new QueryParsingException(index, "Failed to parse [" + source + "]", e);
        }
    }

    /**
     * Parses using an externally supplied parser; {@code source} is only used for
     * error reporting.
     */
    public Query parse(JsonParser jsonParser, String source) {
        try {
            return parse(cache.get(), source, jsonParser);
        } catch (IOException e) {
            throw new QueryParsingException(index, "Failed to parse [" + source + "]", e);
        }
    }

    // resets the thread-local context onto the parser and delegates to it
    private Query parse(JsonQueryParseContext parseContext, String source, JsonParser jsonParser) throws IOException, QueryParsingException {
        parseContext.reset(jsonParser);
        return parseContext.parseInnerQuery();
    }
}
package org.elasticsearch.index.query.json;

/**
 * Static factory methods for json query builders. Non-instantiable holder class;
 * intended to be used via static import.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class JsonQueryBuilders {

    /** A query matching all documents. */
    public static MatchAllJsonQueryBuilder matchAllQuery() {
        return new MatchAllJsonQueryBuilder();
    }

    /** A disjunction-max query; sub queries are added on the returned builder. */
    public static DisMaxJsonQueryBuilder disMaxQuery() {
        return new DisMaxJsonQueryBuilder();
    }

    /** A query matching documents where field {@code name} contains the exact term {@code value}. */
    public static TermJsonQueryBuilder termQuery(String name, String value) {
        return new TermJsonQueryBuilder(name, value);
    }

    /** A term query for an int value. */
    public static TermJsonQueryBuilder termQuery(String name, int value) {
        return new TermJsonQueryBuilder(name, value);
    }

    /** A term query for a long value. */
    public static TermJsonQueryBuilder termQuery(String name, long value) {
        return new TermJsonQueryBuilder(name, value);
    }

    /** A term query for a float value. */
    public static TermJsonQueryBuilder termQuery(String name, float value) {
        return new TermJsonQueryBuilder(name, value);
    }

    /** A term query for a double value. */
    public static TermJsonQueryBuilder termQuery(String name, double value) {
        return new TermJsonQueryBuilder(name, value);
    }

    /** A query matching documents where field {@code name} contains terms with the given prefix. */
    public static PrefixJsonQueryBuilder prefixQuery(String name, String value) {
        return new PrefixJsonQueryBuilder(name, value);
    }

    /** A range query on field {@code name}; bounds are set on the returned builder. */
    public static RangeJsonQueryBuilder rangeQuery(String name) {
        return new RangeJsonQueryBuilder(name);
    }

    /** A wildcard query ({@code ?} and {@code *} supported) on field {@code name}. */
    public static WildcardJsonQueryBuilder wildcardQuery(String name, String value) {
        return new WildcardJsonQueryBuilder(name, value);
    }

    /** A query parsed from Lucene query-string syntax. */
    public static QueryStringJsonQueryBuilder queryString(String queryString) {
        return new QueryStringJsonQueryBuilder(queryString);
    }

    /** A boolean compound query; clauses are added on the returned builder. */
    public static BoolJsonQueryBuilder boolQuery() {
        return new BoolJsonQueryBuilder();
    }

    /** A span query matching the exact term {@code value} in field {@code name}. */
    public static SpanTermJsonQueryBuilder spanTermQuery(String name, String value) {
        return new SpanTermJsonQueryBuilder(name, value);
    }

    /** A span term query for an int value. */
    public static SpanTermJsonQueryBuilder spanTermQuery(String name, int value) {
        return new SpanTermJsonQueryBuilder(name, value);
    }

    /** A span term query for a long value. */
    public static SpanTermJsonQueryBuilder spanTermQuery(String name, long value) {
        return new SpanTermJsonQueryBuilder(name, value);
    }

    /** A span term query for a float value. */
    public static SpanTermJsonQueryBuilder spanTermQuery(String name, float value) {
        return new SpanTermJsonQueryBuilder(name, value);
    }

    /** A span term query for a double value. */
    public static SpanTermJsonQueryBuilder spanTermQuery(String name, double value) {
        return new SpanTermJsonQueryBuilder(name, value);
    }

    /** A span query matching {@code match} within the first {@code end} positions. */
    public static SpanFirstJsonQueryBuilder spanFirstQuery(JsonSpanQueryBuilder match, int end) {
        return new SpanFirstJsonQueryBuilder(match, end);
    }

    /** A span-near compound query; clauses and slop are set on the returned builder. */
    public static SpanNearJsonQueryBuilder spanNearQuery() {
        return new SpanNearJsonQueryBuilder();
    }

    /** A span-not query; include/exclude clauses are set on the returned builder. */
    public static SpanNotJsonQueryBuilder spanNotQuery() {
        return new SpanNotJsonQueryBuilder();
    }

    /** A span-or compound query; clauses are added on the returned builder. */
    public static SpanOrJsonQueryBuilder spanOrQuery() {
        return new SpanOrJsonQueryBuilder();
    }

    /** A query constrained by a filter. */
    public static FilteredQueryJsonQueryBuilder filteredQuery(JsonQueryBuilder queryBuilder, JsonFilterBuilder filterBuilder) {
        return new FilteredQueryJsonQueryBuilder(queryBuilder, filterBuilder);
    }

    /** A query wrapping a filter, scoring all matches with a constant score. */
    public static ConstantScoreQueryJsonQueryBuilder constantScoreQuery(JsonFilterBuilder filterBuilder) {
        return new ConstantScoreQueryJsonQueryBuilder(filterBuilder);
    }

    // no instances
    private JsonQueryBuilders() {

    }
}
package org.elasticsearch.index.query.json;

import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMappers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParsingException;

import java.io.IOException;

/**
 * Per-parse state shared by all json query/filter parsers: the current
 * Jackson parser, the mapper service for field-name resolution, and the
 * (optional) filter cache. Reset with {@link #reset(JsonParser)} before
 * each parse; instances are not thread-safe.
 *
 * @author kimchy (Shay Banon)
 */
public class JsonQueryParseContext {

    private final Index index;

    private final MapperService mapperService;

    // May be null, in which case filters are simply not cached.
    private final FilterCache filterCache;

    private final JsonQueryParserRegistry queryParserRegistry;

    // The json parser currently being consumed; set via reset(...).
    private JsonParser jp;

    public JsonQueryParseContext(Index index, JsonQueryParserRegistry queryParserRegistry,
                                 MapperService mapperService, FilterCache filterCache) {
        this.index = index;
        this.queryParserRegistry = queryParserRegistry;
        this.mapperService = mapperService;
        this.filterCache = filterCache;
    }

    /** Re-binds this context to a new json parser for a fresh parse. */
    public void reset(JsonParser jp) {
        this.jp = jp;
    }

    public JsonParser jp() {
        return jp;
    }

    public MapperService mapperService() {
        return mapperService;
    }

    public FilterCache filterCache() {
        return filterCache;
    }

    /**
     * Returns a cached version of the filter when a filter cache is
     * configured, otherwise returns the filter unchanged.
     */
    public Filter cacheFilterIfPossible(Filter filter) {
        if (filterCache == null) {
            return filter;
        }
        return filterCache.cache(filter);
    }

    /**
     * Parses the nested query at the current parser location by dispatching
     * on the wrapping object's single field name (the query name).
     *
     * @throws QueryParsingException if no query parser is registered under that name
     */
    public Query parseInnerQuery() throws IOException, QueryParsingException {
        String queryName = advanceToInnerName();
        JsonQueryParser queryParser = queryParserRegistry.queryParser(queryName);
        if (queryParser == null) {
            throw new QueryParsingException(index, "No json query parser registered for [" + queryName + "]");
        }
        Query result = queryParser.parse(this);
        if (jp.getCurrentToken() == JsonToken.END_OBJECT) {
            // if we are at END_OBJECT, move to the next one...
            jp.nextToken();
        }
        return result;
    }

    /**
     * Parses the nested filter at the current parser location by dispatching
     * on the wrapping object's single field name (the filter name).
     *
     * @throws QueryParsingException if no filter parser is registered under that name
     */
    public Filter parseInnerFilter() throws IOException, QueryParsingException {
        String filterName = advanceToInnerName();
        JsonFilterParser filterParser = queryParserRegistry.filterParser(filterName);
        if (filterParser == null) {
            // was: "No json query parser registered" -- this is the filter path
            throw new QueryParsingException(index, "No json filter parser registered for [" + filterName + "]");
        }
        Filter result = filterParser.parse(this);
        if (jp.getCurrentToken() == JsonToken.END_OBJECT) {
            // if we are at END_OBJECT, move to the next one...
            jp.nextToken();
        }
        return result;
    }

    /**
     * Advances the parser from (or to) the wrapping START_OBJECT, past the
     * single FIELD_NAME that names the query/filter, leaving the parser on
     * the inner START_OBJECT. Returns that field name.
     */
    private String advanceToInnerName() throws IOException {
        JsonToken token;
        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
            token = jp.nextToken();
            assert token == JsonToken.START_OBJECT;
        }
        token = jp.nextToken();
        assert token == JsonToken.FIELD_NAME;
        String name = jp.getCurrentName();
        // move to the next START_OBJECT
        token = jp.nextToken();
        assert token == JsonToken.START_OBJECT;
        return name;
    }

    /** Resolves the first field mapper registered under the given name, or null. */
    public FieldMapper fieldMapper(String name) {
        FieldMappers fieldMappers = mapperService.smartNameFieldMappers(name);
        if (fieldMappers == null) {
            return null;
        }
        return fieldMappers.mapper();
    }

    /**
     * Translates a logical field name to its index (lucene) name; falls back
     * to the given name when no mapper is found.
     */
    public String indexName(String name) {
        MapperService.SmartNameFieldMappers smartMapper = smartFieldMappers(name);
        if (smartMapper == null) {
            return name;
        }
        if (smartMapper.fieldMappers().mapper() != null) {
            return smartMapper.fieldMappers().mapper().indexName();
        }
        return name;
    }

    public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
        return mapperService.smartName(name);
    }
}
+ */ + +package org.elasticsearch.index.query.json; + +import org.apache.lucene.search.Query; +import org.elasticsearch.index.IndexComponent; +import org.elasticsearch.index.query.QueryParsingException; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public interface JsonQueryParser extends IndexComponent { + + String name(); + + /** + * Parses the into a query from the current json parser location. + */ + Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/JsonQueryParserFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/JsonQueryParserFactory.java new file mode 100644 index 00000000000..ed621acc306 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/JsonQueryParserFactory.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public interface JsonQueryParserFactory { + + JsonQueryParser create(String name, Settings settings); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/JsonQueryParserRegistry.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/JsonQueryParserRegistry.java new file mode 100644 index 00000000000..4bb2608640b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/JsonQueryParserRegistry.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.index.query.json;

import com.google.common.collect.ImmutableMap;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.settings.Settings;

import java.util.Map;

import static com.google.common.collect.Maps.*;

/**
 * Immutable registry mapping json element names to their query and filter
 * parsers. Built-in parsers are registered first; any custom parsers passed
 * in override built-ins with the same name (later puts win).
 *
 * @author kimchy (Shay Banon)
 */
public class JsonQueryParserRegistry {

    // Typed maps instead of raw types so lookups need no casts.
    private final Map<String, JsonQueryParser> queryParsers;

    private final Map<String, JsonFilterParser> filterParsers;

    public JsonQueryParserRegistry(Index index,
                                   @IndexSettings Settings indexSettings,
                                   AnalysisService analysisService,
                                   @Nullable Iterable<JsonQueryParser> queryParsers,
                                   @Nullable Iterable<JsonFilterParser> filterParsers) {

        Map<String, JsonQueryParser> queryParsersMap = newHashMap();
        // add defaults
        add(queryParsersMap, new DisMaxJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new MatchAllJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new QueryStringJsonQueryParser(index, indexSettings, analysisService));
        add(queryParsersMap, new BoolJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new TermJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new RangeJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new PrefixJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new WildcardJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new FilteredQueryJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new ConstantScoreQueryJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new SpanTermJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new SpanNotJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new SpanFirstJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new SpanNearJsonQueryParser(index, indexSettings));
        add(queryParsersMap, new SpanOrJsonQueryParser(index, indexSettings));

        // now, copy over the ones provided (may override defaults by name)
        if (queryParsers != null) {
            for (JsonQueryParser queryParser : queryParsers) {
                add(queryParsersMap, queryParser);
            }
        }
        this.queryParsers = ImmutableMap.copyOf(queryParsersMap);

        Map<String, JsonFilterParser> filterParsersMap = newHashMap();
        // add defaults
        add(filterParsersMap, new TermJsonFilterParser(index, indexSettings));
        add(filterParsersMap, new RangeJsonFilterParser(index, indexSettings));
        add(filterParsersMap, new PrefixJsonFilterParser(index, indexSettings));
        add(filterParsersMap, new QueryJsonFilterParser(index, indexSettings));
        add(filterParsersMap, new BoolJsonFilterParser(index, indexSettings));

        if (filterParsers != null) {
            for (JsonFilterParser filterParser : filterParsers) {
                add(filterParsersMap, filterParser);
            }
        }
        this.filterParsers = ImmutableMap.copyOf(filterParsersMap);
    }

    /** Returns the query parser registered under the given name, or null. */
    public JsonQueryParser queryParser(String name) {
        return queryParsers.get(name);
    }

    /** Returns the filter parser registered under the given name, or null. */
    public JsonFilterParser filterParser(String name) {
        return filterParsers.get(name);
    }

    // Static: these helpers use no instance state.
    private static void add(Map<String, JsonFilterParser> map, JsonFilterParser filterParser) {
        map.put(filterParser.name(), filterParser);
    }

    private static void add(Map<String, JsonQueryParser> map, JsonQueryParser jsonQueryParser) {
        map.put(jsonQueryParser.name(), jsonQueryParser);
    }
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +/** + * @author kimchy (Shay Banon) + */ +public interface JsonSpanQueryBuilder extends JsonQueryBuilder { + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/MatchAllJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/MatchAllJsonQueryBuilder.java new file mode 100644 index 00000000000..1a9ab00ec36 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/MatchAllJsonQueryBuilder.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class MatchAllJsonQueryBuilder extends BaseJsonQueryBuilder { + + private String normsField; + + private float boost = -1; + + public MatchAllJsonQueryBuilder normsField(String normsField) { + this.normsField = normsField; + return this; + } + + public MatchAllJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(MatchAllJsonQueryParser.NAME); + if (boost != -1) { + builder.field("boost", boost); + } + if (normsField != null) { + builder.field("normsField", normsField); + } + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/MatchAllJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/MatchAllJsonQueryParser.java new file mode 100644 index 00000000000..c7e0296b87e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/MatchAllJsonQueryParser.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class MatchAllJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "matchAll"; + + @Inject public MatchAllJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + float boost = 1.0f; + String normsField = null; + String currentFieldName = null; + + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } else if ("normsField".equals(currentFieldName)) { + normsField = jp.getText(); + } + } + } + + MatchAllDocsQuery query = new MatchAllDocsQuery(normsField); + query.setBoost(boost); + return query; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonFilterBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonFilterBuilder.java new file mode 100644 index 
00000000000..6c0229b0574 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonFilterBuilder.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class PrefixJsonFilterBuilder extends BaseJsonFilterBuilder { + + private final String name; + + private final String value; + + public PrefixJsonFilterBuilder(String name, String value) { + this.name = name; + this.value = value; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(PrefixJsonFilterParser.NAME); + builder.field(name, value); + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonFilterParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonFilterParser.java new file mode 100644 index 00000000000..cb30db98a57 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonFilterParser.java @@ 
package org.elasticsearch.index.query.json;

import com.google.inject.Inject;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.PrefixFilter;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.index.query.support.QueryParsers.*;

/**
 * Parses {@code {"prefix" : {"field" : "value"}}} into a lucene
 * {@link PrefixFilter}, resolving the field through the mapper service and
 * caching the filter when a filter cache is available.
 *
 * @author kimchy (Shay Banon)
 */
public class PrefixJsonFilterParser extends AbstractIndexComponent implements JsonFilterParser {

    public static final String NAME = "prefix";

    @Inject public PrefixJsonFilterParser(Index index, @IndexSettings Settings settings) {
        super(index, settings);
    }

    @Override public String name() {
        return NAME;
    }

    @Override public Filter parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException {
        JsonParser jp = parseContext.jp();

        JsonToken token = jp.getCurrentToken();
        if (token == JsonToken.START_OBJECT) {
            token = jp.nextToken();
        }
        assert token == JsonToken.FIELD_NAME;
        String fieldName = jp.getCurrentName();
        jp.nextToken();
        String value = jp.getText();
        jp.nextToken();

        if (value == null) {
            // was: "prefix query" -- this is the filter parser
            throw new QueryParsingException(index, "No value specified for prefix filter");
        }

        // Map the logical field name to its index name and normalize the value.
        MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
        if (smartNameFieldMappers != null) {
            FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper();
            if (fieldMapper != null) {
                fieldName = fieldMapper.indexName();
                value = fieldMapper.indexedValue(value);
            }
        }

        Filter prefixFilter = new PrefixFilter(new Term(fieldName, value));
        prefixFilter = parseContext.cacheFilterIfPossible(prefixFilter);
        return wrapSmartNameFilter(prefixFilter, smartNameFieldMappers, parseContext.filterCache());
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class PrefixJsonQueryBuilder extends BaseJsonQueryBuilder { + + private final String name; + + private final String value; + + private float boost = -1; + + public PrefixJsonQueryBuilder(String name, String value) { + this.name = name; + this.value = value; + } + + public PrefixJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(PrefixJsonQueryParser.NAME); + if (boost == -1) { + builder.field(name, value); + } else { + builder.startObject(name); + builder.field("prefix", value); + builder.field("boost", boost); + builder.endObject(); + } + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonQueryParser.java new file mode 100644 index 00000000000..de83c7869e9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/PrefixJsonQueryParser.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.elasticsearch.index.query.json;

import com.google.inject.Inject;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.index.query.support.QueryParsers.*;

/**
 * Parses a prefix query, accepting both the short form
 * {@code {"prefix" : {"field" : "value"}}} and the expanded form with an
 * inner object carrying {@code prefix} and {@code boost}.
 *
 * @author kimchy (Shay Banon)
 */
public class PrefixJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser {

    public static final String NAME = "prefix";

    @Inject public PrefixJsonQueryParser(Index index, @IndexSettings Settings settings) {
        super(index, settings);
    }

    @Override public String name() {
        return NAME;
    }

    @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException {
        JsonParser parser = parseContext.jp();

        JsonToken token = parser.getCurrentToken();
        if (token == JsonToken.START_OBJECT) {
            token = parser.nextToken();
        }
        assert token == JsonToken.FIELD_NAME;
        String fieldName = parser.getCurrentName();

        String value = null;
        float boost = 1.0f;
        token = parser.nextToken();
        if (token == JsonToken.START_OBJECT) {
            // expanded form: {"field" : {"prefix" : "...", "boost" : ...}}
            String innerName = null;
            while ((token = parser.nextToken()) != JsonToken.END_OBJECT) {
                if (token == JsonToken.FIELD_NAME) {
                    innerName = parser.getCurrentName();
                } else if (NAME.equals(innerName)) {
                    value = parser.getText();
                } else if ("boost".equals(innerName)) {
                    boost = parser.getFloatValue();
                }
            }
        } else {
            // short form: {"field" : "value"}
            value = parser.getText();
            parser.nextToken();
        }

        if (value == null) {
            throw new QueryParsingException(index, "No value specified for prefix query");
        }

        // Map the logical field name to its index name and normalize the value.
        MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
        if (smartNameFieldMappers != null) {
            FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper();
            if (fieldMapper != null) {
                fieldName = fieldMapper.indexName();
                value = fieldMapper.indexedValue(value);
            }
        }

        PrefixQuery query = new PrefixQuery(new Term(fieldName, value));
        // constant-score rewrite avoids TooManyClauses on large term sets
        query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
        query.setBoost(boost);
        return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext.filterCache());
    }
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryJsonFilterBuilder extends BaseJsonFilterBuilder { + + private final JsonQueryBuilder queryBuilder; + + public QueryJsonFilterBuilder(JsonQueryBuilder queryBuilder) { + this.queryBuilder = queryBuilder; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.field(QueryJsonFilterParser.NAME); + queryBuilder.toJson(builder); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryJsonFilterParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryJsonFilterParser.java new file mode 100644 index 00000000000..b2375add38a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryJsonFilterParser.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; +import org.codehaus.jackson.JsonParser; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryJsonFilterParser extends AbstractIndexComponent implements JsonFilterParser { + + public static final String NAME = "query"; + + @Inject public QueryJsonFilterParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Filter parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + Query query = parseContext.parseInnerQuery(); + Filter filter = new QueryWrapperFilter(query); + filter = parseContext.cacheFilterIfPossible(filter); + return filter; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryStringJsonQueryBuilder.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryStringJsonQueryBuilder.java new file mode 100644 index 00000000000..801b01ec730 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryStringJsonQueryBuilder.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryStringJsonQueryBuilder extends BaseJsonQueryBuilder { + + public static enum Operator { + OR, + AND + } + + private final String queryString; + + private String defaultField; + + private Operator defaultOperator; + + private String analyzer; + + private Boolean allowLeadingWildcard; + + private Boolean lowercaseExpandedTerms; + + private Boolean enablePositionIncrements; + + private float fuzzyMinSim = -1; + + private float boost = -1; + + private int fuzzyPrefixLength = -1; + + private int phraseSlop = -1; + + public QueryStringJsonQueryBuilder(String queryString) { + this.queryString = queryString; + } + + public QueryStringJsonQueryBuilder defaultField(String defaultField) { + this.defaultField = defaultField; + return this; + } + + public QueryStringJsonQueryBuilder defualtOperator(Operator defaultOperator) { + this.defaultOperator = defaultOperator; + return this; + } + + public QueryStringJsonQueryBuilder analyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + public QueryStringJsonQueryBuilder allowLeadingWildcard(boolean allowLeadingWildcard) { + this.allowLeadingWildcard = allowLeadingWildcard; + return this; + } + + public QueryStringJsonQueryBuilder lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { + this.lowercaseExpandedTerms = lowercaseExpandedTerms; + return this; + } + + public QueryStringJsonQueryBuilder enablePositionIncrements(boolean enablePositionIncrements) { + this.enablePositionIncrements = enablePositionIncrements; + return this; + } + + public QueryStringJsonQueryBuilder fuzzyMinSim(float fuzzyMinSim) { + this.fuzzyMinSim = fuzzyMinSim; + return this; + } + + public QueryStringJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + public QueryStringJsonQueryBuilder fuzzyPrefixLength(int 
fuzzyPrefixLength) { + this.fuzzyPrefixLength = fuzzyPrefixLength; + return this; + } + + public QueryStringJsonQueryBuilder phraseSlop(int phraseSlop) { + this.phraseSlop = phraseSlop; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject(QueryStringJsonQueryParser.NAME); + builder.field("query", queryString); + if (defaultField != null) { + builder.field("defaultField", defaultField); + } + if (defaultOperator != null) { + builder.field("defaultOperator", defaultOperator.name().toLowerCase()); + } + if (analyzer != null) { + builder.field("analyzer", analyzer); + } + if (allowLeadingWildcard != null) { + builder.field("allowLeadingWildcard", allowLeadingWildcard); + } + if (lowercaseExpandedTerms != null) { + builder.field("lowercaseExpandedTerms", lowercaseExpandedTerms); + } + if (enablePositionIncrements != null) { + builder.field("enablePositionIncrements", enablePositionIncrements); + } + if (fuzzyMinSim != -1) { + builder.field("fuzzyMinSim", fuzzyMinSim); + } + if (boost != -1) { + builder.field("boost", boost); + } + if (fuzzyPrefixLength != -1) { + builder.field("fuzzyPrefixLength", fuzzyPrefixLength); + } + if (phraseSlop != -1) { + builder.field("phraseSlop", phraseSlop); + } + builder.endObject(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryStringJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryStringJsonQueryParser.java new file mode 100644 index 00000000000..51324b088fa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/QueryStringJsonQueryParser.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.queryParser.ParseException; +import org.apache.lucene.queryParser.QueryParser; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.query.support.MapperQueryParser; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryStringJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "queryString"; + + private final AnalysisService analysisService; + + @Inject public QueryStringJsonQueryParser(Index index, @IndexSettings Settings settings, AnalysisService analysisService) { + super(index, settings); + this.analysisService = analysisService; + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws 
IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + // move to the field value + + String queryString = null; + String defaultField = null; + MapperQueryParser.Operator defaultOperator = QueryParser.Operator.OR; + boolean allowLeadingWildcard = true; + boolean lowercaseExpandedTerms = true; + boolean enablePositionIncrements = true; + float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity; + int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength; + int phraseSlop = 0; + float boost = 1.0f; + Analyzer analyzer = null; + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else if (token == JsonToken.VALUE_STRING) { + if ("query".equals(currentFieldName)) { + queryString = jp.getText(); + } else if ("defaultField".equals(currentFieldName)) { + defaultField = parseContext.indexName(jp.getText()); + } else if ("defaultOperator".equals(currentFieldName)) { + String op = jp.getText(); + if ("or".equalsIgnoreCase(op)) { + defaultOperator = QueryParser.Operator.OR; + } else if ("and".equalsIgnoreCase(op)) { + defaultOperator = QueryParser.Operator.AND; + } else { + throw new QueryParsingException(index, "Query default operator [" + op + "] is not allowed"); + } + } else if ("analyzer".equals(currentFieldName)) { + analyzer = analysisService.analyzer(jp.getText()); + } + } else if (token == JsonToken.VALUE_FALSE || token == JsonToken.VALUE_TRUE) { + if ("allowLeadingWildcard".equals(currentFieldName)) { + allowLeadingWildcard = token == JsonToken.VALUE_TRUE; + } else if ("lowercaseExpandedTerms".equals(currentFieldName)) { + lowercaseExpandedTerms = token == JsonToken.VALUE_TRUE; + } else if ("enablePositionIncrements".equals(currentFieldName)) { + enablePositionIncrements = token == JsonToken.VALUE_TRUE; + } + } else if (token == JsonToken.VALUE_NUMBER_FLOAT) { + if ("fuzzyMinSim".equals(currentFieldName)) 
{ + fuzzyMinSim = jp.getFloatValue(); + } else if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } + } else if (token == JsonToken.VALUE_NUMBER_INT) { + if ("fuzzyPrefixLength".equals(currentFieldName)) { + fuzzyPrefixLength = jp.getIntValue(); + } else if ("phraseSlop".equals(currentFieldName)) { + phraseSlop = jp.getIntValue(); + } else if ("fuzzyMinSim".equals(currentFieldName)) { + fuzzyMinSim = jp.getFloatValue(); + } else if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } + } + } + if (queryString == null) { + throw new QueryParsingException(index, "QueryString must be provided with a [query]"); + } + if (analyzer == null) { + analyzer = parseContext.mapperService().searchAnalyzer(); + } + + MapperQueryParser queryParser = new MapperQueryParser(defaultField, analyzer, parseContext.mapperService(), parseContext.filterCache()); + queryParser.setEnablePositionIncrements(enablePositionIncrements); + queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms); + queryParser.setAllowLeadingWildcard(allowLeadingWildcard); + queryParser.setDefaultOperator(defaultOperator); + queryParser.setFuzzyMinSim(fuzzyMinSim); + queryParser.setFuzzyPrefixLength(fuzzyPrefixLength); + queryParser.setPhraseSlop(phraseSlop); + + try { + Query query = queryParser.parse(queryString); + query.setBoost(boost); + return query; + } catch (ParseException e) { + throw new QueryParsingException(index, "Failed to parse query [" + queryString + "]", e); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonFilterBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonFilterBuilder.java new file mode 100644 index 00000000000..5a5e15ee6d7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonFilterBuilder.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class RangeJsonFilterBuilder extends BaseJsonFilterBuilder { + + private final String name; + + private Object from; + + private Object to; + + private boolean includeLower = true; + + private boolean includeUpper = true; + + public RangeJsonFilterBuilder(String name) { + this.name = name; + } + + public RangeJsonFilterBuilder from(String from) { + this.from = from; + return this; + } + + public RangeJsonFilterBuilder from(int from) { + this.from = from; + return this; + } + + public RangeJsonFilterBuilder from(long from) { + this.from = from; + return this; + } + + public RangeJsonFilterBuilder from(float from) { + this.from = from; + return this; + } + + public RangeJsonFilterBuilder from(double from) { + this.from = from; + return this; + } + + public RangeJsonFilterBuilder to(String to) { + this.to = to; + return this; + } + + public RangeJsonFilterBuilder to(int to) { + this.to = to; + return this; + } + + public RangeJsonFilterBuilder to(long to) { + this.to = to; + return this; + } + + public RangeJsonFilterBuilder to(float to) { + this.to = to; + return 
this; + } + + public RangeJsonFilterBuilder to(double to) { + this.to = to; + return this; + } + + public RangeJsonFilterBuilder includeLower(boolean includeLower) { + this.includeLower = includeLower; + return this; + } + + public RangeJsonFilterBuilder includeUpper(boolean includeUpper) { + this.includeUpper = includeUpper; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject(RangeJsonFilterParser.NAME); + builder.startObject(name); + builder.field("from", from); + builder.field("to", to); + builder.field("includeLower", includeLower); + builder.field("includeUpper", includeUpper); + builder.endObject(); + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonFilterParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonFilterParser.java new file mode 100644 index 00000000000..23515ef4872 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonFilterParser.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.TermRangeFilter; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.index.query.support.QueryParsers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class RangeJsonFilterParser extends AbstractIndexComponent implements JsonFilterParser { + + public static final String NAME = "range"; + + @Inject public RangeJsonFilterParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Filter parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + JsonToken token = jp.getCurrentToken(); + if (token == JsonToken.START_OBJECT) { + token = jp.nextToken(); + } + assert token == JsonToken.FIELD_NAME; + String fieldName = jp.getCurrentName(); + + String from = null; + String to = null; + boolean includeLower = true; + boolean includeUpper = true; + + String currentFieldName = null; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else { + if ("from".equals(currentFieldName)) { + if (jp.getCurrentToken() == JsonToken.VALUE_NULL) { + from = null; + } else { + from = jp.getText(); + } + } else if ("to".equals(currentFieldName)) { + if (jp.getCurrentToken() == JsonToken.VALUE_NULL) { + to = null; + 
} else { + to = jp.getText(); + } + } else if ("includeLower".equals(currentFieldName)) { + includeLower = token == JsonToken.VALUE_TRUE; + } else if ("includeUpper".equals(currentFieldName)) { + includeUpper = token == JsonToken.VALUE_TRUE; + } + } + } + + Filter filter = null; + MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); + if (smartNameFieldMappers != null) { + FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper(); + if (fieldMapper != null) { + filter = fieldMapper.rangeFilter(from, to, includeLower, includeUpper); + } + } + if (filter == null) { + filter = new TermRangeFilter(fieldName, from, to, includeLower, includeUpper); + } + return wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext.filterCache()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonQueryBuilder.java new file mode 100644 index 00000000000..cfefa18a24b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonQueryBuilder.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class RangeJsonQueryBuilder extends BaseJsonQueryBuilder { + + private final String name; + + private Object from; + + private Object to; + + private boolean includeLower = true; + + private boolean includeUpper = true; + + private float boost = -1; + + public RangeJsonQueryBuilder(String name) { + this.name = name; + } + + public RangeJsonQueryBuilder from(String from) { + this.from = from; + return this; + } + + public RangeJsonQueryBuilder from(int from) { + this.from = from; + return this; + } + + public RangeJsonQueryBuilder from(long from) { + this.from = from; + return this; + } + + public RangeJsonQueryBuilder from(float from) { + this.from = from; + return this; + } + + public RangeJsonQueryBuilder from(double from) { + this.from = from; + return this; + } + + public RangeJsonQueryBuilder to(String to) { + this.to = to; + return this; + } + + public RangeJsonQueryBuilder to(int to) { + this.to = to; + return this; + } + + public RangeJsonQueryBuilder to(long to) { + this.to = to; + return this; + } + + public RangeJsonQueryBuilder to(float to) { + this.to = to; + return this; + } + + public RangeJsonQueryBuilder to(double to) { + this.to = to; + return this; + } + + public RangeJsonQueryBuilder includeLower(boolean includeLower) { + this.includeLower = includeLower; + return this; + } + + public RangeJsonQueryBuilder includeUpper(boolean includeUpper) { + this.includeUpper = includeUpper; + return this; + } + + public RangeJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject(RangeJsonQueryParser.NAME); + builder.startObject(name); + 
builder.field("from", from); + builder.field("to", to); + builder.field("includeLower", includeLower); + builder.field("includeUpper", includeUpper); + if (boost != -1) { + builder.field("boost", boost); + } + builder.endObject(); + builder.endObject(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonQueryParser.java new file mode 100644 index 00000000000..245c986f53e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/RangeJsonQueryParser.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermRangeQuery; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.index.query.support.QueryParsers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class RangeJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "range"; + + @Inject public RangeJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + JsonToken token = jp.getCurrentToken(); + if (token == JsonToken.START_OBJECT) { + token = jp.nextToken(); + } + assert token == JsonToken.FIELD_NAME; + String fieldName = jp.getCurrentName(); + + String from = null; + String to = null; + boolean includeLower = true; + boolean includeUpper = true; + float boost = 1.0f; + + String currentFieldName = null; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else { + if ("from".equals(currentFieldName)) { + if (jp.getCurrentToken() == JsonToken.VALUE_NULL) { + from = null; + } else { + from = jp.getText(); + } + } else if ("to".equals(currentFieldName)) { + if (jp.getCurrentToken() == JsonToken.VALUE_NULL) 
{ + to = null; + } else { + to = jp.getText(); + } + } else if ("includeLower".equals(currentFieldName)) { + includeLower = token == JsonToken.VALUE_TRUE; + } else if ("includeUpper".equals(currentFieldName)) { + includeUpper = token == JsonToken.VALUE_TRUE; + } else if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } + } + } + + Query query = null; + MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); + if (smartNameFieldMappers != null) { + FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper(); + if (fieldMapper != null) { + query = fieldMapper.rangeQuery(from, to, includeLower, includeUpper); + } + } + if (query == null) { + query = new TermRangeQuery(fieldName, from, to, includeLower, includeUpper); + } + query.setBoost(boost); + return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext.filterCache()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanFirstJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanFirstJsonQueryBuilder.java new file mode 100644 index 00000000000..688055fb290 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanFirstJsonQueryBuilder.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanFirstJsonQueryBuilder extends BaseJsonQueryBuilder implements JsonSpanQueryBuilder { + + private final JsonSpanQueryBuilder matchBuilder; + + private final int end; + + private float boost = -1; + + public SpanFirstJsonQueryBuilder(JsonSpanQueryBuilder matchBuilder, int end) { + this.matchBuilder = matchBuilder; + this.end = end; + } + + public SpanFirstJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + builder.startObject(SpanFirstJsonQueryParser.NAME); + builder.field("match"); + matchBuilder.toJson(builder); + builder.field("end", end); + if (boost != -1) { + builder.field("boost", boost); + } + builder.endObject(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanFirstJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanFirstJsonQueryParser.java new file mode 100644 index 00000000000..1a8b333b43f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanFirstJsonQueryParser.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanFirstQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanFirstJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "spanFirst"; + + @Inject public SpanFirstJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + float boost = 1.0f; + + SpanQuery match = null; + int end = -1; + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } 
else if (token == JsonToken.START_OBJECT) { + if ("match".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(index, "spanFirst [match] must be of type span query"); + } + match = (SpanQuery) query; + } + } else { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } else if ("end".equals(currentFieldName)) { + end = jp.getIntValue(); + } + } + } + if (match == null) { + throw new QueryParsingException(index, "spanFirst must have [match] span query clause"); + } + if (end == -1) { + throw new QueryParsingException(index, "spanFirst must have [end] set for it"); + } + + SpanFirstQuery query = new SpanFirstQuery(match, end); + query.setBoost(boost); + return query; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNearJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNearJsonQueryBuilder.java new file mode 100644 index 00000000000..aedac04c033 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNearJsonQueryBuilder.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.index.query.QueryBuilderException; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanNearJsonQueryBuilder extends BaseJsonQueryBuilder implements JsonSpanQueryBuilder { + + private ArrayList clauses = new ArrayList(); + + private int slop = -1; + + private Boolean inOrder; + + private Boolean collectPayloads; + + private float boost = -1; + + public SpanNearJsonQueryBuilder clause(JsonSpanQueryBuilder clause) { + clauses.add(clause); + return this; + } + + public SpanNearJsonQueryBuilder slop(int slop) { + this.slop = slop; + return this; + } + + public SpanNearJsonQueryBuilder inOrder(boolean inOrder) { + this.inOrder = inOrder; + return this; + } + + public SpanNearJsonQueryBuilder collectPayloads(boolean collectPayloads) { + this.collectPayloads = collectPayloads; + return this; + } + + public SpanNearJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + if (clauses.isEmpty()) { + throw new QueryBuilderException("Must have at least one clause when building a spanNear query"); + } + if (slop == -1) { + throw new QueryBuilderException("Must set the slop when building a spanNear query"); + } + builder.startObject(SpanNearJsonQueryParser.NAME); + builder.startArray("clauses"); + for (JsonSpanQueryBuilder clause : clauses) { + clause.toJson(builder); + } + builder.endArray(); + builder.field("slop", slop); + if (inOrder != null) { + builder.field("inOrder", inOrder); + } + if (collectPayloads != null) { + builder.field("collectPayloads", collectPayloads); + } + if (boost != -1) { + builder.field("boost", boost); + } + builder.endObject(); + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNearJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNearJsonQueryParser.java new file mode 100644 index 00000000000..72ee2044023 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNearJsonQueryParser.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanNearQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanNearJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "spanNear"; + + @Inject public SpanNearJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + float boost = 1.0f; + int slop = -1; + boolean inOrder = true; + boolean collectPayloads = true; + + List clauses = newArrayList(); + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else if (token == JsonToken.START_ARRAY) { + if ("clauses".equals(currentFieldName)) { + while ((token = jp.nextToken()) != JsonToken.END_ARRAY) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(index, "spanNear [clauses] must be of type span query"); + } + clauses.add((SpanQuery) query); + } + } + } else if (token == JsonToken.VALUE_FALSE || token == JsonToken.VALUE_TRUE) { + if 
("inOrder".equals(currentFieldName)) { + inOrder = token == JsonToken.VALUE_TRUE; + } else if ("collectPayloads".equals(currentFieldName)) { + collectPayloads = token == JsonToken.VALUE_TRUE; + } + } else { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } else if ("slop".equals(currentFieldName)) { + slop = jp.getIntValue(); + } + } + } + if (clauses.isEmpty()) { + throw new QueryParsingException(index, "spanNear must include [clauses]"); + } + if (slop == -1) { + throw new QueryParsingException(index, "spanNear must include [slop]"); + } + + SpanNearQuery query = new SpanNearQuery(clauses.toArray(new SpanQuery[clauses.size()]), slop, inOrder, collectPayloads); + query.setBoost(boost); + return query; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNotJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNotJsonQueryBuilder.java new file mode 100644 index 00000000000..382650facee --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNotJsonQueryBuilder.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.index.query.json;
+
+import org.elasticsearch.index.query.QueryBuilderException;
+import org.elasticsearch.util.json.JsonBuilder;
+
+import java.io.IOException;
+
+/**
+ * JSON builder for a {@code spanNot} query: matches spans from {@code include}
+ * that are not overlapped by spans from {@code exclude}. Both clauses are
+ * mandatory; {@link #doJson(JsonBuilder)} throws if either is missing.
+ *
+ * @author kimchy (Shay Banon)
+ */
+public class SpanNotJsonQueryBuilder extends BaseJsonQueryBuilder implements JsonSpanQueryBuilder {
+
+    // Required span clause whose matches are kept.
+    private JsonSpanQueryBuilder include;
+
+    // Required span clause whose matches are removed from include's matches.
+    private JsonSpanQueryBuilder exclude;
+
+    // -1 is the "unset" sentinel; the boost field is only serialized when set.
+    private float boost = -1;
+
+    public SpanNotJsonQueryBuilder include(JsonSpanQueryBuilder include) {
+        this.include = include;
+        return this;
+    }
+
+    public SpanNotJsonQueryBuilder exclude(JsonSpanQueryBuilder exclude) {
+        this.exclude = exclude;
+        return this;
+    }
+
+    public SpanNotJsonQueryBuilder boost(float boost) {
+        this.boost = boost;
+        return this;
+    }
+
+    @Override protected void doJson(JsonBuilder builder) throws IOException {
+        if (include == null) {
+            throw new QueryBuilderException("Must specify include when using spanNot query");
+        }
+        if (exclude == null) {
+            throw new QueryBuilderException("Must specify exclude when using spanNot query");
+        }
+        builder.startObject(SpanNotJsonQueryParser.NAME);
+        builder.field("include");
+        include.toJson(builder);
+        builder.field("exclude");
+        exclude.toJson(builder);
+        // FIX: was `if (boost == -1)`, which emitted the boost field only while
+        // it was still the unset sentinel and silently dropped any value set via
+        // boost(). Inverted to match the other span builders (spanNear, spanOr).
+        if (boost != -1) {
+            builder.field("boost", boost);
+        }
+        builder.endObject();
+    }
+}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNotJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNotJsonQueryParser.java
new file mode 100644
index 00000000000..b9153d32742
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanNotJsonQueryParser.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanNotQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanNotJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "spanNot"; + + @Inject public SpanNotJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + float boost = 1.0f; + + SpanQuery include = null; + SpanQuery exclude = null; + + String currentFieldName = null; + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); 
+ } else if (token == JsonToken.START_OBJECT) { + if ("include".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(index, "spanNot [include] must be of type span query"); + } + include = (SpanQuery) query; + } else if ("exclude".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(index, "spanNot [exclude] must be of type span query"); + } + exclude = (SpanQuery) query; + } + } else { + if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } + } + } + if (include == null) { + throw new QueryParsingException(index, "spanNot must have [include] span query clause"); + } + if (exclude == null) { + throw new QueryParsingException(index, "spanNot must have [exclude] span query clause"); + } + + SpanNotQuery query = new SpanNotQuery(include, exclude); + query.setBoost(boost); + return query; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanOrJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanOrJsonQueryBuilder.java new file mode 100644 index 00000000000..505479bbfde --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanOrJsonQueryBuilder.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.index.query.QueryBuilderException; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanOrJsonQueryBuilder extends BaseJsonQueryBuilder implements JsonSpanQueryBuilder { + + private ArrayList clauses = new ArrayList(); + + private float boost = -1; + + public SpanOrJsonQueryBuilder clause(JsonSpanQueryBuilder clause) { + clauses.add(clause); + return this; + } + + public SpanOrJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override protected void doJson(JsonBuilder builder) throws IOException { + if (clauses.isEmpty()) { + throw new QueryBuilderException("Must have at least one clause when building a spanOr query"); + } + builder.startObject(SpanOrJsonQueryParser.NAME); + builder.startArray("clauses"); + for (JsonSpanQueryBuilder clause : clauses) { + clause.toJson(builder); + } + builder.endArray(); + if (boost != -1) { + builder.field("boost", boost); + } + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanOrJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanOrJsonQueryParser.java new file mode 100644 index 00000000000..bdb4ee57886 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanOrJsonQueryParser.java @@ -0,0 +1,90 @@ +/* + * 
Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanOrQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanOrJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "spanOr"; + + @Inject public SpanOrJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = 
parseContext.jp();
+
+        float boost = 1.0f;
+
+        // Collected span clauses; SpanOrQuery requires at least one.
+        List<SpanQuery> clauses = newArrayList();
+
+        String currentFieldName = null;
+        JsonToken token;
+        while ((token = jp.nextToken()) != JsonToken.END_OBJECT) {
+            if (token == JsonToken.FIELD_NAME) {
+                currentFieldName = jp.getCurrentName();
+            } else if (token == JsonToken.START_ARRAY) {
+                if ("clauses".equals(currentFieldName)) {
+                    while ((token = jp.nextToken()) != JsonToken.END_ARRAY) {
+                        Query query = parseContext.parseInnerQuery();
+                        if (!(query instanceof SpanQuery)) {
+                            // FIX: message previously said "spanNear" (copy/paste
+                            // from SpanNearJsonQueryParser); this parser is spanOr.
+                            throw new QueryParsingException(index, "spanOr [clauses] must be of type span query");
+                        }
+                        clauses.add((SpanQuery) query);
+                    }
+                }
+            } else {
+                if ("boost".equals(currentFieldName)) {
+                    boost = jp.getFloatValue();
+                }
+            }
+        }
+        if (clauses.isEmpty()) {
+            throw new QueryParsingException(index, "spanOr must include [clauses]");
+        }
+
+        SpanOrQuery query = new SpanOrQuery(clauses.toArray(new SpanQuery[clauses.size()]));
+        query.setBoost(boost);
+        return query;
+    }
+}
\ No newline at end of file
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanTermJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanTermJsonQueryBuilder.java
new file mode 100644
index 00000000000..2a487781248
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanTermJsonQueryBuilder.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elastic Search licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanTermJsonQueryBuilder extends BaseJsonQueryBuilder implements JsonSpanQueryBuilder { + + private final String name; + + private final Object value; + + private float boost = -1; + + public SpanTermJsonQueryBuilder(String name, String value) { + this(name, (Object) value); + } + + public SpanTermJsonQueryBuilder(String name, int value) { + this(name, (Object) value); + } + + public SpanTermJsonQueryBuilder(String name, long value) { + this(name, (Object) value); + } + + public SpanTermJsonQueryBuilder(String name, float value) { + this(name, (Object) value); + } + + public SpanTermJsonQueryBuilder(String name, double value) { + this(name, (Object) value); + } + + private SpanTermJsonQueryBuilder(String name, Object value) { + this.name = name; + this.value = value; + } + + public SpanTermJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(SpanTermJsonQueryParser.NAME); + if (boost == -1) { + builder.field(name, value); + } else { + builder.startObject(name); + builder.field("value", value); + builder.field("boost", boost); + builder.endObject(); + } + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanTermJsonQueryParser.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanTermJsonQueryParser.java new file mode 100644 index 00000000000..d30ee6833e4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/SpanTermJsonQueryParser.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.index.query.support.QueryParsers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SpanTermJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + public static final String NAME = "spanTerm"; + + @Inject public SpanTermJsonQueryParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + JsonToken token = jp.getCurrentToken(); + if (token == JsonToken.START_OBJECT) { + token = jp.nextToken(); + } + assert token == JsonToken.FIELD_NAME; + String fieldName = jp.getCurrentName(); + + + String value = null; + float boost = 1.0f; + token = jp.nextToken(); + if (token == JsonToken.START_OBJECT) { + String currentFieldName = null; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + currentFieldName = jp.getCurrentName(); + } else { + if ("value".equals(currentFieldName)) { + value = jp.getText(); + } else if ("boost".equals(currentFieldName)) { + boost = jp.getFloatValue(); + } + } + } + } else { + value = jp.getText(); + // move to the 
next token + jp.nextToken(); + } + + if (value == null) { + throw new QueryParsingException(index, "No value specified for term query"); + } + + MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); + if (smartNameFieldMappers != null) { + FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper(); + if (fieldMapper != null) { + fieldName = fieldMapper.indexName(); + value = fieldMapper.indexedValue(value); + } + } + + SpanTermQuery query = new SpanTermQuery(new Term(fieldName, value)); + query.setBoost(boost); + return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext.filterCache()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonFilterBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonFilterBuilder.java new file mode 100644 index 00000000000..3d6b48158e9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonFilterBuilder.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class TermJsonFilterBuilder extends BaseJsonFilterBuilder { + + private final String name; + + private final Object value; + + public TermJsonFilterBuilder(String name, String value) { + this(name, (Object) value); + } + + public TermJsonFilterBuilder(String name, int value) { + this(name, (Object) value); + } + + public TermJsonFilterBuilder(String name, long value) { + this(name, (Object) value); + } + + public TermJsonFilterBuilder(String name, float value) { + this(name, (Object) value); + } + + public TermJsonFilterBuilder(String name, double value) { + this(name, (Object) value); + } + + private TermJsonFilterBuilder(String name, Object value) { + this.name = name; + this.value = value; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(TermJsonFilterParser.NAME); + builder.field(name, value); + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonFilterParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonFilterParser.java new file mode 100644 index 00000000000..340eca026d2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonFilterParser.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import com.google.inject.Inject; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Filter; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.lucene.search.TermFilter; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.index.query.support.QueryParsers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TermJsonFilterParser extends AbstractIndexComponent implements JsonFilterParser { + + public static final String NAME = "term"; + + @Inject public TermJsonFilterParser(Index index, @IndexSettings Settings settings) { + super(index, settings); + } + + @Override public String name() { + return NAME; + } + + @Override public Filter parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + JsonParser jp = parseContext.jp(); + + JsonToken token = jp.getCurrentToken(); + if (token == JsonToken.START_OBJECT) { + token = jp.nextToken(); + } + assert token == JsonToken.FIELD_NAME; + String fieldName = jp.getCurrentName(); + + + jp.nextToken(); + String value = jp.getText(); + // move to the next token (from 
VALUE) + jp.nextToken(); + + if (value == null) { + throw new QueryParsingException(index, "No value specified for term filter"); + } + + Filter filter = null; + MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); + if (smartNameFieldMappers != null) { + FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper(); + if (fieldMapper != null) { + filter = fieldMapper.fieldFilter(value); + } + } + if (filter == null) { + filter = new TermFilter(new Term(fieldName, value)); + } + filter = parseContext.cacheFilterIfPossible(filter); + return wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext.filterCache()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonQueryBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonQueryBuilder.java new file mode 100644 index 00000000000..1f028c4ee44 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonQueryBuilder.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class TermJsonQueryBuilder extends BaseJsonQueryBuilder { + + private final String name; + + private final Object value; + + private float boost = -1; + + public TermJsonQueryBuilder(String name, String value) { + this(name, (Object) value); + } + + public TermJsonQueryBuilder(String name, int value) { + this(name, (Object) value); + } + + public TermJsonQueryBuilder(String name, long value) { + this(name, (Object) value); + } + + public TermJsonQueryBuilder(String name, float value) { + this(name, (Object) value); + } + + public TermJsonQueryBuilder(String name, double value) { + this(name, (Object) value); + } + + private TermJsonQueryBuilder(String name, Object value) { + this.name = name; + this.value = value; + } + + public TermJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(TermJsonQueryParser.NAME); + if (boost == -1) { + builder.field(name, value); + } else { + builder.startObject(name); + builder.field("value", value); + builder.field("boost", boost); + builder.endObject(); + } + builder.endObject(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonQueryParser.java new file mode 100644 index 00000000000..721bfa3479a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/TermJsonQueryParser.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.elasticsearch.index.query.json;

import com.google.inject.Inject;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.index.query.support.QueryParsers.*;

/**
 * Parses the JSON "term" query. Accepts both the compact form
 * <pre>{ "term" : { field : value } }</pre>
 * and the extended form
 * <pre>{ "term" : { field : { "value" : v, "boost" : b } } }</pre>
 *
 * @author kimchy (Shay Banon)
 */
public class TermJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser {

    // Registration name of this parser; also the JSON object key it handles.
    public static final String NAME = "term";

    @Inject public TermJsonQueryParser(Index index, @IndexSettings Settings settings) {
        super(index, settings);
    }

    @Override public String name() {
        return NAME;
    }

    /**
     * Builds a {@link TermQuery} (or a mapper-provided field query) from the
     * current position of the shared streaming parser.
     *
     * @throws QueryParsingException if no term value was provided
     */
    @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException {
        JsonParser jp = parseContext.jp();

        JsonToken token = jp.getCurrentToken();
        // Skip the enclosing START_OBJECT if the caller left us positioned on it.
        if (token == JsonToken.START_OBJECT) {
            token = jp.nextToken();
        }
        // Caller contract: we must now be on the field name of the term query.
        assert token == JsonToken.FIELD_NAME;
        String fieldName = jp.getCurrentName();


        String value = null;
        float boost = 1.0f;
        token = jp.nextToken();
        if (token == JsonToken.START_OBJECT) {
            // Extended form: read "value" / "boost" pairs until the inner object closes.
            // NOTE(review): this branch leaves the parser ON the END_OBJECT token while
            // the simple-value branch below advances past the value — confirm callers
            // expect this asymmetry.
            String currentFieldName = null;
            while ((token = jp.nextToken()) != JsonToken.END_OBJECT) {
                if (token == JsonToken.FIELD_NAME) {
                    currentFieldName = jp.getCurrentName();
                } else {
                    if ("value".equals(currentFieldName)) {
                        value = jp.getText();
                    } else if ("boost".equals(currentFieldName)) {
                        boost = jp.getFloatValue();
                    }
                }
            }
        } else {
            // Compact form: the token itself is the value.
            value = jp.getText();
            // move to the next token
            jp.nextToken();
        }

        if (value == null) {
            throw new QueryParsingException(index, "No value specified for term query");
        }

        // Prefer a mapper-aware query (handles numeric fields, analyzed values, etc.);
        // fall back to a plain Lucene TermQuery when no mapper matches the field.
        Query query = null;
        MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
        if (smartNameFieldMappers != null) {
            FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper();
            if (fieldMapper != null) {
                query = fieldMapper.fieldQuery(value);
            }
        }
        if (query == null) {
            query = new TermQuery(new Term(fieldName, value));
        }
        query.setBoost(boost);
        // When the smart name carried a document type, scope the query by that type.
        return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext.filterCache());
    }
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class WildcardJsonQueryBuilder extends BaseJsonQueryBuilder { + + private final String name; + + private final String value; + + private float boost = -1; + + public WildcardJsonQueryBuilder(String name, String value) { + this.name = name; + this.value = value; + } + + public WildcardJsonQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + @Override public void doJson(JsonBuilder builder) throws IOException { + builder.startObject(WildcardJsonQueryParser.NAME); + if (boost == -1) { + builder.field(name, value); + } else { + builder.startObject(name); + builder.field("wildcard", value); + builder.field("boost", boost); + builder.endObject(); + } + builder.endObject(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/WildcardJsonQueryParser.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/WildcardJsonQueryParser.java new file mode 100644 index 00000000000..70386f1b918 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/json/WildcardJsonQueryParser.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more 
package org.elasticsearch.index.query.json;

import com.google.inject.Inject;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

import static org.elasticsearch.index.query.support.QueryParsers.*;

/**
 * Parses the JSON "wildcard" query. Accepts both the compact form
 * <pre>{ "wildcard" : { field : pattern } }</pre>
 * and the extended form
 * <pre>{ "wildcard" : { field : { "wildcard" : pattern, "boost" : b } } }</pre>
 *
 * @author kimchy (Shay Banon)
 */
public class WildcardJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser {

    // Registration name of this parser; also the JSON object key it handles.
    public static final String NAME = "wildcard";

    @Inject public WildcardJsonQueryParser(Index index, @IndexSettings Settings settings) {
        super(index, settings);
    }

    @Override public String name() {
        return NAME;
    }

    /**
     * Builds a constant-score {@link WildcardQuery} from the current position of
     * the shared streaming parser, remapping the field name/value through the
     * field mapper when one is registered.
     *
     * @throws QueryParsingException if no wildcard value was provided
     */
    @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException {
        JsonParser jp = parseContext.jp();

        JsonToken token = jp.getCurrentToken();
        // Skip the enclosing START_OBJECT if the caller left us positioned on it.
        if (token == JsonToken.START_OBJECT) {
            token = jp.nextToken();
        }
        // Caller contract: we must now be on the field name of the wildcard query.
        assert token == JsonToken.FIELD_NAME;
        String fieldName = jp.getCurrentName();


        String value = null;
        float boost = 1.0f;
        token = jp.nextToken();
        if (token == JsonToken.START_OBJECT) {
            // Extended form: read "wildcard" / "boost" pairs until the inner object closes.
            String currentFieldName = null;
            while ((token = jp.nextToken()) != JsonToken.END_OBJECT) {
                if (token == JsonToken.FIELD_NAME) {
                    currentFieldName = jp.getCurrentName();
                } else {
                    if ("wildcard".equals(currentFieldName)) {
                        value = jp.getText();
                    } else if ("boost".equals(currentFieldName)) {
                        boost = jp.getFloatValue();
                    }
                }
            }
        } else {
            // Compact form: the token itself is the pattern.
            value = jp.getText();
            jp.nextToken();
        }

        if (value == null) {
            // Fixed: previously said "prefix query", which misattributed the error
            // to a different query type.
            throw new QueryParsingException(index, "No value specified for wildcard query");
        }

        // When a mapper exists for the field, use its indexed name and let it
        // normalize the pattern the way values were indexed.
        MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
        if (smartNameFieldMappers != null) {
            FieldMapper fieldMapper = smartNameFieldMappers.fieldMappers().mapper();
            if (fieldMapper != null) {
                fieldName = fieldMapper.indexName();
                value = fieldMapper.indexedValue(value);
            }
        }

        WildcardQuery query = new WildcardQuery(new Term(fieldName, value));
        // Constant-score rewrite avoids the boolean-clause blowup of scoring rewrites.
        query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
        query.setBoost(boost);
        // When the smart name carried a document type, scope the query by that type.
        return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext.filterCache());
    }
}
package org.elasticsearch.index.query.support;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMappers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.util.Nullable;

import static org.elasticsearch.index.query.support.QueryParsers.*;

/**
 * A query parser that uses the {@link MapperService} in order to build smarter
 * queries based on the mapping information.
 *
 * <p>Maps a logic name of a field {@link org.elasticsearch.index.mapper.FieldMapper#name()}
 * into its {@link org.elasticsearch.index.mapper.FieldMapper#indexName()}.
 *
 * <p>Also breaks fields with [type].[name] into a boolean query that must include the type
 * as well as the query on the name.
 *
 * @author kimchy (Shay Banon)
 */
public class MapperQueryParser extends QueryParser {

    // May be null: the parser then degrades to plain Lucene QueryParser behavior.
    private final MapperService mapperService;

    // May be null: type filters produced by wrapSmartNameQuery are then uncached.
    private final FilterCache filterCache;

    public MapperQueryParser(String defaultField, Analyzer analyzer,
                             @Nullable MapperService mapperService,
                             @Nullable FilterCache filterCache) {
        super(Version.LUCENE_CURRENT, defaultField, analyzer);
        this.mapperService = mapperService;
        this.filterCache = filterCache;
        // Constant-score rewrite for multi-term queries avoids boolean-clause blowup.
        setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
    }

    // Delegates field queries to the concrete field mapper when one resolves,
    // wrapping the result with a type filter if the smart name carried a type.
    @Override protected Query getFieldQuery(String field, String queryText) throws ParseException {
        // NOTE(review): unlike getPrefixQuery/getFuzzyQuery/getWildcardQuery below,
        // this method neither remaps indexedNameField nor wraps the super result
        // when fieldMappers resolves but mapper() is null — confirm intentional.
        String indexedNameField = field;
        if (mapperService != null) {
            MapperService.SmartNameFieldMappers fieldMappers = mapperService.smartName(field);
            if (fieldMappers != null) {
                if (fieldMappers.fieldMappers().mapper() != null) {
                    Query query = fieldMappers.fieldMappers().mapper().fieldQuery(queryText);
                    return wrapSmartNameQuery(query, fieldMappers, filterCache);
                }
            }
        }
        return super.getFieldQuery(indexedNameField, queryText);
    }

    // Range queries are built entirely by the mapper (typed ranges for numeric
    // fields, etc.); both ends share the same inclusiveness flag.
    @Override protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
        if (mapperService != null) {
            MapperService.SmartNameFieldMappers fieldMappers = mapperService.smartName(field);
            if (fieldMappers != null) {
                if (fieldMappers.fieldMappers().mapper() != null) {
                    Query rangeQuery = fieldMappers.fieldMappers().mapper().rangeQuery(part1, part2, inclusive, inclusive);
                    return wrapSmartNameQuery(rangeQuery, fieldMappers, filterCache);
                }
            }
        }
        return super.getRangeQuery(field, part1, part2, inclusive);
    }

    // For prefix/fuzzy/wildcard, only the field name is remapped to its indexed
    // form; query construction stays with the Lucene superclass.
    @Override protected Query getPrefixQuery(String field, String termStr) throws ParseException {
        String indexedNameField = field;
        if (mapperService != null) {
            MapperService.SmartNameFieldMappers fieldMappers = mapperService.smartName(field);
            if (fieldMappers != null) {
                if (fieldMappers.fieldMappers().mapper() != null) {
                    indexedNameField = fieldMappers.fieldMappers().mapper().indexName();
                }
                return wrapSmartNameQuery(super.getPrefixQuery(indexedNameField, termStr), fieldMappers, filterCache);
            }
        }
        return super.getPrefixQuery(indexedNameField, termStr);
    }

    @Override protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
        String indexedNameField = field;
        if (mapperService != null) {
            MapperService.SmartNameFieldMappers fieldMappers = mapperService.smartName(field);
            if (fieldMappers != null) {
                if (fieldMappers.fieldMappers().mapper() != null) {
                    indexedNameField = fieldMappers.fieldMappers().mapper().indexName();
                }
                return wrapSmartNameQuery(super.getFuzzyQuery(indexedNameField, termStr, minSimilarity), fieldMappers, filterCache);
            }
        }
        return super.getFuzzyQuery(indexedNameField, termStr, minSimilarity);
    }

    @Override protected Query getWildcardQuery(String field, String termStr) throws ParseException {
        String indexedNameField = field;
        if (mapperService != null) {
            MapperService.SmartNameFieldMappers fieldMappers = mapperService.smartName(field);
            if (fieldMappers != null) {
                if (fieldMappers.fieldMappers().mapper() != null) {
                    indexedNameField = fieldMappers.fieldMappers().mapper().indexName();
                }
                return wrapSmartNameQuery(super.getWildcardQuery(indexedNameField, termStr), fieldMappers, filterCache);
            }
        }
        return super.getWildcardQuery(indexedNameField, termStr);
    }

    /**
     * Resolves the single field mapper for a smart name, or null when no mapper
     * service is configured or the name does not resolve.
     */
    protected FieldMapper fieldMapper(String smartName) {
        if (mapperService == null) {
            return null;
        }
        FieldMappers fieldMappers = mapperService.smartNameFieldMappers(smartName);
        if (fieldMappers == null) {
            return null;
        }
        return fieldMappers.mapper();
    }
}
a/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java new file mode 100644 index 00000000000..3b1e9707379 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.support; + +import org.apache.lucene.search.*; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.lucene.search.TermFilter; + +/** + * @author kimchy (Shay Banon) + */ +public final class QueryParsers { + + private QueryParsers() { + + } + + public static Query wrapSmartNameQuery(Query query, @Nullable MapperService.SmartNameFieldMappers smartFieldMappers, + @Nullable FilterCache filterCache) { + if (smartFieldMappers == null) { + return query; + } + if (!smartFieldMappers.hasDocMapper()) { + return query; + } + DocumentMapper docMapper = smartFieldMappers.docMapper(); + Filter typeFilter = new TermFilter(docMapper.typeMapper().term(docMapper.type())); + if (filterCache != null) { + typeFilter = filterCache.cache(typeFilter); + } + return new FilteredQuery(query, typeFilter); + } + + public static Filter wrapSmartNameFilter(Filter filter, @Nullable MapperService.SmartNameFieldMappers smartFieldMappers, + @Nullable FilterCache filterCache) { + if (smartFieldMappers == null) { + return filter; + } + if (!smartFieldMappers.hasDocMapper()) { + return filter; + } + DocumentMapper docMapper = smartFieldMappers.docMapper(); + BooleanFilter booleanFilter = new BooleanFilter(); + Filter typeFilter = new TermFilter(docMapper.typeMapper().term(docMapper.type())); + if (filterCache != null) { + typeFilter = filterCache.cache(typeFilter); + } + booleanFilter.add(new FilterClause(typeFilter, BooleanClause.Occur.MUST)); + booleanFilter.add(new FilterClause(filter, BooleanClause.Occur.MUST)); + + Filter result = booleanFilter; + if (filterCache != null) { + result = filterCache.cache(result); + } + return result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/OperationRouting.java 
package org.elasticsearch.index.routing;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.index.IndexShardMissingException;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.util.Nullable;

/**
 * Resolves which shard(s) of this index an operation should be routed to,
 * based on the current cluster state and the document coordinates (type/id).
 *
 * @author kimchy (Shay Banon)
 */
public interface OperationRouting {

    /**
     * The shard (as an iterator over its replicas) an index operation for
     * the given document should go to.
     */
    ShardsIterator indexShards(ClusterState clusterState, String type, String id) throws IndexMissingException, IndexShardMissingException;

    /**
     * The shard a delete operation for the given document should go to.
     */
    ShardsIterator deleteShards(ClusterState clusterState, String type, String id) throws IndexMissingException, IndexShardMissingException;

    /**
     * The shard a get operation for the given document should go to.
     */
    ShardsIterator getShards(ClusterState clusterState, String type, String id) throws IndexMissingException, IndexShardMissingException;

    /**
     * Returns the shards grouped by shard
     */
    GroupShardsIterator deleteByQueryShards(ClusterState clusterState) throws IndexMissingException;

    /**
     * All shards a search should fan out to; {@code queryHint} is optional
     * and may be used by implementations to bias shard selection.
     */
    GroupShardsIterator searchShards(ClusterState clusterState, @Nullable String queryHint) throws IndexMissingException;
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.routing; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.routing.hash.HashFunction; +import org.elasticsearch.index.routing.hash.djb.DjbHashFunction; +import org.elasticsearch.index.routing.plain.PlainOperationRoutingModule; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.util.guice.ModulesFactory.*; + +/** + * @author kimchy (Shay Banon) + */ +public class OperationRoutingModule extends AbstractModule { + + private final Settings indexSettings; + + public OperationRoutingModule(Settings indexSettings) { + this.indexSettings = indexSettings; + } + + @Override protected void configure() { + bind(HashFunction.class).to(indexSettings.getAsClass("index.routing.hash.type", DjbHashFunction.class, "org.elasticsearch.index.routing.hash.", "HashFunction")).asEagerSingleton(); + createModule(indexSettings.getAsClass("index.routing.type", PlainOperationRoutingModule.class, "org.elasticsearch.index.routing.", "OperationRoutingModule"), indexSettings).configure(binder()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/HashFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/HashFunction.java new file mode 100644 index 00000000000..35c87f745f9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/HashFunction.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.routing.hash; + +/** + * @author kimchy (Shay Banon) + */ +public interface HashFunction { + + int hash(String type, String id); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/djb/DjbHashFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/djb/DjbHashFunction.java new file mode 100644 index 00000000000..a63286ecfdd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/djb/DjbHashFunction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.routing.hash.djb; + +import org.elasticsearch.index.routing.hash.HashFunction; + +/** + * @author kimchy (Shay Banon) + */ +public class DjbHashFunction implements HashFunction { + + @Override public int hash(String type, String id) { + long hash = 5381; + + for (int i = 0; i < type.length(); i++) { + hash = ((hash << 5) + hash) + type.charAt(i); + } + + for (int i = 0; i < id.length(); i++) { + hash = ((hash << 5) + hash) + id.charAt(i); + } + + return (int) hash; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/simple/SimpleHashFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/simple/SimpleHashFunction.java new file mode 100644 index 00000000000..afd4b26be78 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/hash/simple/SimpleHashFunction.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.routing.hash.simple; + +import org.elasticsearch.index.routing.hash.HashFunction; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleHashFunction implements HashFunction { + + @Override public int hash(String type, String id) { + return type.hashCode() + 31 * id.hashCode(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/plain/PlainOperationRouting.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/plain/PlainOperationRouting.java new file mode 100644 index 00000000000..e6d45bfce86 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/plain/PlainOperationRouting.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.routing.plain; + +import com.google.inject.Inject; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexShardMissingException; +import org.elasticsearch.index.routing.OperationRouting; +import org.elasticsearch.index.routing.hash.HashFunction; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.util.IdentityHashSet; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class PlainOperationRouting extends AbstractIndexComponent implements OperationRouting { + + private final HashFunction hashFunction; + + @Inject public PlainOperationRouting(Index index, @IndexSettings Settings indexSettings, HashFunction hashFunction) { + super(index, indexSettings); + this.hashFunction = hashFunction; + } + + @Override public ShardsIterator indexShards(ClusterState clusterState, String type, String id) throws IndexMissingException, IndexShardMissingException { + return shards(clusterState, type, id).shardsIt(); + } + + @Override public ShardsIterator deleteShards(ClusterState clusterState, String type, String id) throws IndexMissingException, IndexShardMissingException { + return shards(clusterState, type, id).shardsIt(); + } + + @Override public ShardsIterator getShards(ClusterState clusterState, String type, String id) throws IndexMissingException, IndexShardMissingException { + return 
shards(clusterState, type, id).shardsRandomIt(); + } + + @Override public GroupShardsIterator deleteByQueryShards(ClusterState clusterState) throws IndexMissingException { + return indexRoutingTable(clusterState).groupByShardsIt(); + } + + @Override public GroupShardsIterator searchShards(ClusterState clusterState, @Nullable String queryHint) throws IndexMissingException { + IdentityHashSet set = new IdentityHashSet(); + IndexRoutingTable indexRouting = indexRoutingTable(clusterState); + for (IndexShardRoutingTable indexShard : indexRouting) { + set.add(indexShard.shardsRandomIt()); + } + return new GroupShardsIterator(set); + } + + public IndexMetaData indexMetaData(ClusterState clusterState) { + IndexMetaData indexMetaData = clusterState.metaData().index(index.name()); + if (indexMetaData == null) { + throw new IndexMissingException(index); + } + return indexMetaData; + } + + protected IndexRoutingTable indexRoutingTable(ClusterState clusterState) { + IndexRoutingTable indexRouting = clusterState.routingTable().index(index.name()); + if (indexRouting == null) { + throw new IndexMissingException(index); + } + return indexRouting; + } + + + protected IndexShardRoutingTable shards(ClusterState clusterState, String type, String id) { + int shardId = Math.abs(hash(type, id)) % indexMetaData(clusterState).numberOfShards(); + IndexShardRoutingTable indexShard = indexRoutingTable(clusterState).shard(shardId); + if (indexShard == null) { + throw new IndexShardMissingException(new ShardId(index, shardId)); + } + return indexShard; + } + + protected int hash(String type, String id) { + return hashFunction.hash(type, id); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/plain/PlainOperationRoutingModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/plain/PlainOperationRoutingModule.java new file mode 100644 index 00000000000..382497717e7 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/routing/plain/PlainOperationRoutingModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.routing.plain; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.routing.OperationRouting; + +/** + * @author kimchy (Shay Banon) + */ +public class PlainOperationRoutingModule extends AbstractModule { + + @Override protected void configure() { + bind(OperationRouting.class).to(PlainOperationRouting.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/settings/IndexSettings.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/settings/IndexSettings.java new file mode 100644 index 00000000000..8fce3361d12 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/settings/IndexSettings.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.settings; + +import com.google.inject.BindingAnnotation; +import org.elasticsearch.index.IndexLifecycle; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.*; +import static java.lang.annotation.RetentionPolicy.*; + +/** + * @author kimchy (Shay Banon) + */ + +@BindingAnnotation +@Target({FIELD, PARAMETER}) +@Retention(RUNTIME) +@Documented +@IndexLifecycle +public @interface IndexSettings { +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java new file mode 100644 index 00000000000..0e94d1d2a3f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.settings; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.IndexLifecycle; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +@IndexLifecycle +public class IndexSettingsModule extends AbstractModule { + + private final Settings settings; + + public IndexSettingsModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(Settings.class).annotatedWith(IndexSettings.class).toInstance(settings); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java new file mode 100644 index 00000000000..4b1e1f684cb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.jmx.ManagedGroupName; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.settings.Settings; +import org.slf4j.Logger; + +import static org.elasticsearch.index.shard.IndexShardManagement.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractIndexShardComponent implements IndexShardComponent { + + protected final Logger logger; + + protected final ShardId shardId; + + protected final Settings indexSettings; + + protected final Settings componentSettings; + + protected AbstractIndexShardComponent(ShardId shardId, @IndexSettings Settings indexSettings) { + this.shardId = shardId; + this.indexSettings = indexSettings; + this.componentSettings = indexSettings.getComponentSettings(getClass()); + + this.logger = Loggers.getLogger(getClass(), indexSettings, shardId); + } + + @Override public ShardId shardId() { + return this.shardId; + } + + @Override public Settings indexSettings() { + return this.indexSettings; + } + + public String nodeName() { + return indexSettings.get("name", ""); + } + + @ManagedGroupName + public String managementGroupName() { + return buildShardGroupName(shardId); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java new file mode 100644 index 00000000000..11f7807f1aa --- /dev/null 
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +/** + * @author kimchy (Shay Banon) + */ +public class IllegalIndexShardStateException extends IndexShardException { + + private final IndexShardState currentState; + + public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg) { + super(shardId, "CurrentState[" + currentState + "] " + msg); + this.currentState = currentState; + } + + public IndexShardState currentState() { + return currentState; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShard.java new file mode 100644 index 00000000000..ca6db06ba9e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.apache.lucene.index.Term; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.concurrent.ThreadSafe; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +@ThreadSafe +public interface IndexShard extends IndexShardComponent { + + ShardRouting routingEntry(); + + IndexShardState state(); + + /** + * Returns the estimated flushable memory size. Returns null if not available. + */ + SizeValue estimateFlushableMemorySize() throws ElasticSearchException; + + void create(String type, String id, String source) throws ElasticSearchException; + + void index(String type, String id, String source) throws ElasticSearchException; + + void delete(String type, String id); + + void delete(Term uid); + + void deleteByQuery(String querySource, @Nullable String queryParserName, String... 
types) throws ElasticSearchException; + + String get(String type, String id) throws ElasticSearchException; + + long count(float minScore, String querySource, @Nullable String queryParserName, String... types) throws ElasticSearchException; + + void refresh(boolean waitForOperations) throws ElasticSearchException; + + void flush() throws ElasticSearchException; + + void snapshot(Engine.SnapshotHandler snapshotHandler) throws EngineException; + + void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException; + + Engine.Searcher searcher(); + + void close(); + + /** + * Returns true if this shard can ignore a recovery attempt made to it (since it is already performing it, or has already completed it) + */ + public boolean ignoreRecoveryAttempt(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java new file mode 100644 index 00000000000..e5b1f768131 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardClosedException extends IllegalIndexShardStateException { + public IndexShardClosedException(ShardId shardId) { + super(shardId, IndexShardState.CLOSED, "Closed"); + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java new file mode 100644 index 00000000000..dc0b6d84718 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public interface IndexShardComponent { + + ShardId shardId(); + + Settings indexSettings(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardException.java new file mode 100644 index 00000000000..63417dc6e29 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardException.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.index.IndexException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardException extends IndexException { + + private final ShardId shardId; + + public IndexShardException(ShardId shardId, String msg) { + this(shardId, msg, null); + } + + public IndexShardException(ShardId shardId, String msg, Throwable cause) { + super(shardId.index(), "Shard[" + shardId.id() + "] " + msg, cause); + this.shardId = shardId; + } + + public ShardId shardId() { + return shardId; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardLifecycle.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardLifecycle.java new file mode 100644 index 00000000000..0b74d8dfb66 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardLifecycle.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.*; +import static java.lang.annotation.RetentionPolicy.*; + +/** + * A simple annotation that marks a component to be bounded to a single index shard + * lifecycle. + *

+ *

Note, currently only acts as a marker interface for readability. + * + * @author kimchy (Shay Banon) + */ +@Target({TYPE, ANNOTATION_TYPE}) +@Retention(RUNTIME) +@Documented +public @interface IndexShardLifecycle { +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardManagement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardManagement.java new file mode 100644 index 00000000000..608119be6a2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardManagement.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +import com.google.inject.Inject; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.jmx.JmxService; +import org.elasticsearch.jmx.MBean; +import org.elasticsearch.jmx.ManagedAttribute; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +import static org.elasticsearch.index.IndexServiceManagement.*; + +/** + * @author kimchy (Shay Banon) + */ +@MBean(objectName = "", description = "") +public class IndexShardManagement extends AbstractIndexShardComponent { + + public static String buildShardGroupName(ShardId shardId) { + return buildIndexGroupName(shardId.index()) + ",subService=shards,shard=" + shardId.id(); + } + + private final JmxService jmxService; + + private final IndexShard indexShard; + + private final Store store; + + private final Translog translog; + + @Inject public IndexShardManagement(ShardId shardId, @IndexSettings Settings indexSettings, JmxService jmxService, IndexShard indexShard, + Store store, Translog translog) { + super(shardId, indexSettings); + this.jmxService = jmxService; + this.indexShard = indexShard; + this.store = store; + this.translog = translog; + } + + public void close() { + jmxService.unregisterGroup(buildShardGroupName(indexShard.shardId())); + } + + @ManagedAttribute(description = "Index Name") + public String getIndex() { + return indexShard.shardId().index().name(); + } + + @ManagedAttribute(description = "Shard Id") + public int getShardId() { + return indexShard.shardId().id(); + } + + @ManagedAttribute(description = "Storage Size") + public String getStoreSize() { + try { + return store.estimateSize().toString(); + } catch (IOException e) { + return "NA"; + } + } + + @ManagedAttribute(description = "The current transaction log id") + public long getTranslogId() { + 
return translog.currentId(); + } + + @ManagedAttribute(description = "Number of transaction log operations") + public long getTranslogNumberOfOperations() { + return translog.size(); + } + + @ManagedAttribute(description = "Estimated size in memory the transaction log takes") + public String getTranslogSize() { + return translog.estimateMemorySize().toString(); + } + + @ManagedAttribute(description = "The state of the shard") + public String getState() { + return indexShard.state().toString(); + } + + @ManagedAttribute(description = "Primary") + public boolean isPrimary() { + return indexShard.routingEntry().primary(); + } + + @ManagedAttribute(description = "The state of the shard as perceived by the cluster") + public String getRoutingState() { + return indexShard.routingEntry().state().toString(); + } + + @ManagedAttribute(description = "The number of documents in the index") + public int getNumDocs() { + Engine.Searcher searcher = indexShard.searcher(); + try { + return searcher.reader().numDocs(); + } finally { + searcher.release(); + } + } + + @ManagedAttribute(description = "The total number of documents in the index (including deleted ones)") + public int getMaxDoc() { + Engine.Searcher searcher = indexShard.searcher(); + try { + return searcher.reader().maxDoc(); + } finally { + searcher.release(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java new file mode 100644 index 00000000000..02354364432 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.shard.recovery.RecoveryAction; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class IndexShardModule extends AbstractModule { + + private final ShardId shardId; + + public IndexShardModule(ShardId shardId) { + this.shardId = shardId; + } + + @Override protected void configure() { + bind(ShardId.class).toInstance(shardId); + bind(IndexShard.class).to(InternalIndexShard.class).asEagerSingleton(); + bind(IndexShardManagement.class).asEagerSingleton(); + + bind(RecoveryAction.class).asEagerSingleton(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java new file mode 100644 index 00000000000..b0179ac4447 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardNotRecoveringException extends IllegalIndexShardStateException { + + public IndexShardNotRecoveringException(ShardId shardId, IndexShardState currentState) { + super(shardId, currentState, "Shard not in recovering state"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java new file mode 100644 index 00000000000..e14de790367 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardNotStartedException extends IllegalIndexShardStateException { + + public IndexShardNotStartedException(ShardId shardId, IndexShardState currentState) { + super(shardId, currentState, "Shard not started"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java new file mode 100644 index 00000000000..b3978a218da --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardRecoveringException extends IllegalIndexShardStateException { + + public IndexShardRecoveringException(ShardId shardId) { + super(shardId, IndexShardState.RECOVERING, "Already recovering"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java new file mode 100644 index 00000000000..a650588bd18 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.shard; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexShardRelocatedException extends IllegalIndexShardStateException { + + public IndexShardRelocatedException(ShardId shardId) { + super(shardId, IndexShardState.RELOCATED, "Already relocated"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java new file mode 100644 index 00000000000..cfff25e1bc3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.shard;

/**
 * Thrown when a lifecycle transition (e.g. start or recover) is attempted on
 * a shard that is already in the {@link IndexShardState#STARTED} state.
 *
 * @author kimchy (Shay Banon)
 */
public class IndexShardStartedException extends IllegalIndexShardStateException {

    /**
     * @param shardId the shard that is already in the STARTED state
     */
    public IndexShardStartedException(ShardId shardId) {
        super(shardId, IndexShardState.STARTED, "Already started");
    }
}
+ */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; + +/** + * @author kimchy (Shay Banon) + */ +public enum IndexShardState { + CREATED((byte) 0), + RECOVERING((byte) 1), + STARTED((byte) 2), + RELOCATED((byte) 3), + CLOSED((byte) 4); + + private final byte id; + + IndexShardState(byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + + public static IndexShardState fromId(byte id) throws ElasticSearchIllegalArgumentException { + if (id == 0) { + return CREATED; + } else if (id == 1) { + return RECOVERING; + } else if (id == 2) { + return STARTED; + } else if (id == 3) { + return RELOCATED; + } else if (id == 4) { + return CLOSED; + } + throw new ElasticSearchIllegalArgumentException("No mapping for id [" + id + "]"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/InternalIndexShard.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/InternalIndexShard.java new file mode 100644 index 00000000000..fd3fcbf1a35 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/InternalIndexShard.java @@ -0,0 +1,500 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.shard;

import com.google.inject.Inject;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.ElasticSearchIllegalStateException;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.ScheduledRefreshableEngine;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperNotFoundException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.IndexQueryParser;
import org.elasticsearch.index.query.IndexQueryParserMissingException;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.Nullable;
import org.elasticsearch.util.SizeValue;
import org.elasticsearch.util.Strings;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.concurrent.ThreadSafe;
import org.elasticsearch.util.lucene.Lucene;
import org.elasticsearch.util.lucene.search.TermFilter;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;
import java.util.concurrent.ScheduledFuture;

/**
 * Default {@link IndexShard} implementation. Ties together the shard's
 * {@link Store}, {@link Engine} and {@link Translog} and owns the shard's
 * lifecycle state machine (CREATED -> RECOVERING -> STARTED -> RELOCATED/CLOSED).
 *
 * State transitions are serialized under {@code mutex}; reads of {@code state}
 * elsewhere rely on it being volatile. Read operations are allowed when
 * STARTED or RELOCATED, write operations only when STARTED.
 *
 * @author kimchy (Shay Banon)
 */
@IndexShardLifecycle
@ThreadSafe
public class InternalIndexShard extends AbstractIndexShardComponent implements IndexShard {

    private final ThreadPool threadPool;

    private final MapperService mapperService;

    private final IndexQueryParserService queryParserService;

    private final FilterCache filterCache;

    private final Store store;

    private final Engine engine;

    private final Translog translog;

    // Guards all lifecycle state transitions below.
    private final Object mutex = new Object();

    // Volatile so lifecycle checks (readAllowed/writeAllowed/...) can do a
    // single cheap read without taking the mutex.
    private volatile IndexShardState state;

    // Periodic engine refresh task; non-null only when the engine supports
    // scheduled refresh and the configured interval is positive.
    private ScheduledFuture refreshScheduledFuture;

    private volatile ShardRouting shardRouting;

    @Inject public InternalIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, Store store, Engine engine, Translog translog,
                                      ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, FilterCache filterCache) {
        super(shardId, indexSettings);
        this.store = store;
        this.engine = engine;
        this.translog = translog;
        this.threadPool = threadPool;
        this.mapperService = mapperService;
        this.queryParserService = queryParserService;
        this.filterCache = filterCache;
        state = IndexShardState.CREATED;
    }

    public Store store() {
        return this.store;
    }

    public Engine engine() {
        return engine;
    }

    public Translog translog() {
        return translog;
    }

    public ShardRouting routingEntry() {
        return this.shardRouting;
    }

    /**
     * Updates the cluster routing entry for this shard. The routing's shard id
     * must match this shard; a primary -> backup transition is only logged as
     * suspect, not rejected.
     */
    public InternalIndexShard routingEntry(ShardRouting shardRouting) {
        if (!shardRouting.shardId().equals(shardId())) {
            throw new ElasticSearchIllegalArgumentException("Trying to set a routing entry with shardId [" + shardRouting.shardId() + "] on a shard with shardId [" + shardId() + "]");
        }
        if (this.shardRouting != null) {
            if (!shardRouting.primary() && this.shardRouting.primary()) {
                logger.warn("Suspect illegal state: Trying to move shard from primary mode to backup mode");
            }
        }
        this.shardRouting = shardRouting;
        return this;
    }

    /**
     * Marks the shard as RECOVERING and returns the state it was in before the
     * transition (so callers can restore it via {@link #restoreRecoveryState}
     * if the recovery attempt is abandoned).
     *
     * @throws IndexShardStartedException    if already started
     * @throws IndexShardRelocatedException  if already relocated
     * @throws IndexShardRecoveringException if a recovery is already in flight
     * @throws IndexShardClosedException     if the shard is closed
     */
    public IndexShardState recovering() throws IndexShardStartedException,
            IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
        synchronized (mutex) {
            IndexShardState returnValue = state;
            if (state == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(shardId);
            }
            if (state == IndexShardState.STARTED) {
                throw new IndexShardStartedException(shardId);
            }
            if (state == IndexShardState.RELOCATED) {
                throw new IndexShardRelocatedException(shardId);
            }
            if (state == IndexShardState.RECOVERING) {
                throw new IndexShardRecoveringException(shardId);
            }
            state = IndexShardState.RECOVERING;
            return returnValue;
        }
    }

    /**
     * Restores the state captured by {@link #recovering()} after a recovery
     * attempt was ignored/aborted. Only valid while RECOVERING.
     */
    public InternalIndexShard restoreRecoveryState(IndexShardState stateToRestore) {
        synchronized (mutex) {
            if (this.state != IndexShardState.RECOVERING) {
                throw new IndexShardNotRecoveringException(shardId, state);
            }
            this.state = stateToRestore;
        }
        return this;
    }

    /**
     * Marks a STARTED shard as RELOCATED (its data has been handed off).
     */
    public InternalIndexShard relocated() throws IndexShardNotStartedException {
        synchronized (mutex) {
            if (state != IndexShardState.STARTED) {
                throw new IndexShardNotStartedException(shardId, state);
            }
            state = IndexShardState.RELOCATED;
        }
        return this;
    }

    /**
     * Starts the engine and moves the shard to STARTED. Also schedules the
     * periodic refresher when the engine supports it.
     */
    public InternalIndexShard start() throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
        synchronized (mutex) {
            if (state == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(shardId);
            }
            if (state == IndexShardState.STARTED) {
                throw new IndexShardStartedException(shardId);
            }
            if (state == IndexShardState.RELOCATED) {
                throw new IndexShardRelocatedException(shardId);
            }
            engine.start();
            scheduleRefresherIfNeeded();
            state = IndexShardState.STARTED;
        }
        return this;
    }

    public IndexShardState state() {
        return state;
    }

    /**
     * Returns the estimated flushable memory size. Returns null if not available.
     */
    public SizeValue estimateFlushableMemorySize() throws ElasticSearchException {
        writeAllowed();
        return engine.estimateFlushableMemorySize();
    }

    /** Indexes a new document; fails if a mapper for {@code type} is missing. */
    public void create(String type, String id, String source) throws ElasticSearchException {
        writeAllowed();
        innerCreate(type, id, source);
    }

    // Shared by create() and translog replay (no writeAllowed() check here,
    // since replay runs while the shard is still RECOVERING).
    private void innerCreate(String type, String id, String source) {
        DocumentMapper docMapper = mapperService.type(type);
        if (docMapper == null) {
            throw new DocumentMapperNotFoundException("No mapper found for type [" + type + "]");
        }
        ParsedDocument doc = docMapper.parse(type, id, source);
        if (logger.isTraceEnabled()) {
            logger.trace("Indexing {}", doc);
        }
        engine.create(new Engine.Create(doc.doc(), docMapper.mappers().indexAnalyzer(), docMapper.type(), doc.id(), doc.source()));
    }

    /** Indexes (creates or overwrites by uid) a document. */
    public void index(String type, String id, String source) throws ElasticSearchException {
        writeAllowed();
        innerIndex(type, id, source);
    }

    // Shared by index() and translog replay.
    private void innerIndex(String type, String id, String source) {
        DocumentMapper docMapper = mapperService.type(type);
        if (docMapper == null) {
            throw new DocumentMapperNotFoundException("No mapper found for type [" + type + "]");
        }
        ParsedDocument doc = docMapper.parse(type, id, source);
        if (logger.isTraceEnabled()) {
            logger.trace("Indexing {}", doc);
        }
        engine.index(new Engine.Index(docMapper.uidMapper().term(doc.uid()), doc.doc(), docMapper.mappers().indexAnalyzer(), docMapper.type(), doc.id(), doc.source()));
    }

    /** Deletes a document by type and id, resolving the uid term via the mapper. */
    public void delete(String type, String id) {
        writeAllowed();
        DocumentMapper docMapper = mapperService.type(type);
        if (docMapper == null) {
            throw new DocumentMapperNotFoundException("No mapper found for type [" + type + "]");
        }
        innerDelete(docMapper.uidMapper().term(type, id));
    }

    /** Deletes a document by its already-resolved uid term. */
    public void delete(Term uid) {
        writeAllowed();
        innerDelete(uid);
    }

    // Shared by both delete() overloads and translog replay.
    private void innerDelete(Term uid) {
        if (logger.isTraceEnabled()) {
            logger.trace("Deleting [{}]", uid.text());
        }
        engine.delete(new Engine.Delete(uid));
    }

    /**
     * Deletes all documents matching the parsed query, optionally restricted
     * to the given types. A null {@code types} is treated as empty.
     */
    public void deleteByQuery(String querySource, @Nullable String queryParserName, String... types) throws ElasticSearchException {
        writeAllowed();
        if (types == null) {
            types = Strings.EMPTY_ARRAY;
        }
        innerDeleteByQuery(querySource, queryParserName, types);
    }

    // Shared by deleteByQuery() and translog replay.
    private void innerDeleteByQuery(String querySource, String queryParserName, String... types) {
        IndexQueryParser queryParser = queryParserService.defaultIndexQueryParser();
        if (queryParserName != null) {
            queryParser = queryParserService.indexQueryParser(queryParserName);
            if (queryParser == null) {
                throw new IndexQueryParserMissingException(queryParserName);
            }
        }
        Query query = queryParser.parse(querySource);
        query = filterByTypesIfNeeded(query, types);

        if (logger.isTraceEnabled()) {
            logger.trace("Deleting By Query [{}]", query);
        }

        engine.delete(new Engine.DeleteByQuery(query, querySource, queryParserName, types));
    }

    /**
     * Fetches the stored source of a document, or null when it does not exist.
     * Acquires and always releases an engine searcher.
     */
    public String get(String type, String id) throws ElasticSearchException {
        readAllowed();
        DocumentMapper docMapper = mapperService.type(type);
        if (docMapper == null) {
            throw new DocumentMapperNotFoundException("No mapper found for type [" + type + "]");
        }
        Engine.Searcher searcher = engine.searcher();
        try {
            int docId = Lucene.docId(searcher.reader(), docMapper.uidMapper().term(type, id));
            if (docId == Lucene.NO_DOC) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Get for [{}#{}] returned no result", type, id);
                }
                return null;
            }
            Document doc = searcher.reader().document(docId, docMapper.sourceMapper().fieldSelector());
            if (logger.isTraceEnabled()) {
                logger.trace("Get for [{}#{}] returned [{}]", new Object[]{type, id, doc});
            }
            return docMapper.sourceMapper().value(doc);
        } catch (IOException e) {
            throw new ElasticSearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
        } finally {
            searcher.release();
        }
    }

    /**
     * Counts documents matching the query (with an optional minimum score),
     * restricted to the given types if any.
     */
    public long count(float minScore, String querySource, @Nullable String queryParserName, String... types) throws ElasticSearchException {
        readAllowed();
        IndexQueryParser queryParser = queryParserService.defaultIndexQueryParser();
        if (queryParserName != null) {
            queryParser = queryParserService.indexQueryParser(queryParserName);
            if (queryParser == null) {
                throw new IndexQueryParserMissingException(queryParserName);
            }
        }
        Query query = queryParser.parse(querySource);
        query = filterByTypesIfNeeded(query, types);
        Engine.Searcher searcher = engine.searcher();
        try {
            long count = Lucene.count(searcher.searcher(), query, minScore);
            if (logger.isTraceEnabled()) {
                logger.trace("Count of [{}] is [{}]", query, count);
            }
            return count;
        } catch (IOException e) {
            throw new ElasticSearchException("Failed to count query [" + query + "]", e);
        } finally {
            searcher.release();
        }
    }

    /** Refreshes the engine, optionally waiting for in-flight operations. */
    public void refresh(boolean waitForOperations) throws ElasticSearchException {
        writeAllowed();
        if (logger.isTraceEnabled()) {
            logger.trace("Refresh, waitForOperations[{}]", waitForOperations);
        }
        engine.refresh(waitForOperations);
    }

    /** Flushes the engine. */
    public void flush() throws ElasticSearchException {
        writeAllowed();
        if (logger.isTraceEnabled()) {
            logger.trace("Flush");
        }
        engine.flush();
    }

    public void snapshot(Engine.SnapshotHandler snapshotHandler) throws EngineException {
        readAllowed();
        engine.snapshot(snapshotHandler);
    }

    /** Runs a (source-side) recovery through the engine's recovery handler. */
    public void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException {
        writeAllowed();
        engine.recover(recoveryHandler);
    }

    public Engine.Searcher searcher() {
        readAllowed();
        return engine.searcher();
    }

    /**
     * Closes the shard: cancels the scheduled refresher (if any) and moves to
     * CLOSED. Note: does not close the engine/store here.
     */
    public void close() {
        synchronized (mutex) {
            if (state != IndexShardState.CLOSED) {
                if (refreshScheduledFuture != null) {
                    refreshScheduledFuture.cancel(true);
                    refreshScheduledFuture = null;
                }
            }
            state = IndexShardState.CLOSED;
        }
    }

    /**
     * Target-side recovery from a stream of translog operations: starts the
     * engine, replays the operations, then marks the shard STARTED.
     * The shard must already be RECOVERING (see {@link #recovering()}).
     */
    public void performRecovery(Iterable operations) throws ElasticSearchException {
        if (state != IndexShardState.RECOVERING) {
            throw new IndexShardNotRecoveringException(shardId, state);
        }
        engine.start();
        applyTranslogOperations(operations);
        synchronized (mutex) {
            state = IndexShardState.STARTED;
        }
        scheduleRefresherIfNeeded();
    }

    /**
     * Target-side recovery from a translog snapshot. During phase 2 the engine
     * is started but the shard stays RECOVERING; only the phase 3 call marks
     * the shard STARTED and schedules the refresher.
     */
    public void performRecovery(Translog.Snapshot snapshot, boolean phase3) throws ElasticSearchException {
        if (state != IndexShardState.RECOVERING) {
            throw new IndexShardNotRecoveringException(shardId, state);
        }
        if (!phase3) {
            // start the engine, but the shard is not started yet...
            engine.start();
        }
        applyTranslogOperations(snapshot);
        if (phase3) {
            synchronized (mutex) {
                state = IndexShardState.STARTED;
            }
            scheduleRefresherIfNeeded();
        }
    }

    // Replays translog operations against the engine via the inner* methods,
    // bypassing the writeAllowed() lifecycle check.
    private void applyTranslogOperations(Iterable snapshot) {
        for (Translog.Operation operation : snapshot) {
            switch (operation.opType()) {
                case CREATE:
                    Translog.Create create = (Translog.Create) operation;
                    innerCreate(create.type(), create.id(), create.source());
                    break;
                case SAVE:
                    Translog.Index index = (Translog.Index) operation;
                    innerIndex(index.type(), index.id(), index.source());
                    break;
                case DELETE:
                    Translog.Delete delete = (Translog.Delete) operation;
                    innerDelete(delete.uid());
                    break;
                case DELETE_BY_QUERY:
                    Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
                    innerDeleteByQuery(deleteByQuery.source(), deleteByQuery.queryParserName(), deleteByQuery.types());
                    break;
                default:
                    throw new ElasticSearchIllegalStateException("No operation defined for [" + operation + "]");
            }
        }
    }

    /**
     * Returns true if this shard can ignore a recovery attempt made to it (since the already doing/done it)
     */
    public boolean ignoreRecoveryAttempt() {
        IndexShardState state = state(); // one time volatile read
        return state == IndexShardState.RECOVERING || state == IndexShardState.STARTED ||
                state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED;
    }

    /** Read operations are allowed when STARTED or RELOCATED. */
    public void readAllowed() throws IllegalIndexShardStateException {
        IndexShardState state = this.state; // one time volatile read
        if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
            throw new IllegalIndexShardStateException(shardId, state, "Read operations only allowed when started/relocated");
        }
    }

    /** Write operations are allowed only when STARTED. */
    public void writeAllowed() throws IllegalIndexShardStateException {
        IndexShardState state = this.state; // one time volatile read
        if (state != IndexShardState.STARTED) {
            throw new IndexShardNotStartedException(shardId, state);
        }
    }

    // If the engine supports scheduled refresh and the interval is positive,
    // register a fixed-delay refresh task on the thread pool.
    private void scheduleRefresherIfNeeded() {
        if (engine instanceof ScheduledRefreshableEngine) {
            TimeValue refreshInterval = ((ScheduledRefreshableEngine) engine).refreshInterval();
            if (refreshInterval.millis() > 0) {
                refreshScheduledFuture = threadPool.scheduleWithFixedDelay(new EngineRefresher(), refreshInterval);
                logger.debug("Scheduling refresher every {}", refreshInterval);
            }
        }
    }

    // Wraps the query in a FilteredQuery restricted to the given types.
    // Single type: a cached TermFilter on the type field. Multiple types:
    // a BooleanFilter of SHOULD clauses over the cached per-type filters.
    private Query filterByTypesIfNeeded(Query query, String[] types) {
        if (types != null && types.length > 0) {
            if (types.length == 1) {
                String type = types[0];
                DocumentMapper docMapper = mapperService.documentMapper(type);
                if (docMapper == null) {
                    throw new TypeMissingException(shardId.index(), type);
                }
                Filter typeFilter = new TermFilter(docMapper.typeMapper().term(docMapper.type()));
                typeFilter = filterCache.cache(typeFilter);
                query = new FilteredQuery(query, typeFilter);
            } else {
                BooleanFilter booleanFilter = new BooleanFilter();
                for (String type : types) {
                    DocumentMapper docMapper = mapperService.documentMapper(type);
                    if (docMapper == null) {
                        throw new TypeMissingException(shardId.index(), type);
                    }
                    Filter typeFilter = new TermFilter(docMapper.typeMapper().term(docMapper.type()));
                    typeFilter = filterCache.cache(typeFilter);
                    booleanFilter.add(new FilterClause(typeFilter, BooleanClause.Occur.SHOULD));
                }
                query = new FilteredQuery(query, booleanFilter);
            }
        }
        return query;
    }

    // Periodic task scheduled by scheduleRefresherIfNeeded(); failures are
    // logged and swallowed so the schedule keeps running.
    private class EngineRefresher implements Runnable {
        @Override public void run() {
            try {
                engine.refresh(false);
            } catch (Exception e) {
                logger.warn("Failed to perform scheduled engine refresh", e);
            }
        }
    }
}
+ * + * @author kimchy (Shay Banon) + */ +@Immutable +public class ShardId implements Serializable, Streamable { + + private Index index; + + private int shardId; + + private ShardId() { + + } + + public ShardId(String index, int shardId) { + this(new Index(index), shardId); + } + + public ShardId(Index index, int shardId) { + this.index = index; + this.shardId = shardId; + } + + public Index index() { + return this.index; + } + + public int id() { + return this.shardId; + } + + @Override public String toString() { + return "Index Shard [" + index.name() + "][" + shardId + "]"; + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ShardId shardId1 = (ShardId) o; + + if (shardId != shardId1.shardId) return false; + if (index != null ? !index.equals(shardId1.index) : shardId1.index != null) return false; + + return true; + } + + @Override public int hashCode() { + int result = index != null ? index.hashCode() : 0; + result = 31 * result + shardId; + return result; + } + + public static ShardId readShardId(DataInput in) throws IOException, ClassNotFoundException { + ShardId shardId = new ShardId(); + shardId.readFrom(in); + return shardId; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + index = Index.readIndexName(in); + shardId = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + index.writeTo(out); + out.writeInt(shardId); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/IgnoreRecoveryException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/IgnoreRecoveryException.java new file mode 100644 index 00000000000..e1185189127 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/IgnoreRecoveryException.java @@ -0,0 +1,38 @@ +/* + * Licensed to 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.shard.recovery;

import org.elasticsearch.ElasticSearchException;

/**
 * An exception marking that this recovery attempt should be ignored (since probably, we already recovered).
 *
 * @author kimchy (Shay Banon)
 */
public class IgnoreRecoveryException extends ElasticSearchException {

    public IgnoreRecoveryException(String msg) {
        super(msg);
    }

    /**
     * @param cause the underlying shard-state exception that made the attempt ignorable
     */
    public IgnoreRecoveryException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.shard.recovery;

import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.SizeValue;

/**
 * Thrown when phase 1 of recovery (copying the index files) fails. Carries
 * the number of files and their total size for diagnostics.
 *
 * @author kimchy (Shay Banon)
 */
public class RecoverFilesRecoveryException extends IndexShardException {

    // Number of index files that were being transferred when the failure occurred.
    private final int numberOfFiles;

    // Combined size of those files.
    private final SizeValue totalFilesSize;

    /**
     * @param shardId        the shard being recovered
     * @param numberOfFiles  how many files the transfer covered
     * @param totalFilesSize combined size of the files
     * @param cause          the underlying failure
     */
    public RecoverFilesRecoveryException(ShardId shardId, int numberOfFiles, SizeValue totalFilesSize, Throwable cause) {
        super(shardId, "Failed to transfer [" + numberOfFiles + "] files with total size of [" + totalFilesSize + "]", cause);
        this.numberOfFiles = numberOfFiles;
        this.totalFilesSize = totalFilesSize;
    }

    public int numberOfFiles() {
        return numberOfFiles;
    }

    public SizeValue totalFilesSize() {
        return totalFilesSize;
    }
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard.recovery; + +import com.google.inject.Inject; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.*; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.memory.MemorySnapshot; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.io.VoidStreamable; +import org.elasticsearch.util.settings.Settings; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentMap; +import 
java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.concurrent.TimeUnit.*; +import static org.elasticsearch.util.concurrent.ConcurrentMaps.*; + +/** + * @author kimchy (Shay Banon) + */ +public class RecoveryAction extends AbstractIndexShardComponent { + + private final SizeValue fileChunkSize; + + private final ThreadPool threadPool; + + private final TransportService transportService; + + private final InternalIndexShard indexShard; + + private final Store store; + + private final ConcurrentMap openIndexOutputs = newConcurrentMap(); + + private final String startTransportAction; + + private final String fileChunkTransportAction; + + private final String snapshotTransportAction; + + @Inject public RecoveryAction(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, TransportService transportService, IndexShard indexShard, Store store) { + super(shardId, indexSettings); + this.threadPool = threadPool; + this.transportService = transportService; + this.indexShard = (InternalIndexShard) indexShard; + this.store = store; + + startTransportAction = shardId.index().name() + "/" + shardId.id() + "/recovery/start"; + transportService.registerHandler(startTransportAction, new StartRecoveryTransportRequestHandler()); + fileChunkTransportAction = shardId.index().name() + "/" + shardId.id() + "/recovery/fileChunk"; + transportService.registerHandler(fileChunkTransportAction, new FileChunkTransportRequestHandler()); + snapshotTransportAction = shardId.index().name() + "/" + shardId.id() + "/recovery/snapshot"; + transportService.registerHandler(snapshotTransportAction, new SnapshotTransportRequestHandler()); + + this.fileChunkSize = componentSettings.getAsSize("fileChunkSize", new SizeValue(16, SizeUnit.KB)); + logger.trace("Recovery Action registered, using fileChunkSize[{}]", fileChunkSize); + } + + public void close() { + transportService.removeHandler(startTransportAction); + 
transportService.removeHandler(fileChunkTransportAction); + transportService.removeHandler(snapshotTransportAction); + } + + public synchronized void startRecovery(Node node, Node targetNode, boolean markAsRelocated) throws ElasticSearchException { + // mark the shard as recovering + IndexShardState preRecoveringState; + try { + preRecoveringState = indexShard.recovering(); + } catch (IndexShardRecoveringException e) { + // that's fine, since we might be called concurrently, just ignore this, we are already recovering + throw new IgnoreRecoveryException("Already in recovering process", e); + } catch (IndexShardStartedException e) { + // that's fine, since we might be called concurrently, just ignore this, we are already started + throw new IgnoreRecoveryException("Already in recovering process", e); + } catch (IndexShardRelocatedException e) { + // that's fine, since we might be called concurrently, just ignore this, we are already relocated + throw new IgnoreRecoveryException("Already in recovering process", e); + } catch (IndexShardClosedException e) { + throw new IgnoreRecoveryException("can't recover a closed shard.", e); + } + logger.debug("Starting recovery from {}", targetNode); + StopWatch stopWatch = new StopWatch().start(); + try { + RecoveryStatus recoveryStatus = transportService.submitRequest(targetNode, startTransportAction, new StartRecoveryRequest(node, markAsRelocated), new FutureTransportResponseHandler() { + @Override public RecoveryStatus newInstance() { + return new RecoveryStatus(); + } + }).txGet(); + stopWatch.stop(); + if (logger.isDebugEnabled()) { + StringBuilder sb = new StringBuilder(); + sb.append("Recovery completed from ").append(targetNode).append(", took [").append(stopWatch.totalTime()).append("]\n"); + sb.append(" Phase1: recovered [").append(recoveryStatus.phase1FileNames.size()).append("]") + .append(" files with total size of [").append(new SizeValue(recoveryStatus.phase1TotalSize)).append("]") + .append(", took [").append(new 
TimeValue(recoveryStatus.phase1Time, MILLISECONDS)).append("]") + .append("\n"); + sb.append(" Phase2: recovered [").append(recoveryStatus.phase2Operations).append("]").append(" transaction log operations") + .append(", took [").append(new TimeValue(recoveryStatus.phase2Time, MILLISECONDS)).append("]") + .append("\n"); + sb.append(" Phase3: recovered [").append(recoveryStatus.phase3Operations).append("]").append(" transaction log operations") + .append(", took [").append(new TimeValue(recoveryStatus.phase3Time, MILLISECONDS)).append("]"); + logger.debug(sb.toString()); + } + } catch (RemoteTransportException e) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ActionNotFoundTransportException || + cause instanceof IndexShardNotStartedException) { + // the remote shard has not yet registered the action or not started yet, we need to ignore this recovery attempt, and restore the state previous to recovering + indexShard.restoreRecoveryState(preRecoveringState); + throw new IgnoreRecoveryException("Ignoring recovery attempt, remote shard not started", e); + } + throw new RecoveryFailedException(shardId, node, targetNode, e); + } catch (Exception e) { + throw new RecoveryFailedException(shardId, node, targetNode, e); + } + } + + private void cleanOpenIndex() { + for (IndexOutput indexOutput : openIndexOutputs.values()) { + try { + synchronized (indexOutput) { + indexOutput.close(); + } + } catch (Exception e) { + // ignore + } + } + openIndexOutputs.clear(); + } + + private static class StartRecoveryRequest implements Streamable { + + private Node node; + + private boolean markAsRelocated; + + private StartRecoveryRequest() { + } + + private StartRecoveryRequest(Node node, boolean markAsRelocated) { + this.node = node; + this.markAsRelocated = markAsRelocated; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + node = Node.readNode(in); + markAsRelocated = in.readBoolean(); + } + + @Override 
public void writeTo(DataOutput out) throws IOException { + node.writeTo(out); + out.writeBoolean(markAsRelocated); + } + } + + private class StartRecoveryTransportRequestHandler extends BaseTransportRequestHandler { + + @Override public StartRecoveryRequest newInstance() { + return new StartRecoveryRequest(); + } + + @Override public void messageReceived(final StartRecoveryRequest startRecoveryRequest, final TransportChannel channel) throws Exception { + logger.trace("Starting recovery to {}, markAsRelocated {}", startRecoveryRequest.node, startRecoveryRequest.markAsRelocated); + final Node node = startRecoveryRequest.node; + cleanOpenIndex(); + final RecoveryStatus recoveryStatus = new RecoveryStatus(); + indexShard.recover(new Engine.RecoveryHandler() { + @Override public void phase1(SnapshotIndexCommit snapshot) throws ElasticSearchException { + long totalSize = 0; + try { + StopWatch stopWatch = new StopWatch().start(); + + for (String name : snapshot.getFiles()) { + IndexInput indexInput = store.directory().openInput(name); + recoveryStatus.phase1FileNames.add(name); + recoveryStatus.phase1FileSizes.add(indexInput.length()); + totalSize += indexInput.length(); + indexInput.close(); + } + recoveryStatus.phase1TotalSize = totalSize; + + logger.trace("Recovery [phase1] to {}: recovering [{}] files with total size of [{}]", new Object[]{node, snapshot.getFiles().length, new SizeValue(totalSize)}); + + final CountDownLatch latch = new CountDownLatch(snapshot.getFiles().length); + final AtomicReference lastException = new AtomicReference(); + for (final String name : snapshot.getFiles()) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + final int BUFFER_SIZE = (int) fileChunkSize.bytes(); + byte[] buf = new byte[BUFFER_SIZE]; + IndexInput indexInput = store.directory().openInput(name); + long len = indexInput.length(); + long readCount = 0; + while (readCount < len) { + int toRead = readCount + BUFFER_SIZE > len ? 
(int) (len - readCount) : BUFFER_SIZE; + long position = indexInput.getFilePointer(); + indexInput.readBytes(buf, 0, toRead, false); + transportService.submitRequest(node, fileChunkTransportAction, new FileChunk(name, position, len, buf, toRead), VoidTransportResponseHandler.INSTANCE).txGet(30, SECONDS); + readCount += toRead; + } + indexInput.close(); + } catch (Exception e) { + lastException.set(e); + } finally { + latch.countDown(); + } + } + }); + } + + latch.await(); + + if (lastException.get() != null) { + throw lastException.get(); + } + + stopWatch.stop(); + logger.trace("Recovery [phase1] to {}: took [{}]", node, stopWatch.totalTime()); + recoveryStatus.phase1Time = stopWatch.totalTime().millis(); + } catch (Throwable e) { + throw new RecoverFilesRecoveryException(shardId, snapshot.getFiles().length, new SizeValue(totalSize), e); + } + } + + @Override public void phase2(Translog.Snapshot snapshot) throws ElasticSearchException { + logger.trace("Recovery [phase2] to {}: sending [{}] transaction log operations", node, snapshot.size()); + StopWatch stopWatch = new StopWatch().start(); + sendSnapshot(snapshot, false); + stopWatch.stop(); + logger.trace("Recovery [phase2] to {}: took [{}]", node, stopWatch.totalTime()); + recoveryStatus.phase2Time = stopWatch.totalTime().millis(); + recoveryStatus.phase2Operations = snapshot.size(); + } + + @Override public void phase3(Translog.Snapshot snapshot) throws ElasticSearchException { + logger.trace("Recovery [phase3] to {}: sending [{}] transaction log operations", node, snapshot.size()); + StopWatch stopWatch = new StopWatch().start(); + sendSnapshot(snapshot, true); + if (startRecoveryRequest.markAsRelocated) { + // TODO what happens if the recovery process fails afterwards, we need to mark this back to started + indexShard.relocated(); + } + stopWatch.stop(); + logger.trace("Recovery [phase3] to {}: took [{}]", node, stopWatch.totalTime()); + recoveryStatus.phase3Time = stopWatch.totalTime().millis(); + 
recoveryStatus.phase3Operations = snapshot.size(); + } + + private void sendSnapshot(Translog.Snapshot snapshot, boolean phase3) throws ElasticSearchException { + MemorySnapshot memorySnapshot; + if (snapshot instanceof MemorySnapshot) { + memorySnapshot = (MemorySnapshot) snapshot; + } else { + memorySnapshot = new MemorySnapshot(snapshot); + } + transportService.submitRequest(node, snapshotTransportAction, new SnapshotWrapper(memorySnapshot, phase3), VoidTransportResponseHandler.INSTANCE).txGet(); + } + }); + channel.sendResponse(recoveryStatus); + } + } + + private static class RecoveryStatus implements Streamable { + + List phase1FileNames = new ArrayList(); + List phase1FileSizes = new ArrayList(); + long phase1TotalSize; + long phase1Time; + + int phase2Operations; + long phase2Time; + + int phase3Operations; + long phase3Time; + + private RecoveryStatus() { + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + phase1FileNames = new ArrayList(size); + for (int i = 0; i < size; i++) { + phase1FileNames.add(in.readUTF()); + } + size = in.readInt(); + phase1FileSizes = new ArrayList(size); + for (int i = 0; i < size; i++) { + phase1FileSizes.add(in.readLong()); + } + phase1TotalSize = in.readLong(); + phase1Time = in.readLong(); + phase2Operations = in.readInt(); + phase2Time = in.readLong(); + phase3Operations = in.readInt(); + phase3Time = in.readLong(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(phase1FileNames.size()); + for (String name : phase1FileNames) { + out.writeUTF(name); + } + out.writeInt(phase1FileSizes.size()); + for (long size : phase1FileSizes) { + out.writeLong(size); + } + out.writeLong(phase1TotalSize); + out.writeLong(phase1Time); + out.writeInt(phase2Operations); + out.writeLong(phase2Time); + out.writeInt(phase3Operations); + out.writeLong(phase3Time); + } + } + + private class SnapshotTransportRequestHandler extends 
BaseTransportRequestHandler { + + @Override public SnapshotWrapper newInstance() { + return new SnapshotWrapper(); + } + + @Override public void messageReceived(SnapshotWrapper snapshot, TransportChannel channel) throws Exception { + if (!snapshot.phase3) { + // clean open index outputs in any case (there should not be any open, we close then in the chunk) + cleanOpenIndex(); + } + indexShard.performRecovery(snapshot.snapshot, snapshot.phase3); + if (snapshot.phase3) { + indexShard.refresh(true); + // probably need to do more here... + } + channel.sendResponse(VoidStreamable.INSTANCE); + } + } + + private static class SnapshotWrapper implements Streamable { + + private MemorySnapshot snapshot; + + private boolean phase3; + + private SnapshotWrapper() { + } + + private SnapshotWrapper(MemorySnapshot snapshot, boolean phase3) { + this.snapshot = snapshot; + this.phase3 = phase3; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + snapshot = new MemorySnapshot(); + snapshot.readFrom(in); + phase3 = in.readBoolean(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + snapshot.writeTo(out); + out.writeBoolean(phase3); + } + } + + private class FileChunkTransportRequestHandler extends BaseTransportRequestHandler { + + @Override public FileChunk newInstance() { + return new FileChunk(); + } + + @Override public void messageReceived(FileChunk request, TransportChannel channel) throws Exception { + IndexOutput indexOutput; + if (request.position == 0) { + // first request + indexOutput = openIndexOutputs.remove(request.name); + if (indexOutput != null) { + try { + indexOutput.close(); + } catch (IOException e) { + // ignore + } + } + indexOutput = store.directory().createOutput(request.name); + openIndexOutputs.put(request.name, indexOutput); + } else { + indexOutput = openIndexOutputs.get(request.name); + } + synchronized (indexOutput) { + indexOutput.writeBytes(request.content, 
request.content.length); + if (indexOutput.getFilePointer() == request.length) { + // we are done + indexOutput.close(); + openIndexOutputs.remove(request.name); + } + } + channel.sendResponse(VoidStreamable.INSTANCE); + } + } + + private static class FileChunk implements Streamable { + String name; + long position; + long length; + byte[] content; + + transient int contentLength; + + private FileChunk() { + } + + private FileChunk(String name, long position, long length, byte[] content, int contentLength) { + this.name = name; + this.position = position; + this.length = length; + this.content = content; + this.contentLength = contentLength; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + name = in.readUTF(); + position = in.readLong(); + length = in.readLong(); + content = new byte[in.readInt()]; + in.readFully(content); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(name); + out.writeLong(position); + out.writeLong(length); + out.writeInt(contentLength); + out.write(content, 0, contentLength); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryFailedException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryFailedException.java new file mode 100644 index 00000000000..71c38e34c47 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryFailedException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard.recovery;
+
+import org.elasticsearch.ElasticSearchException;
+import org.elasticsearch.cluster.node.Node;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Thrown when peer recovery of a shard fails for a reason other than the
+ * benign "already recovering" / "remote shard not started" cases, which are
+ * signalled with IgnoreRecoveryException instead. Always wraps the cause.
+ *
+ * @author kimchy (Shay Banon)
+ */
+public class RecoveryFailedException extends ElasticSearchException {
+
+    // NOTE(review): "node" is the local (recovering) node and "targetNode" is
+    // the peer recovered from -- matches the startRecovery(...) call sites.
+    public RecoveryFailedException(ShardId shardId, Node node, Node targetNode, Throwable cause) {
+        super(shardId + ": Recovery failed from " + targetNode + " into " + node, cause);
+    }
+}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java
new file mode 100644
index 00000000000..c4d5bb808f2
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elastic Search licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.search.Similarity; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractSimilarityProvider extends AbstractIndexComponent implements SimilarityProvider { + + private final String name; + + protected AbstractSimilarityProvider(Index index, @IndexSettings Settings indexSettings, String name) { + super(index, indexSettings); + this.name = name; + } + + @Override public String name() { + return this.name; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java new file mode 100644 index 00000000000..bfcd0743a39 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.search.DefaultSimilarity; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class DefaultSimilarityProvider extends AbstractSimilarityProvider { + + private DefaultSimilarity similarity; + + @Inject public DefaultSimilarityProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name); + this.similarity = new DefaultSimilarity(); + } + + @Override public DefaultSimilarity get() { + return similarity; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java new file mode 100644 index 00000000000..d26be93f7b4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; +import com.google.inject.assistedinject.FactoryProvider; +import com.google.inject.multibindings.MapBinder; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class SimilarityModule extends AbstractModule { + + private final Settings settings; + + public SimilarityModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + MapBinder similarityBinder + = MapBinder.newMapBinder(binder(), String.class, SimilarityProviderFactory.class); + + Map similarityProvidersSettings = settings.getGroups("index.similarity"); + for (Map.Entry entry : similarityProvidersSettings.entrySet()) { + String name = entry.getKey(); + Settings settings = entry.getValue(); + + Class type = settings.getAsClass("type", null, "org.elasticsearch.index.similarity.", "SimilarityProvider"); + if (type == null) { + throw new IllegalArgumentException("Similarity [" + name + "] must have a type associated with it"); + } + similarityBinder.addBinding(name).toProvider(FactoryProvider.newFactory(SimilarityProviderFactory.class, type)).in(Scopes.SINGLETON); + } + + bind(SimilarityService.class).in(Scopes.SINGLETON); + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java new file mode 100644 index 00000000000..1fa7c03a8b5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.similarity; + +import com.google.inject.Provider; +import org.apache.lucene.search.Similarity; +import org.elasticsearch.index.IndexComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface SimilarityProvider extends IndexComponent, Provider { + + String name(); + + T get(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityProviderFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityProviderFactory.java new file mode 100644 index 00000000000..30407bd8774 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityProviderFactory.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.elasticsearch.util.settings.Settings;
+
+/**
+ * Assisted-inject factory contract used by Guice (see SimilarityModule) to
+ * build a {@link SimilarityProvider} for one "index.similarity.<name>" group.
+ *
+ * @author kimchy (Shay Banon)
+ */
+public interface SimilarityProviderFactory {
+
+    /**
+     * Creates a provider for the given similarity name and its settings group.
+     */
+    SimilarityProvider create(String name, Settings settings);
+}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
new file mode 100644
index 00000000000..4aa55ac1662
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elastic Search licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.index.similarity; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.Inject; +import org.apache.lucene.search.Similarity; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; + +import static com.google.common.collect.Maps.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimilarityService extends AbstractIndexComponent { + + private final ImmutableMap similarityProviders; + + private final ImmutableMap similarities; + + public SimilarityService(Index index) { + this(index, ImmutableSettings.Builder.EMPTY_SETTINGS, null); + } + + @Inject public SimilarityService(Index index, @IndexSettings Settings indexSettings, + @Nullable Map providerFactories) { + super(index, indexSettings); + + Map similarityProviders = newHashMap(); + if (providerFactories != null) { + Map providersSettings = indexSettings.getGroups("index.similarity"); + for (Map.Entry entry : providerFactories.entrySet()) { + String similarityName = entry.getKey(); + SimilarityProviderFactory similarityProviderFactory = entry.getValue(); + + Settings similaritySettings = providersSettings.get(similarityName); + if (similaritySettings == null) { + similaritySettings = ImmutableSettings.Builder.EMPTY_SETTINGS; + } + + SimilarityProvider similarityProvider = similarityProviderFactory.create(similarityName, similaritySettings); + similarityProviders.put(similarityName, similarityProvider); + } + } + + // add defaults + if (!similarityProviders.containsKey("index")) { + similarityProviders.put("index", new DefaultSimilarityProvider(index, indexSettings, "index", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + if (!similarityProviders.containsKey("search")) { + 
similarityProviders.put("search", new DefaultSimilarityProvider(index, indexSettings, "search", ImmutableSettings.Builder.EMPTY_SETTINGS)); + } + this.similarityProviders = ImmutableMap.copyOf(similarityProviders); + + + Map similarities = newHashMap(); + for (SimilarityProvider provider : similarityProviders.values()) { + similarities.put(provider.name(), provider.get()); + } + this.similarities = ImmutableMap.copyOf(similarities); + } + + public Similarity similarity(String name) { + return similarities.get(name); + } + + public Similarity defaultIndexSimilarity() { + return similarities.get("index"); + } + + public Similarity defaultSearchSimilarity() { + return similarities.get("search"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/Store.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/Store.java new file mode 100644 index 00000000000..c3d417e5c24 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/Store.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store; + +import org.apache.lucene.store.Directory; +import org.elasticsearch.index.shard.IndexShardComponent; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.util.SizeValue; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public interface Store extends IndexShardComponent { + + /** + * The Lucene {@link Directory} this store is using. + */ + T directory(); + + /** + * Just deletes the content of the store. + */ + void deleteContent() throws IOException; + + /** + * Deletes the store completely. For example, in FS ones, also deletes the parent + * directory. + */ + void fullDelete() throws IOException; + + /** + * The estimated size this store is using. + */ + SizeValue estimateSize() throws IOException; + + /** + * The store can suggest the best setting for compound file the + * {@link org.apache.lucene.index.MergePolicy} will use. + */ + boolean suggestUseCompoundFile(); + + /** + * Close the store. + */ + void close() throws IOException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreException.java new file mode 100644 index 00000000000..0dd1089ed3a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Shard-scoped exception raised by {@link Store} implementations for storage
+ * level failures.
+ *
+ * @author kimchy (Shay Banon)
+ */
+public class StoreException extends IndexShardException {
+
+    public StoreException(ShardId shardId, String msg) {
+        super(shardId, msg);
+    }
+
+    public StoreException(ShardId shardId, String msg, Throwable cause) {
+        super(shardId, msg, cause);
+    }
+}
\ No newline at end of file
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreManagement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreManagement.java
new file mode 100644
index 00000000000..cb3e38924f6
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreManagement.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elastic Search licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import com.google.inject.Inject;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.jmx.MBean;
+import org.elasticsearch.jmx.ManagedAttribute;
+
+import java.io.IOException;
+
+/**
+ * JMX MBean exposing the shard store's estimated size; estimation failures
+ * are reported as -1 / "NA" rather than propagated to the JMX client.
+ *
+ * @author kimchy (Shay Banon)
+ */
+@MBean(objectName = "shardType=store", description = "The storage of the index shard")
+public class StoreManagement extends AbstractIndexShardComponent {
+
+    private final Store store;
+
+    @Inject public StoreManagement(Store store) {
+        super(store.shardId(), store.indexSettings());
+        this.store = store;
+    }
+
+    @ManagedAttribute(description = "Size in bytes")
+    public long getSizeInBytes() {
+        try {
+            return store.estimateSize().bytes();
+        } catch (IOException e) {
+            // JMX attribute: swallow and report a sentinel instead of throwing
+            return -1;
+        }
+    }
+
+    @ManagedAttribute(description = "Size")
+    public String getSize() {
+        try {
+            return store.estimateSize().toString();
+        } catch (IOException e) {
+            // JMX attribute: swallow and report a sentinel instead of throwing
+            return "NA";
+        }
+    }
+}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreModule.java
new file mode 100644
index 00000000000..a39c00d2c5c
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/StoreModule.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elastic Search licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store; + +import com.google.inject.AbstractModule; +import com.google.inject.Module; +import org.elasticsearch.index.store.bytebuffer.ByteBufferStoreModule; +import org.elasticsearch.index.store.fs.MmapFsStoreModule; +import org.elasticsearch.index.store.fs.NioFsStoreModule; +import org.elasticsearch.index.store.fs.SimpleFsStoreModule; +import org.elasticsearch.index.store.memory.MemoryStoreModule; +import org.elasticsearch.index.store.ram.RamStoreModule; +import org.elasticsearch.util.OsUtils; +import org.elasticsearch.util.guice.ModulesFactory; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class StoreModule extends AbstractModule { + + private final Settings settings; + + public StoreModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + Class storeModule = NioFsStoreModule.class; + if (OsUtils.WINDOWS) { + storeModule = SimpleFsStoreModule.class; + } + String storeType = settings.get("index.store.type"); + if ("ram".equalsIgnoreCase(storeType)) { + storeModule = RamStoreModule.class; + } else if ("memory".equalsIgnoreCase(storeType)) { + storeModule = MemoryStoreModule.class; + } else if ("bytebuffer".equalsIgnoreCase(storeType)) { + storeModule = ByteBufferStoreModule.class; + } else if ("fs".equalsIgnoreCase(storeType)) { + // nothing to set here ... 
(we default to fs) + } else if ("simplefs".equalsIgnoreCase(storeType)) { + storeModule = SimpleFsStoreModule.class; + } else if ("niofs".equalsIgnoreCase(storeType)) { + storeModule = NioFsStoreModule.class; + } else if ("mmapfs".equalsIgnoreCase(storeType)) { + storeModule = MmapFsStoreModule.class; + } else if (storeType != null) { + storeModule = settings.getAsClass("index.store.type", storeModule, "org.elasticsearch.index.store.", "StoreModule"); + } + ModulesFactory.createModule(storeModule, settings).configure(binder()); + bind(StoreManagement.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferDirectory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferDirectory.java new file mode 100644 index 00000000000..66933a239ef --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferDirectory.java @@ -0,0 +1,212 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.bytebuffer; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.SingleInstanceLockFactory; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashMap; +import sun.nio.ch.DirectBuffer; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ArrayBlockingQueue; + +/** + * @author kimchy (Shay Banon) + */ +public class ByteBufferDirectory extends Directory { + + private final Map files = new NonBlockingHashMap(); + + private final Queue cache; + + private final int bufferSizeInBytes; + + private final SizeValue bufferSize; + + private final SizeValue cacheSize; + + private final boolean disableCache; + + private final boolean direct; + + public ByteBufferDirectory() { + this(new SizeValue(1, SizeUnit.KB), new SizeValue(20, SizeUnit.MB), false, false); + } + + public ByteBufferDirectory(SizeValue bufferSize, SizeValue cacheSize, boolean direct, boolean warmCache) { + disableCache = cacheSize.bytes() == 0; + if (!disableCache && cacheSize.bytes() < bufferSize.bytes()) { + throw new IllegalArgumentException("Cache size [" + cacheSize + "] is smaller than buffer size [" + bufferSize + "]"); + } + this.bufferSize = bufferSize; + this.bufferSizeInBytes = (int) bufferSize.bytes(); + int numberOfCacheEntries = (int) (cacheSize.bytes() / bufferSize.bytes()); + this.cache = disableCache ? null : new ArrayBlockingQueue(numberOfCacheEntries); + this.cacheSize = disableCache ? 
new SizeValue(0, SizeUnit.BYTES) : new SizeValue(numberOfCacheEntries * bufferSize.bytes(), SizeUnit.BYTES); + this.direct = direct; + setLockFactory(new SingleInstanceLockFactory()); + if (!disableCache && warmCache) { + for (int i = 0; i < numberOfCacheEntries; i++) { + cache.add(createBuffer()); + } + } + } + + public SizeValue bufferSize() { + return this.bufferSize; + } + + public SizeValue cacheSize() { + return this.cacheSize; + } + + int bufferSizeInBytes() { + return bufferSizeInBytes; + } + + public boolean isDirect() { + return direct; + } + + @Override public String[] listAll() throws IOException { + return files.keySet().toArray(new String[0]); + } + + @Override public boolean fileExists(String name) throws IOException { + return files.containsKey(name); + } + + @Override public long fileModified(String name) throws IOException { + ByteBufferFile file = files.get(name); + if (file == null) + throw new FileNotFoundException(name); + return file.lastModified(); + } + + @Override public void touchFile(String name) throws IOException { + ByteBufferFile file = files.get(name); + if (file == null) + throw new FileNotFoundException(name); + + long ts2, ts1 = System.currentTimeMillis(); + do { + try { + Thread.sleep(0, 1); + } catch (InterruptedException ie) { + // In 3.0 we will change this to throw + // InterruptedException instead + Thread.currentThread().interrupt(); + throw new RuntimeException(ie); + } + ts2 = System.currentTimeMillis(); + } while (ts1 == ts2); + + file.lastModified(ts2); + } + + @Override public void deleteFile(String name) throws IOException { + ByteBufferFile file = files.remove(name); + if (file == null) + throw new FileNotFoundException(name); + file.clean(); + } + + @Override public long fileLength(String name) throws IOException { + ByteBufferFile file = files.get(name); + if (file == null) + throw new FileNotFoundException(name); + return file.length(); + } + + @Override public IndexOutput createOutput(String name) throws 
IOException { + ByteBufferFile file = new ByteBufferFile(this); + ByteBufferFile existing = files.put(name, file); + if (existing != null) { + existing.clean(); + } + return new ByteBufferIndexOutput(this, file); + } + + @Override public IndexInput openInput(String name) throws IOException { + ByteBufferFile file = files.get(name); + if (file == null) + throw new FileNotFoundException(name); + return new ByteBufferIndexInput(this, file); + } + + @Override public void close() throws IOException { + String[] files = listAll(); + for (String file : files) { + deleteFile(file); + } + if (!disableCache) { + ByteBuffer buffer = cache.poll(); + while (buffer != null) { + closeBuffer(buffer); + buffer = cache.poll(); + } + } + } + + void releaseBuffer(ByteBuffer byteBuffer) { + if (disableCache) { + closeBuffer(byteBuffer); + return; + } + boolean success = cache.offer(byteBuffer); + if (!success) { + closeBuffer(byteBuffer); + } + } + + ByteBuffer acquireBuffer() { + if (disableCache) { + return createBuffer(); + } + ByteBuffer byteBuffer = cache.poll(); + if (byteBuffer == null) { + // everything is taken, return a new one + return createBuffer(); + } + byteBuffer.position(0); + return byteBuffer; + } + + ByteBuffer createBuffer() { + if (isDirect()) { + return ByteBuffer.allocateDirect(bufferSizeInBytes()); + } + return ByteBuffer.allocate(bufferSizeInBytes()); + } + + void closeBuffer(ByteBuffer byteBuffer) { + if (isDirect()) { + ((DirectBuffer) byteBuffer).cleaner().clean(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferFile.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferFile.java new file mode 100644 index 00000000000..ae814be640a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferFile.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.bytebuffer; + +import java.nio.ByteBuffer; + +/** + * @author kimchy (Shay Banon) + */ +public class ByteBufferFile { + + private final ByteBufferDirectory dir; + + private volatile long lastModified = System.currentTimeMillis(); + + private volatile long length; + + private volatile ByteBuffer[] buffers; + + public ByteBufferFile(ByteBufferDirectory dir) { + this.dir = dir; + } + + long lastModified() { + return lastModified; + } + + void lastModified(long lastModified) { + this.lastModified = lastModified; + } + + long length() { + return length; + } + + void length(long length) { + this.length = length; + } + + ByteBuffer buffer(int i) { + return this.buffers[i]; + } + + int numberOfBuffers() { + return this.buffers.length; + } + + void buffers(ByteBuffer[] buffers) { + this.buffers = buffers; + } + + void clean() { + if (buffers != null) { + for (ByteBuffer buffer : buffers) { + dir.releaseBuffer(buffer); + } + buffers = null; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferIndexInput.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferIndexInput.java new file mode 100644 index 00000000000..802de39c39f 
--- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferIndexInput.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.bytebuffer; + +import org.apache.lucene.store.IndexInput; + +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * @author kimchy (Shay Banon) + */ +public class ByteBufferIndexInput extends IndexInput { + + private final ByteBufferFile file; + private final int bufferSize; + private final long length; + + private ByteBuffer currentBuffer; + private int currentBufferIndex; + + private long bufferStart; + + + public ByteBufferIndexInput(ByteBufferDirectory dir, ByteBufferFile file) throws IOException { + this.file = file; + this.bufferSize = dir.bufferSizeInBytes(); + this.length = file.length(); + switchCurrentBuffer(true); + } + + @Override public byte readByte() throws IOException { + if (!currentBuffer.hasRemaining()) { + currentBufferIndex++; + switchCurrentBuffer(true); + } + return currentBuffer.get(); + } + + @Override public void readBytes(byte[] b, int offset, int len) throws IOException { + while (len > 0) { + if (!currentBuffer.hasRemaining()) { + 
currentBufferIndex++; + switchCurrentBuffer(true); + } + + int remainInBuffer = currentBuffer.remaining(); + int bytesToCopy = len < remainInBuffer ? len : remainInBuffer; + currentBuffer.get(b, offset, bytesToCopy); + offset += bytesToCopy; + len -= bytesToCopy; + } + } + + @Override public void close() throws IOException { + } + + @Override public long getFilePointer() { + return currentBufferIndex < 0 ? 0 : bufferStart + currentBuffer.position(); + } + + @Override public void seek(long pos) throws IOException { + if (currentBuffer == null || pos < bufferStart || pos >= bufferStart + bufferSize) { + currentBufferIndex = (int) (pos / bufferSize); + switchCurrentBuffer(false); + } + currentBuffer.position((int) (pos % bufferSize)); + } + + @Override public long length() { + return length; + } + + private void switchCurrentBuffer(boolean enforceEOF) throws IOException { + if (currentBufferIndex >= file.numberOfBuffers()) { + // end of file reached, no more buffers left + if (enforceEOF) + throw new IOException("Read past EOF"); + else { + // Force EOF if a read takes place at this position + currentBufferIndex--; + currentBuffer.position(bufferSize); + } + } else { + // we must duplicate (and make it read only while we are at it) since we need position and such to be independant + currentBuffer = file.buffer(currentBufferIndex).asReadOnlyBuffer(); + currentBuffer.position(0); + bufferStart = (long) bufferSize * (long) currentBufferIndex; + } + } + + @Override public Object clone() { + ByteBufferIndexInput cloned = (ByteBufferIndexInput) super.clone(); + cloned.currentBuffer = currentBuffer.asReadOnlyBuffer(); + return cloned; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferIndexOutput.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferIndexOutput.java new file mode 100644 index 00000000000..e75fe4d6e3e --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferIndexOutput.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.bytebuffer; + +import org.apache.lucene.store.IndexOutput; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; + +/** + * @author kimchy (Shay Banon) + */ +public class ByteBufferIndexOutput extends IndexOutput { + + private final ByteBufferDirectory dir; + private final ByteBufferFile file; + + private ByteBuffer currentBuffer; + private int currentBufferIndex; + + private long bufferStart; + private int bufferLength; + + private ArrayList buffers = new ArrayList(); + + public ByteBufferIndexOutput(ByteBufferDirectory dir, ByteBufferFile file) throws IOException { + this.dir = dir; + this.file = file; + switchCurrentBuffer(); + } + + @Override public void writeByte(byte b) throws IOException { + if (!currentBuffer.hasRemaining()) { + currentBufferIndex++; + switchCurrentBuffer(); + } + currentBuffer.put(b); + } + + @Override public void writeBytes(byte[] b, int offset, int len) throws IOException { + while (len > 0) { + if (!currentBuffer.hasRemaining()) { + 
currentBufferIndex++; + switchCurrentBuffer(); + } + + int remainInBuffer = currentBuffer.remaining(); + int bytesToCopy = len < remainInBuffer ? len : remainInBuffer; + currentBuffer.put(b, offset, bytesToCopy); + offset += bytesToCopy; + len -= bytesToCopy; + } + } + + @Override public void flush() throws IOException { + file.lastModified(System.currentTimeMillis()); + setFileLength(); + } + + @Override public void close() throws IOException { + flush(); + file.buffers(buffers.toArray(new ByteBuffer[buffers.size()])); + } + + @Override public long getFilePointer() { + return currentBufferIndex < 0 ? 0 : bufferStart + currentBuffer.position(); + } + + @Override public void seek(long pos) throws IOException { + // set the file length in case we seek back + // and flush() has not been called yet + setFileLength(); + if (pos < bufferStart || pos >= bufferStart + bufferLength) { + currentBufferIndex = (int) (pos / dir.bufferSizeInBytes()); + switchCurrentBuffer(); + } + currentBuffer.position((int) (pos % dir.bufferSizeInBytes())); + } + + @Override public long length() throws IOException { + return file.length(); + } + + private void switchCurrentBuffer() throws IOException { + if (currentBufferIndex == buffers.size()) { + currentBuffer = dir.acquireBuffer(); + buffers.add(currentBuffer); + } else { + currentBuffer = buffers.get(currentBufferIndex); + } + currentBuffer.position(0); + bufferStart = (long) dir.bufferSizeInBytes() * (long) currentBufferIndex; + bufferLength = currentBuffer.capacity(); + } + + private void setFileLength() { + long pointer = bufferStart + currentBuffer.position(); + if (pointer > file.length()) { + file.length(pointer); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferStore.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferStore.java new file mode 100644 index 00000000000..809d63d13ff --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferStore.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.bytebuffer; + +import com.google.inject.Inject; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.support.AbstractStore; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ByteBufferStore extends AbstractStore { + + private final SizeValue bufferSize; + + private final SizeValue cacheSize; + + private final boolean direct; + + private final boolean warmCache; + + private final ByteBufferDirectory directory; + + @Inject public ByteBufferStore(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + + this.bufferSize = componentSettings.getAsSize("bufferSize", new SizeValue(1, SizeUnit.KB)); + this.cacheSize = componentSettings.getAsSize("cacheSize", new SizeValue(20, SizeUnit.MB)); + this.direct = componentSettings.getAsBoolean("direct", true); 
+ this.warmCache = componentSettings.getAsBoolean("warmCache", true); + this.directory = new ByteBufferDirectory(bufferSize, cacheSize, direct, warmCache); + logger.debug("Using [ByteBuffer] Store with bufferSize[{}], cacheSize[{}], direct[{}], warmCache[{}]", + new Object[]{directory.bufferSize(), directory.cacheSize(), directory.isDirect(), warmCache}); + } + + @Override public ByteBufferDirectory directory() { + return directory; + } + + /** + * Its better to not use the compound format when using the Ram store. + */ + @Override public boolean suggestUseCompoundFile() { + return false; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferStoreModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferStoreModule.java new file mode 100644 index 00000000000..16165bc0436 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/bytebuffer/ByteBufferStoreModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.bytebuffer; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.store.Store; + +/** + * @author kimchy (Shay Banon) + */ +public class ByteBufferStoreModule extends AbstractModule { + + @Override protected void configure() { + bind(Store.class).to(ByteBufferStore.class).asEagerSingleton(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/AbstractFsStore.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/AbstractFsStore.java new file mode 100644 index 00000000000..c653a41c647 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/AbstractFsStore.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.fs; + +import org.apache.lucene.store.FSDirectory; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.support.AbstractStore; +import org.elasticsearch.util.io.FileSystemUtils; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractFsStore extends AbstractStore { + + public AbstractFsStore(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + } + + @Override public void fullDelete() throws IOException { + FileSystemUtils.deleteRecursively(directory().getFile()); + // if we are the last ones, delete also the actual index + if (directory().getFile().getParentFile().list().length == 0) { + FileSystemUtils.deleteRecursively(directory().getFile().getParentFile()); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/FsStores.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/FsStores.java new file mode 100644 index 00000000000..9e92160b069 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/FsStores.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.fs; + +import org.elasticsearch.index.shard.ShardId; + +import java.io.File; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class FsStores { + + public static final String DEFAULT_INDICES_LOCATION = "indices"; + + public static synchronized File createStoreFilePath(File basePath, String localNodeId, ShardId shardId) throws IOException { + // TODO we need to clean the nodeId from invalid folder characters + File f = new File(new File(basePath, DEFAULT_INDICES_LOCATION), localNodeId); + f = new File(f, shardId.index().name()); + f = new File(f, Integer.toString(shardId.id())); + + if (f.exists() && f.isDirectory()) { + return f; + } + boolean result = false; + for (int i = 0; i < 5; i++) { + result = f.mkdirs(); + if (result) { + break; + } + } + if (!result) { + if (f.exists() && f.isDirectory()) { + return f; + } + throw new IOException("Failed to create directories for [" + f + "]"); + } + return f; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/MmapFsStore.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/MmapFsStore.java new file mode 100644 index 00000000000..71551ec5043 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/MmapFsStore.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.fs; + +import com.google.inject.Inject; +import org.apache.lucene.store.MMapDirectory; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.LocalNodeId; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.IndexShardLifecycle; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.util.settings.Settings; + +import java.io.File; +import java.io.IOException; + +import static org.elasticsearch.index.store.fs.FsStores.*; + +/** + * @author kimchy (Shay Banon) + */ +@IndexShardLifecycle +public class MmapFsStore extends AbstractFsStore { + + private final boolean syncToDisk; + + private final MMapDirectory directory; + + @Inject public MmapFsStore(ShardId shardId, @IndexSettings Settings indexSettings, Environment environment, @LocalNodeId String localNodeId) throws IOException { + super(shardId, indexSettings); + // by default, we don't need to sync to disk, since we use the gateway + this.syncToDisk = componentSettings.getAsBoolean("syncToDisk", false); + this.directory = new CustomMMapDirectory(createStoreFilePath(environment.workWithClusterFile(), localNodeId, shardId), syncToDisk); + logger.debug("Using [MmapFs] Store with path [{}]", 
directory.getFile()); + } + + @Override public MMapDirectory directory() { + return directory; + } + + private static class CustomMMapDirectory extends MMapDirectory { + + private final boolean syncToDisk; + + private CustomMMapDirectory(File path, boolean syncToDisk) throws IOException { + super(path); + this.syncToDisk = syncToDisk; + } + + @Override public void sync(String name) throws IOException { + if (!syncToDisk) { + return; + } + super.sync(name); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/MmapFsStoreModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/MmapFsStoreModule.java new file mode 100644 index 00000000000..6da56a7fa8e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/MmapFsStoreModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.fs; + +import com.google.inject.AbstractModule; +import org.elasticsearch.index.store.Store; + +/** + * @author kimchy (Shay Banon) + */ +public class MmapFsStoreModule extends AbstractModule { + + @Override protected void configure() { + bind(Store.class).to(MmapFsStore.class).asEagerSingleton(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/NioFsStore.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/NioFsStore.java new file mode 100644 index 00000000000..160fa2c3f54 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/NioFsStore.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.fs; + +import com.google.inject.Inject; +import org.apache.lucene.store.NIOFSDirectory; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.LocalNodeId; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.util.settings.Settings; + +import java.io.File; +import java.io.IOException; + +import static org.elasticsearch.index.store.fs.FsStores.*; + +/** + * @author kimchy (Shay Banon) + */ +public class NioFsStore extends AbstractFsStore { + + private final boolean syncToDisk; + + private final NIOFSDirectory directory; + + @Inject public NioFsStore(ShardId shardId, @IndexSettings Settings indexSettings, Environment environment, @LocalNodeId String localNodeId) throws IOException { + super(shardId, indexSettings); + // by default, we don't need to sync to disk, since we use the gateway + this.syncToDisk = componentSettings.getAsBoolean("syncToDisk", false); + this.directory = new CustomNioFSDirectory(createStoreFilePath(environment.workWithClusterFile(), localNodeId, shardId), syncToDisk); + logger.debug("Using [NioFs] Store with path [{}], syncToDisk [{}]", directory.getFile(), syncToDisk); + } + + @Override public NIOFSDirectory directory() { + return directory; + } + + private static class CustomNioFSDirectory extends NIOFSDirectory { + + private final boolean syncToDisk; + + private CustomNioFSDirectory(File path, boolean syncToDisk) throws IOException { + super(path); + this.syncToDisk = syncToDisk; + } + + @Override public void sync(String name) throws IOException { + if (!syncToDisk) { + return; + } + super.sync(name); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/NioFsStoreModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/fs/NioFsStoreModule.java new file mode 100644 index 00000000000..1773cdaa292 --- /dev/null +++ 
package org.elasticsearch.index.store.fs;

import com.google.inject.AbstractModule;
import org.elasticsearch.index.store.Store;

/**
 * Guice module binding {@link Store} to the NIO-based filesystem store
 * implementation ({@link NioFsStore}).
 *
 * @author kimchy (Shay Banon)
 */
public class NioFsStoreModule extends AbstractModule {

    @Override protected void configure() {
        // Eager singleton: one store per shard injector, created up front.
        bind(Store.class).to(NioFsStore.class).asEagerSingleton();
    }
}
package org.elasticsearch.index.store.fs;

import com.google.inject.Inject;
import org.apache.lucene.store.SimpleFSDirectory;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.LocalNodeId;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.settings.Settings;

import java.io.File;
import java.io.IOException;

import static org.elasticsearch.index.store.fs.FsStores.*;

/**
 * A shard store backed by Lucene's {@link SimpleFSDirectory}.
 *
 * <p>By default {@code syncToDisk} is {@code false}: durability is expected to
 * come from the gateway, so {@code sync()} calls are turned into no-ops by the
 * wrapping {@link CustomSimpleFSDirectory}.
 *
 * @author kimchy (Shay Banon)
 */
public class SimpleFsStore extends AbstractFsStore {

    // When false (default), Directory#sync is a no-op; the gateway provides
    // durability instead of local fsync.
    private final boolean syncToDisk;

    // Made final for consistency with NioFsStore: assigned once in the
    // constructor and never reassigned.
    private final SimpleFSDirectory directory;

    @Inject public SimpleFsStore(ShardId shardId, @IndexSettings Settings indexSettings, Environment environment, @LocalNodeId String localNodeId) throws IOException {
        super(shardId, indexSettings);
        // by default, we don't need to sync to disk, since we use the gateway
        this.syncToDisk = componentSettings.getAsBoolean("syncToDisk", false);
        this.directory = new CustomSimpleFSDirectory(createStoreFilePath(environment.workWithClusterFile(), localNodeId, shardId), syncToDisk);
        logger.debug("Using [SimpleFs] Store with path [{}], syncToDisk [{}]", directory.getFile(), syncToDisk);
    }

    @Override public SimpleFSDirectory directory() {
        return directory;
    }

    /**
     * A {@link SimpleFSDirectory} whose {@link #sync(String)} is suppressed
     * unless {@code syncToDisk} is enabled.
     */
    private static class CustomSimpleFSDirectory extends SimpleFSDirectory {

        private final boolean syncToDisk;

        private CustomSimpleFSDirectory(File path, boolean syncToDisk) throws IOException {
            super(path);
            this.syncToDisk = syncToDisk;
        }

        @Override public void sync(String name) throws IOException {
            if (!syncToDisk) {
                return;
            }
            super.sync(name);
        }
    }
}
package org.elasticsearch.index.store.fs;

import com.google.inject.AbstractModule;
import org.elasticsearch.index.store.Store;

/**
 * Guice module binding {@link Store} to the simple filesystem store
 * implementation ({@link SimpleFsStore}).
 *
 * @author kimchy (Shay Banon)
 */
public class SimpleFsStoreModule extends AbstractModule {

    @Override protected void configure() {
        // Eager singleton: one store per shard injector, created up front.
        bind(Store.class).to(SimpleFsStore.class).asEagerSingleton();
    }
}
package org.elasticsearch.index.store.memory;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.elasticsearch.util.SizeUnit;
import org.elasticsearch.util.SizeValue;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

import static org.elasticsearch.util.concurrent.ConcurrentMaps.*;

/**
 * A Lucene {@link Directory} that keeps every file entirely in memory, backed
 * by fixed-size byte buffers recycled through an optional bounded cache.
 *
 * <p>Each buffer is {@code bufferSize} bytes; up to
 * {@code cacheSize / bufferSize} released buffers are retained for reuse. A
 * {@code cacheSize} of 0 disables caching entirely.
 *
 * <p>NOTE: generic type parameters restored here — the collections were
 * declared raw in the extracted text, which cannot have compiled as written.
 *
 * @author kimchy (Shay Banon)
 */
public class MemoryDirectory extends Directory {

    // File name -> in-memory file contents. Concurrent map: Lucene may list
    // and open files while others are being written.
    private final Map<String, MemoryFile> files = newConcurrentMap();

    // Pool of recycled buffers; null when caching is disabled.
    private final Queue<byte[]> cache;

    private final int bufferSizeInBytes;

    private final SizeValue bufferSize;

    // Effective cache size, rounded down to a whole number of buffers.
    private final SizeValue cacheSize;

    private final boolean disableCache;

    public MemoryDirectory() {
        this(new SizeValue(1, SizeUnit.KB), new SizeValue(20, SizeUnit.MB), false);
    }

    /**
     * @param bufferSize size of each individual buffer
     * @param cacheSize  total memory dedicated to the recycled-buffer cache; 0 disables caching
     * @param warmCache  if true (and caching is enabled), pre-allocate every cache buffer up front
     */
    public MemoryDirectory(SizeValue bufferSize, SizeValue cacheSize, boolean warmCache) {
        disableCache = cacheSize.bytes() == 0;
        if (!disableCache && cacheSize.bytes() < bufferSize.bytes()) {
            throw new IllegalArgumentException("Cache size [" + cacheSize + "] is smaller than buffer size [" + bufferSize + "]");
        }
        this.bufferSize = bufferSize;
        this.bufferSizeInBytes = (int) bufferSize.bytes();
        int numberOfCacheEntries = (int) (cacheSize.bytes() / bufferSize.bytes());
        this.cache = disableCache ? null : new ArrayBlockingQueue<byte[]>(numberOfCacheEntries);
        this.cacheSize = disableCache ? new SizeValue(0, SizeUnit.BYTES) : new SizeValue(numberOfCacheEntries * bufferSize.bytes(), SizeUnit.BYTES);
        setLockFactory(new SingleInstanceLockFactory());
        if (!disableCache && warmCache) {
            for (int i = 0; i < numberOfCacheEntries; i++) {
                cache.add(createBuffer());
            }
        }
    }

    public SizeValue bufferSize() {
        return this.bufferSize;
    }

    public SizeValue cacheSize() {
        return this.cacheSize;
    }

    int bufferSizeInBytes() {
        return bufferSizeInBytes;
    }

    @Override public String[] listAll() throws IOException {
        return files.keySet().toArray(new String[0]);
    }

    @Override public boolean fileExists(String name) throws IOException {
        return files.containsKey(name);
    }

    @Override public long fileModified(String name) throws IOException {
        MemoryFile file = files.get(name);
        if (file == null) {
            throw new FileNotFoundException(name);
        }
        return file.lastModified();
    }

    @Override public void touchFile(String name) throws IOException {
        MemoryFile file = files.get(name);
        if (file == null) {
            throw new FileNotFoundException(name);
        }

        // Busy-wait until the clock ticks so the new timestamp is strictly
        // greater than the previous one (mirrors Lucene's RAMDirectory).
        long ts2, ts1 = System.currentTimeMillis();
        do {
            try {
                Thread.sleep(0, 1);
            } catch (InterruptedException ie) {
                // In 3.0 we will change this to throw
                // InterruptedException instead
                Thread.currentThread().interrupt();
                throw new RuntimeException(ie);
            }
            ts2 = System.currentTimeMillis();
        } while (ts1 == ts2);

        file.lastModified(ts2);
    }

    @Override public void deleteFile(String name) throws IOException {
        MemoryFile file = files.remove(name);
        if (file == null) {
            throw new FileNotFoundException(name);
        }
        // Return the file's buffers to the cache (or free them).
        file.clean();
    }

    @Override public long fileLength(String name) throws IOException {
        MemoryFile file = files.get(name);
        if (file == null) {
            throw new FileNotFoundException(name);
        }
        return file.length();
    }

    @Override public IndexOutput createOutput(String name) throws IOException {
        MemoryFile file = new MemoryFile(this);
        // Replace any existing file of the same name and reclaim its buffers.
        MemoryFile existing = files.put(name, file);
        if (existing != null) {
            existing.clean();
        }
        return new MemoryIndexOutput(this, file);
    }

    @Override public IndexInput openInput(String name) throws IOException {
        MemoryFile file = files.get(name);
        if (file == null) {
            throw new FileNotFoundException(name);
        }
        return new MemoryIndexInput(this, file);
    }

    @Override public void close() throws IOException {
        // Delete all files (releasing their buffers), then drain the cache.
        // Renamed the local from "files" to avoid shadowing the field.
        String[] fileNames = listAll();
        for (String fileName : fileNames) {
            deleteFile(fileName);
        }
        if (!disableCache) {
            byte[] buffer = cache.poll();
            while (buffer != null) {
                closeBuffer(buffer);
                buffer = cache.poll();
            }
        }
    }

    /** Returns a buffer to the cache, or discards it if the cache is full/disabled. */
    void releaseBuffer(byte[] buffer) {
        if (disableCache) {
            closeBuffer(buffer);
            return;
        }
        boolean success = cache.offer(buffer);
        if (!success) {
            closeBuffer(buffer);
        }
    }

    /** Takes a buffer from the cache, allocating a fresh one if none is available. */
    byte[] acquireBuffer() {
        if (disableCache) {
            return createBuffer();
        }
        byte[] buffer = cache.poll();
        if (buffer == null) {
            // everything is taken, return a new one
            return createBuffer();
        }
        return buffer;
    }

    byte[] createBuffer() {
        return new byte[bufferSizeInBytes];
    }

    // Hook for subclasses/alternative buffer implementations; no-op for plain
    // heap byte arrays.
    void closeBuffer(byte[] buffer) {
    }
}
package org.elasticsearch.index.store.memory;

/**
 * An in-memory "file": a list of fixed-size byte buffers owned by a
 * {@link MemoryDirectory}, plus length and last-modified metadata.
 *
 * <p>NOTE(review): the {@code buffers} array is volatile but {@link #clean()}
 * releases and nulls it without synchronization — presumably Lucene's file
 * lifecycle guarantees no reader is active when a file is deleted; verify.
 *
 * @author kimchy (Shay Banon)
 */
public class MemoryFile {

    // Owning directory; buffers are returned to its cache on clean().
    private final MemoryDirectory dir;

    private volatile long lastModified = System.currentTimeMillis();

    // Logical file length in bytes (may be less than total buffer capacity).
    private volatile long length;

    // Backing buffers; set once by the writer when the output is closed.
    private volatile byte[][] buffers;

    public MemoryFile(MemoryDirectory dir) {
        this.dir = dir;
    }

    long lastModified() {
        return lastModified;
    }

    void lastModified(long lastModified) {
        this.lastModified = lastModified;
    }

    long length() {
        return length;
    }

    void length(long length) {
        this.length = length;
    }

    byte[] buffer(int i) {
        return this.buffers[i];
    }

    int numberOfBuffers() {
        return this.buffers.length;
    }

    void buffers(byte[][] buffers) {
        this.buffers = buffers;
    }

    /** Releases all buffers back to the directory's cache and drops them. */
    void clean() {
        if (buffers != null) {
            for (byte[] buffer : buffers) {
                dir.releaseBuffer(buffer);
            }
            buffers = null;
        }
    }
}
package org.elasticsearch.index.store.memory;

import org.apache.lucene.store.IndexInput;

import java.io.IOException;

/**
 * An {@link IndexInput} reading from a {@link MemoryFile}'s buffer list
 * (modelled on Lucene's RAMInputStream — note the "RAMFile" wording in the
 * size-check message below).
 *
 * @author kimchy (Shay Banon)
 */
public class MemoryIndexInput extends IndexInput {

    // Buffer size of the owning directory; all buffers share this capacity.
    private final int bufferSize;
    private final MemoryFile file;

    // File length captured at open time.
    private long length;

    // Current buffer and its index within the file; -1 until first read.
    private byte[] currentBuffer;
    private int currentBufferIndex;

    // Read position within currentBuffer, absolute offset of the buffer's
    // start, and number of valid bytes in the buffer (may be < bufferSize in
    // the last buffer).
    private int bufferPosition;
    private long bufferStart;
    private int bufferLength;

    public MemoryIndexInput(MemoryDirectory dir, MemoryFile file) throws IOException {
        this.bufferSize = dir.bufferSizeInBytes();
        this.file = file;

        length = file.length();
        // Buffer index must fit in an int.
        if (length / dir.bufferSizeInBytes() >= Integer.MAX_VALUE) {
            throw new IOException("Too large RAMFile! " + length);
        }

        // make sure that we switch to the
        // first needed buffer lazily
        currentBufferIndex = -1;
        currentBuffer = null;
    }

    @Override public byte readByte() throws IOException {
        if (bufferPosition >= bufferLength) {
            currentBufferIndex++;
            switchCurrentBuffer(true);
        }
        return currentBuffer[bufferPosition++];
    }

    @Override public void readBytes(byte[] b, int offset, int len) throws IOException {
        while (len > 0) {
            if (bufferPosition >= bufferLength) {
                currentBufferIndex++;
                switchCurrentBuffer(true);
            }

            // Copy as much as remains in the current buffer, then advance.
            int remainInBuffer = bufferLength - bufferPosition;
            int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
            System.arraycopy(currentBuffer, bufferPosition, b, offset, bytesToCopy);
            offset += bytesToCopy;
            len -= bytesToCopy;
            bufferPosition += bytesToCopy;
        }
    }

    @Override public void close() throws IOException {
        // Nothing to release; buffers are owned by the MemoryFile.
    }

    @Override public long getFilePointer() {
        return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
    }

    @Override public void seek(long pos) throws IOException {
        // Only switch buffers when seeking outside the current one.
        if (currentBuffer == null || pos < bufferStart || pos >= bufferStart + bufferSize) {
            currentBufferIndex = (int) (pos / bufferSize);
            switchCurrentBuffer(false);
        }
        bufferPosition = (int) (pos % bufferSize);
    }

    @Override public long length() {
        return length;
    }

    /**
     * Makes {@code currentBufferIndex} the active buffer.
     *
     * @param enforceEOF if true, throw when the index is past the last buffer;
     *                    otherwise arrange for the next read to hit EOF.
     */
    private void switchCurrentBuffer(boolean enforceEOF) throws IOException {
        if (currentBufferIndex >= file.numberOfBuffers()) {
            // end of file reached, no more buffers left
            if (enforceEOF)
                throw new IOException("Read past EOF");
            else {
                // Force EOF if a read takes place at this position
                currentBufferIndex--;
                bufferPosition = bufferSize;
            }
        } else {
            currentBuffer = file.buffer(currentBufferIndex);
            bufferPosition = 0;
            bufferStart = (long) bufferSize * (long) currentBufferIndex;
            // Last buffer may be only partially filled.
            long buflen = length - bufferStart;
            bufferLength = buflen > bufferSize ? bufferSize : (int) buflen;
        }
    }
}
package org.elasticsearch.index.store.memory;

import org.apache.lucene.store.IndexOutput;

import java.io.IOException;
import java.util.ArrayList;

/**
 * An {@link IndexOutput} writing into buffers acquired from a
 * {@link MemoryDirectory}; on {@link #close()} the accumulated buffers are
 * handed to the target {@link MemoryFile}.
 *
 * <p>NOTE: the buffer list is declared {@code ArrayList<byte[]>} here — it was
 * raw in the extracted text, which cannot have compiled ({@code buffers.get}
 * would return {@code Object}). Also made {@code final}: it is never
 * reassigned.
 *
 * @author kimchy (Shay Banon)
 */
public class MemoryIndexOutput extends IndexOutput {

    private final MemoryDirectory dir;
    private final MemoryFile file;

    // Buffers written so far, in file order; transferred to `file` on close().
    private final ArrayList<byte[]> buffers = new ArrayList<byte[]>();

    // Current buffer and its index within the file; -1 until the first write.
    private byte[] currentBuffer;
    private int currentBufferIndex;

    // Write position within currentBuffer, absolute offset of the buffer's
    // start, and the buffer's capacity.
    private int bufferPosition;
    private long bufferStart;
    private int bufferLength;

    public MemoryIndexOutput(MemoryDirectory dir, MemoryFile file) {
        this.dir = dir;
        this.file = file;

        // make sure that we switch to the
        // first needed buffer lazily
        currentBufferIndex = -1;
        currentBuffer = null;
    }

    @Override public void writeByte(byte b) throws IOException {
        if (bufferPosition == bufferLength) {
            currentBufferIndex++;
            switchCurrentBuffer();
        }
        currentBuffer[bufferPosition++] = b;
    }

    @Override public void writeBytes(byte[] b, int offset, int len) throws IOException {
        while (len > 0) {
            if (bufferPosition == bufferLength) {
                currentBufferIndex++;
                switchCurrentBuffer();
            }

            // Fill the remainder of the current buffer, then advance.
            int remainInBuffer = currentBuffer.length - bufferPosition;
            int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
            System.arraycopy(b, offset, currentBuffer, bufferPosition, bytesToCopy);
            offset += bytesToCopy;
            len -= bytesToCopy;
            bufferPosition += bytesToCopy;
        }
    }

    @Override public void flush() throws IOException {
        file.lastModified(System.currentTimeMillis());
        setFileLength();
    }

    @Override public void close() throws IOException {
        flush();
        // Hand ownership of the buffers over to the file.
        file.buffers(buffers.toArray(new byte[buffers.size()][]));
    }

    @Override public long getFilePointer() {
        return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
    }

    @Override public void seek(long pos) throws IOException {
        // set the file length in case we seek back
        // and flush() has not been called yet
        setFileLength();
        if (pos < bufferStart || pos >= bufferStart + bufferLength) {
            currentBufferIndex = (int) (pos / dir.bufferSizeInBytes());
            switchCurrentBuffer();
        }

        bufferPosition = (int) (pos % dir.bufferSizeInBytes());
    }

    @Override public long length() throws IOException {
        return file.length();
    }

    /** Makes {@code currentBufferIndex} the active buffer, allocating if new. */
    private void switchCurrentBuffer() throws IOException {
        if (currentBufferIndex == buffers.size()) {
            currentBuffer = dir.acquireBuffer();
            buffers.add(currentBuffer);
        } else {
            currentBuffer = buffers.get(currentBufferIndex);
        }
        bufferPosition = 0;
        bufferStart = (long) dir.bufferSizeInBytes() * (long) currentBufferIndex;
        bufferLength = currentBuffer.length;
    }

    /** Extends the recorded file length if the write pointer has passed it. */
    private void setFileLength() {
        long pointer = bufferStart + bufferPosition;
        if (pointer > file.length()) {
            file.length(pointer);
        }
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.memory; + +import com.google.inject.Inject; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.support.AbstractStore; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class MemoryStore extends AbstractStore { + + private final SizeValue bufferSize; + + private final SizeValue cacheSize; + + private final boolean warmCache; + + private MemoryDirectory directory; + + @Inject public MemoryStore(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + + this.bufferSize = componentSettings.getAsSize("bufferSize", new SizeValue(1, SizeUnit.KB)); + this.cacheSize = componentSettings.getAsSize("cacheSize", new SizeValue(20, SizeUnit.MB)); + this.warmCache = componentSettings.getAsBoolean("warmCache", true); + + this.directory = new MemoryDirectory(bufferSize, cacheSize, warmCache); + logger.debug("Using [Memory] Store with bufferSize[{}], cacheSize[{}], warmCache[{}]", + new Object[]{directory.bufferSize(), directory.cacheSize(), warmCache}); + } + + @Override public MemoryDirectory directory() { + return directory; + } + + /** + * Its better to not use the compound format when using the Ram store. 
+ */ + @Override public boolean suggestUseCompoundFile() { + return false; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/memory/MemoryStoreModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/memory/MemoryStoreModule.java new file mode 100644 index 00000000000..b676e3ea59f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/store/memory/MemoryStoreModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.elasticsearch.index.store.memory;

import com.google.inject.AbstractModule;
import org.elasticsearch.index.store.Store;

/**
 * Guice module binding {@link Store} to the buffer-based in-memory store
 * implementation ({@link MemoryStore}).
 *
 * @author kimchy (Shay Banon)
 */
public class MemoryStoreModule extends AbstractModule {

    @Override protected void configure() {
        // Eager singleton: one store per shard injector, created up front.
        bind(Store.class).to(MemoryStore.class).asEagerSingleton();
    }
}
package org.elasticsearch.index.store.ram;

import com.google.inject.Inject;
import org.apache.lucene.store.RAMDirectory;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.support.AbstractStore;
import org.elasticsearch.util.SizeUnit;
import org.elasticsearch.util.SizeValue;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

/**
 * A shard store backed by Lucene's stock {@link RAMDirectory}.
 *
 * @author kimchy (Shay Banon)
 */
public class RamStore extends AbstractStore {

    // Made final for consistency with the filesystem stores: assigned once in
    // the constructor and never reassigned.
    private final RAMDirectory directory;

    @Inject public RamStore(ShardId shardId, @IndexSettings Settings indexSettings) {
        super(shardId, indexSettings);
        this.directory = new RAMDirectory();
        logger.debug("Using [RAM] Store");
    }

    @Override public RAMDirectory directory() {
        return directory;
    }

    /** Exact size: RAMDirectory tracks its byte usage directly. */
    @Override public SizeValue estimateSize() throws IOException {
        return new SizeValue(directory.sizeInBytes(), SizeUnit.BYTES);
    }

    /**
     * Its better to not use the compound format when using the Ram store.
     */
    @Override public boolean suggestUseCompoundFile() {
        return false;
    }
}
package org.elasticsearch.index.store.ram;

import com.google.inject.AbstractModule;
import org.elasticsearch.index.store.Store;

/**
 * Guice module binding {@link Store} to the {@code RAMDirectory}-backed store
 * implementation ({@link RamStore}).
 *
 * @author kimchy (Shay Banon)
 */
public class RamStoreModule extends AbstractModule {

    @Override protected void configure() {
        // Eager singleton: one store per shard injector, created up front.
        bind(Store.class).to(RamStore.class).asEagerSingleton();
    }
}
package org.elasticsearch.index.store.support;

import org.apache.lucene.store.Directory;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.util.SizeValue;
import org.elasticsearch.util.lucene.Directories;
import org.elasticsearch.util.settings.Settings;

import java.io.IOException;

/**
 * Base class for {@link Store} implementations, providing default behavior in
 * terms of the concrete store's {@code directory()}: content deletion, size
 * estimation and close all delegate to the {@link Directories} helpers or the
 * directory itself.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class AbstractStore extends AbstractIndexShardComponent implements Store {

    protected AbstractStore(ShardId shardId, @IndexSettings Settings indexSettings) {
        super(shardId, indexSettings);
    }

    @Override public void deleteContent() throws IOException {
        Directories.deleteFiles(directory());
    }

    // Default full delete just removes content; stores with extra on-disk
    // state (e.g. the shard directory itself) override this.
    @Override public void fullDelete() throws IOException {
        deleteContent();
    }

    @Override public SizeValue estimateSize() throws IOException {
        return Directories.estimateSize(directory());
    }

    /**
     * Returns true by default.
     */
    @Override public boolean suggestUseCompoundFile() {
        return true;
    }

    @Override public void close() throws IOException {
        directory().close();
    }
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import org.apache.lucene.index.Term; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardComponent; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.Strings; +import org.elasticsearch.util.concurrent.NotThreadSafe; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.lease.Releasable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public interface Translog extends IndexShardComponent { + + /** + * Returns the id of the current transaction log. + */ + long currentId(); + + /** + * Returns the number of operations in the transaction log. + */ + int size(); + + /** + * The estimated memory size this translog is taking. + */ + SizeValue estimateMemorySize(); + + /** + * Creates a new transaction log internally. Note, users of this class should make + * sure that no operations are performed on the trans log when this is called. + */ + void newTranslog(); + + /** + * Adds a create operation to the transaction log. 
+ */ + void add(Operation operation) throws TranslogException; + + /** + * Snapshots the current transaction log allowing to safely iterate over the snapshot. + */ + Snapshot snapshot() throws TranslogException; + + /** + * Snapshots the delta between the current state of the translog, and the state defined + * by the provided snapshot. If a new translog has been created after the provided snapshot + * has been taken, will return a snapshot on the current translog. + */ + Snapshot snapshot(Snapshot snapshot); + + /** + * Closes the transaction log. + */ + void close(); + + /** + * A snapshot of the transaction log, allows to iterate over all the transaction log operations. + */ + @NotThreadSafe + static interface Snapshot extends Iterable, Releasable, Streamable { + + /** + * The id of the translog the snapshot was taken with. + */ + long translogId(); + + /** + * The number of translog operations in the snapshot. + */ + int size(); + + Iterable skipTo(int skipTo); + } + + /** + * A generic interface representing an operation performed on the transaction log. + * Each is associated with a type.
+ */ + static interface Operation extends Streamable { + static enum Type { + CREATE((byte) 1), + SAVE((byte) 2), + DELETE((byte) 3), + DELETE_BY_QUERY((byte) 4); + + private final byte id; + + private Type(byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + + public static Type fromId(byte id) { + switch (id) { + case 1: + return CREATE; + case 2: + return SAVE; + case 3: + return DELETE; + case 4: + return DELETE_BY_QUERY; + default: + throw new IllegalArgumentException("No type mapped for [" + id + "]"); + } + } + } + + Type opType(); + + long estimateSize(); + + void execute(IndexShard indexShard) throws ElasticSearchException; + } + + static class Create implements Operation { + private String id; + private String type; + private String source; + + public Create() { + } + + public Create(Engine.Create create) { + this(create.type(), create.id(), create.source()); + } + + public Create(String type, String id, String source) { + this.id = id; + this.type = type; + this.source = source; + } + + @Override public Type opType() { + return Type.CREATE; + } + + @Override public long estimateSize() { + return ((id.length() + type.length() + source.length()) * 2) + 12; + } + + public String id() { + return this.id; + } + + public String source() { + return this.source; + } + + public String type() { + return this.type; + } + + @Override public void execute(IndexShard indexShard) throws ElasticSearchException { + indexShard.create(type, id, source); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readUTF(); + type = in.readUTF(); + source = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(id); + out.writeUTF(type); + out.writeUTF(source); + } + } + + static class Index implements Operation { + private String id; + private String type; + private String source; + + public Index() { + } + + public Index(Engine.Index index) { + 
this(index.type(), index.id(), index.source()); + } + + public Index(String type, String id, String source) { + this.type = type; + this.id = id; + this.source = source; + } + + @Override public Type opType() { + return Type.SAVE; + } + + @Override public long estimateSize() { + return ((id.length() + type.length() + source.length()) * 2) + 12; + } + + public String type() { + return this.type; + } + + public String id() { + return this.id; + } + + public String source() { + return this.source; + } + + @Override public void execute(IndexShard indexShard) throws ElasticSearchException { + indexShard.index(type, id, source); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readUTF(); + type = in.readUTF(); + source = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(id); + out.writeUTF(type); + out.writeUTF(source); + } + } + + static class Delete implements Operation { + private Term uid; + + public Delete() { + } + + public Delete(Engine.Delete delete) { + this(delete.uid()); + } + + public Delete(Term uid) { + this.uid = uid; + } + + @Override public Type opType() { + return Type.DELETE; + } + + @Override public long estimateSize() { + return ((uid.field().length() + uid.text().length()) * 2) + 20; + } + + public Term uid() { + return this.uid; + } + + @Override public void execute(IndexShard indexShard) throws ElasticSearchException { + indexShard.delete(uid); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + uid = new Term(in.readUTF(), in.readUTF()); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(uid.field()); + out.writeUTF(uid.text()); + } + } + + static class DeleteByQuery implements Operation { + private String source; + @Nullable private String queryParserName; + private String[] types = Strings.EMPTY_ARRAY; + + public DeleteByQuery() { + } + + public 
DeleteByQuery(Engine.DeleteByQuery deleteByQuery) { + this(deleteByQuery.source(), deleteByQuery.queryParserName(), deleteByQuery.types()); + } + + public DeleteByQuery(String source, @Nullable String queryParserName, String... types) { + this.queryParserName = queryParserName; + this.source = source; + this.types = types; + } + + @Override public Type opType() { + return Type.DELETE_BY_QUERY; + } + + @Override public long estimateSize() { + return ((source.length() + (queryParserName == null ? 0 : queryParserName.length())) * 2) + 8; + } + + public String queryParserName() { + return this.queryParserName; + } + + public String source() { + return this.source; + } + + public String[] types() { + return this.types; + } + + @Override public void execute(IndexShard indexShard) throws ElasticSearchException { + indexShard.deleteByQuery(source, queryParserName, types); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + source = in.readUTF(); + if (in.readBoolean()) { + queryParserName = in.readUTF(); + } + int typesSize = in.readInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readUTF(); + } + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(source); + if (queryParserName == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(queryParserName); + } + out.writeInt(types.length); + for (String type : types) { + out.writeUTF(type); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogException.java new file mode 100644 index 00000000000..4e37605b33e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + 
* or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.shard.ShardId; + +/** + * @author kimchy (Shay Banon) + */ +public class TranslogException extends IndexShardException { + + public TranslogException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public TranslogException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogModule.java new file mode 100644 index 00000000000..9618bb90b48 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogModule.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; +import org.elasticsearch.index.translog.memory.MemoryTranslog; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class TranslogModule extends AbstractModule { + + public static class TranslogSettings { + public static final String TYPE = "index.translog.type"; + } + + private final Settings settings; + + public TranslogModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(Translog.class) + .to(settings.getAsClass(TranslogSettings.TYPE, MemoryTranslog.class)) + .in(Scopes.SINGLETON); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java new file mode 100644 index 00000000000..4a41d1cdfbf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class TranslogStreams { + + public static Translog.Operation readTranslogOperation(DataInput in) throws IOException, ClassNotFoundException { + Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte()); + Translog.Operation operation; + switch (type) { + case CREATE: + operation = new Translog.Create(); + break; + case DELETE: + operation = new Translog.Delete(); + break; + case DELETE_BY_QUERY: + operation = new Translog.DeleteByQuery(); + break; + case SAVE: + operation = new Translog.Index(); + break; + default: + throw new IOException("No type for [" + type + "]"); + } + operation.readFrom(in); + return operation; + } + + public static void writeTranslogOperation(DataOutput out, Translog.Operation op) throws IOException { + out.writeByte(op.opType().id()); + op.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/memory/MemorySnapshot.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/memory/MemorySnapshot.java new file mode 100644 index 00000000000..21075cd4231 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/memory/MemorySnapshot.java @@ -0,0 +1,94 @@ +/* + 
* Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog.memory; + +import com.google.common.collect.Iterables; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.index.translog.Translog; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Arrays; +import java.util.Iterator; + +import static org.elasticsearch.index.translog.TranslogStreams.*; + +/** + * @author kimchy (Shay Banon) + */ +public class MemorySnapshot implements Translog.Snapshot { + + private long id; + + Translog.Operation[] operations; + + public MemorySnapshot() { + } + + public MemorySnapshot(Translog.Snapshot snapshot) { + this(snapshot.translogId(), Iterables.toArray(snapshot, Translog.Operation.class)); + } + + public MemorySnapshot(long id, Translog.Operation[] operations) { + this.id = id; + this.operations = operations; + } + + @Override public long translogId() { + return id; + } + + @Override public boolean release() throws ElasticSearchException { + return true; + } + + @Override public int size() { + return operations.length; + } + + @Override public Iterator 
iterator() { + return Arrays.asList(operations).iterator(); + } + + @Override public Iterable skipTo(int skipTo) { + if (operations.length < skipTo) { + throw new ElasticSearchIllegalArgumentException("skipTo [" + skipTo + "] is bigger than size [" + size() + "]"); + } + return Arrays.asList(Arrays.copyOfRange(operations, skipTo, operations.length)); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readLong(); + operations = new Translog.Operation[in.readInt()]; + for (int i = 0; i < operations.length; i++) { + operations[i] = readTranslogOperation(in); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(id); + out.writeInt(operations.length); + for (Translog.Operation op : operations) { + writeTranslogOperation(out, op); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/memory/MemoryTranslog.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/memory/MemoryTranslog.java new file mode 100644 index 00000000000..32d56175ff8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/translog/memory/MemoryTranslog.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog.memory; + +import com.google.inject.Inject; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogException; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.settings.Settings; + +import java.util.ArrayList; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class MemoryTranslog extends AbstractIndexShardComponent implements Translog { + + private final Object mutex = new Object(); + + private final AtomicLong idGenerator = new AtomicLong(); + + private final AtomicLong estimatedMemorySize = new AtomicLong(); + + private volatile long id; + + private final ConcurrentLinkedQueue operations = new ConcurrentLinkedQueue(); + + @Inject public MemoryTranslog(ShardId shardId, @IndexSettings Settings indexSettings) { + super(shardId, indexSettings); + newTranslog(); + } + + @Override public long currentId() { + return this.id; + } + + @Override public int size() { + return operations.size(); + } + + @Override public SizeValue estimateMemorySize() { + return new SizeValue(estimatedMemorySize.get(), SizeUnit.BYTES); + } + + @Override public void newTranslog() { + synchronized (mutex) { + estimatedMemorySize.set(0); + operations.clear(); + id = idGenerator.getAndIncrement(); + } + } + + @Override public void add(Operation operation) throws TranslogException { + operations.add(operation); + estimatedMemorySize.addAndGet(operation.estimateSize() + 20); + } + + @Override public 
Snapshot snapshot() { + synchronized (mutex) { + return new MemorySnapshot(currentId(), operations.toArray(new Operation[0])); + } + } + + @Override public Snapshot snapshot(Snapshot snapshot) { + synchronized (mutex) { + MemorySnapshot memorySnapshot = (MemorySnapshot) snapshot; + if (currentId() != snapshot.translogId()) { + return snapshot(); + } + ArrayList retVal = new ArrayList(); + int counter = 0; + int snapshotSize = memorySnapshot.operations.length; + for (Operation operation : operations) { + if (++counter > snapshotSize) { + retVal.add(operation); + } + } + return new MemorySnapshot(currentId(), retVal.toArray(new Operation[retVal.size()])); + } + } + + @Override public void close() { + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java new file mode 100644 index 00000000000..031ad1fe2ca --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexAlreadyExistsException extends IndexException { + + public IndexAlreadyExistsException(Index index) { + super(index, "Already exists"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndexMissingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndexMissingException.java new file mode 100644 index 00000000000..43a9357bdb8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndexMissingException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexMissingException extends IndexException { + + public IndexMissingException(Index index) { + super(index, "missing"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesMemoryCleaner.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesMemoryCleaner.java new file mode 100644 index 00000000000..452b2683e99 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesMemoryCleaner.java @@ -0,0 +1,184 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices; + +import com.google.inject.Inject; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.FlushNotAllowedEngineException; +import org.elasticsearch.index.shard.IllegalIndexShardStateException; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.InternalIndexShard; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.Tuple; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; + +/** + * @author kimchy (Shay Banon) + */ +public class IndicesMemoryCleaner extends AbstractComponent { + + private final IndicesService indicesService; + + @Inject public IndicesMemoryCleaner(Settings settings, IndicesService indicesService) { + super(settings); + this.indicesService = indicesService; + } + + public TranslogCleanResult cleanTranslog(int translogNumberOfOperationsThreshold) { + int totalShards = 0; + int cleanedShards = 0; + long cleaned = 0; + for (IndexService indexService : indicesService) { + for (IndexShard indexShard : indexService) { + if (indexShard.state() != IndexShardState.STARTED) { + continue; + } + totalShards++; + Translog translog = ((InternalIndexShard) indexShard).translog(); + if (translog.size() > translogNumberOfOperationsThreshold) { + cleanedShards++; + cleaned = indexShard.estimateFlushableMemorySize().bytes(); + indexShard.flush(); + } + } + } + return new TranslogCleanResult(totalShards, cleanedShards, new SizeValue(cleaned, SizeUnit.BYTES)); + } + + /** + * Checks if memory needs to be cleaned and cleans it. Returns the amount of memory cleaned. 
+ */ + public MemoryCleanResult cleanMemory(long memoryToClean, SizeValue minimumFlushableSizeToClean) { + int totalShards = 0; + long estimatedFlushableSize = 0; + ArrayList> shards = new ArrayList>(); + for (IndexService indexService : indicesService) { + for (IndexShard indexShard : indexService) { + if (indexShard.state() != IndexShardState.STARTED) { + continue; + } + totalShards++; + SizeValue estimatedSize = indexShard.estimateFlushableMemorySize(); + estimatedFlushableSize += estimatedSize.bytes(); + if (estimatedSize != null) { + shards.add(new Tuple(estimatedSize, indexShard)); + } + } + } + Collections.sort(shards, new Comparator>() { + @Override public int compare(Tuple o1, Tuple o2) { + return (int) (o1.v1().bytes() - o2.v1().bytes()); + } + }); + int cleanedShards = 0; + long cleaned = 0; + for (Tuple tuple : shards) { + if (tuple.v1().bytes() < minimumFlushableSizeToClean.bytes()) { + // we passed the minimum threshold, don't flush + break; + } + try { + tuple.v2().flush(); + } catch (FlushNotAllowedEngineException e) { + // ignore this one, its temporal + } catch (IllegalIndexShardStateException e) { + // ignore this one as well + } catch (Exception e) { + logger.warn(tuple.v2().shardId() + ": Failed to flush in order to clean memory", e); + } + cleanedShards++; + cleaned += tuple.v1().bytes(); + if (cleaned > memoryToClean) { + break; + } + } + return new MemoryCleanResult(totalShards, cleanedShards, new SizeValue(estimatedFlushableSize), new SizeValue(cleaned)); + } + + public static class TranslogCleanResult { + private final int totalShards; + private final int cleanedShards; + private final SizeValue cleaned; + + public TranslogCleanResult(int totalShards, int cleanedShards, SizeValue cleaned) { + this.totalShards = totalShards; + this.cleanedShards = cleanedShards; + this.cleaned = cleaned; + } + + public int totalShards() { + return totalShards; + } + + public int cleanedShards() { + return cleanedShards; + } + + public SizeValue cleaned() { 
+ return cleaned; + } + + @Override public String toString() { + return "cleaned[" + cleaned + "], cleanedShards[" + cleanedShards + "], totalShards[" + totalShards + "]"; + } + } + + public static class MemoryCleanResult { + private final int totalShards; + private final int cleanedShards; + private final SizeValue estimatedFlushableSize; + private final SizeValue cleaned; + + public MemoryCleanResult(int totalShards, int cleanedShards, SizeValue estimatedFlushableSize, SizeValue cleaned) { + this.totalShards = totalShards; + this.cleanedShards = cleanedShards; + this.estimatedFlushableSize = estimatedFlushableSize; + this.cleaned = cleaned; + } + + public int totalShards() { + return totalShards; + } + + public int cleanedShards() { + return cleanedShards; + } + + public SizeValue estimatedFlushableSize() { + return estimatedFlushableSize; + } + + public SizeValue cleaned() { + return cleaned; + } + + @Override public String toString() { + return "cleaned[" + cleaned + "], estimatedFlushableSize[" + estimatedFlushableSize + "], cleanedShards[" + cleanedShards + "], totalShards[" + totalShards + "]"; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesModule.java new file mode 100644 index 00000000000..659a975ecb0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import com.google.inject.AbstractModule; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class IndicesModule extends AbstractModule { + + private final Settings settings; + + public IndicesModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(IndicesService.class).to(InternalIndicesService.class).asEagerSingleton(); + bind(IndicesClusterStateService.class).asEagerSingleton(); + bind(IndicesMemoryCleaner.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesService.java new file mode 100644 index 00000000000..4bbd2969341 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.settings.Settings; + +import java.util.Set; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public interface IndicesService extends Iterable, LifecycleComponent { + + /** + * Returns true if changes (adding / removing) indices, shards and so on are allowed. + */ + public boolean changesAllowed(); + + boolean hasIndex(String index); + + Set indices(); + + IndexService indexService(String index); + + IndexService indexServiceSafe(String index) throws IndexMissingException; + + /** + * Gets all the "searchable" shards on all the given indices. 
+ * + * @see org.elasticsearch.index.routing.OperationRouting#searchShards(org.elasticsearch.cluster.ClusterState, String) + */ + GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String queryHint) throws ElasticSearchException; + + IndexService createIndex(String index, Settings settings, String localNodeId) throws ElasticSearchException; + + void deleteIndex(String index) throws ElasticSearchException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/InternalIndicesService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/InternalIndicesService.java new file mode 100644 index 00000000000..64b24608629 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/InternalIndicesService.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.UnmodifiableIterator; +import com.google.inject.Inject; +import com.google.inject.Injector; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.gateway.Gateway; +import org.elasticsearch.index.*; +import org.elasticsearch.index.analysis.AnalysisModule; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.cache.filter.FilterCacheModule; +import org.elasticsearch.index.gateway.IndexGateway; +import org.elasticsearch.index.gateway.IndexGatewayModule; +import org.elasticsearch.index.mapper.MapperServiceModule; +import org.elasticsearch.index.query.IndexQueryParserModule; +import org.elasticsearch.index.routing.OperationRoutingModule; +import org.elasticsearch.index.settings.IndexSettingsModule; +import org.elasticsearch.index.similarity.SimilarityModule; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.guice.Injectors; +import org.elasticsearch.util.settings.Settings; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import static com.google.common.collect.Maps.*; +import static com.google.common.collect.Sets.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.util.MapBuilder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class InternalIndicesService extends AbstractComponent implements IndicesService { + + private final Lifecycle lifecycle 
= new Lifecycle(); + + private final IndicesClusterStateService clusterStateService; + + private final Injector injector; + + private final Map indicesInjectors = new HashMap(); + + private volatile ImmutableMap indices = ImmutableMap.of(); + + @Inject public InternalIndicesService(Settings settings, IndicesClusterStateService clusterStateService, Injector injector) { + super(settings); + this.clusterStateService = clusterStateService; + this.injector = injector; + } + + @Override public Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public IndicesService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + clusterStateService.start(); + return this; + } + + @Override public IndicesService stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + clusterStateService.stop(); + for (String index : indices.keySet()) { + deleteIndex(index, true); + } + return this; + } + + public synchronized void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + clusterStateService.close(); + } + + /** + * Returns true if changes (adding / removing) indices, shards and so on are allowed. 
+ */ + public boolean changesAllowed() { + // we check on stop here since we defined stop when we delete the indices + return lifecycle.started(); + } + + @Override public UnmodifiableIterator iterator() { + return indices.values().iterator(); + } + + public boolean hasIndex(String index) { + return indices.containsKey(index); + } + + public Set indices() { + return newHashSet(indices.keySet()); + } + + public IndexService indexService(String index) { + return indices.get(index); + } + + @Override public IndexService indexServiceSafe(String index) throws IndexMissingException { + IndexService indexService = indexService(index); + if (indexService == null) { + throw new IndexMissingException(new Index(index)); + } + return indexService; + } + + @Override public GroupShardsIterator searchShards(ClusterState clusterState, String[] indexNames, String queryHint) throws ElasticSearchException { + if (indexNames == null || indexNames.length == 0) { + ImmutableMap indices = this.indices; + indexNames = indices.keySet().toArray(new String[indices.keySet().size()]); + } + GroupShardsIterator its = new GroupShardsIterator(); + for (String index : indexNames) { + its.add(indexServiceSafe(index).operationRouting().searchShards(clusterState, queryHint)); + } + return its; + } + + public synchronized IndexService createIndex(String sIndexName, Settings settings, String localNodeId) throws ElasticSearchException { + Index index = new Index(sIndexName); + if (indicesInjectors.containsKey(index.name())) { + throw new IndexAlreadyExistsException(index); + } + + logger.debug("Creating Index [{}], shards [{}]/[{}]", new Object[]{sIndexName, settings.get(SETTING_NUMBER_OF_SHARDS), settings.get(SETTING_NUMBER_OF_REPLICAS)}); + + Settings indexSettings = settingsBuilder() + .put("settingsType", "index") + .putAll(this.settings) + .putAll(settings) + .classLoader(settings.getClassLoader()) + .globalSettings(settings.getGlobalSettings()) + .build(); + + Injector indexInjector = 
injector.createChildInjector( + new IndexNameModule(index), + new LocalNodeIdModule(localNodeId), + new IndexSettingsModule(indexSettings), + new AnalysisModule(indexSettings), + new SimilarityModule(indexSettings), + new FilterCacheModule(indexSettings), + new IndexQueryParserModule(indexSettings), + new MapperServiceModule(), + new IndexGatewayModule(indexSettings, injector.getInstance(Gateway.class)), + new OperationRoutingModule(indexSettings), + new IndexModule()); + + indicesInjectors.put(index.name(), indexInjector); + + IndexService indexService = indexInjector.getInstance(IndexService.class); + + indices = newMapBuilder(indices).put(index.name(), indexService).immutableMap(); + + return indexService; + } + + public synchronized void deleteIndex(String index) throws ElasticSearchException { + deleteIndex(index, false); + } + + private synchronized void deleteIndex(String index, boolean internalClose) throws ElasticSearchException { + Injector indexInjector = indicesInjectors.remove(index); + if (indexInjector == null) { + if (internalClose) { + return; + } + throw new IndexMissingException(new Index(index)); + } + if (!internalClose) { + logger.debug("Deleting Index [{}]", index); + } + + Map tmpMap = newHashMap(indices); + IndexService indexService = tmpMap.remove(index); + indices = ImmutableMap.copyOf(tmpMap); + + indexService.close(); + + indexInjector.getInstance(FilterCache.class).close(); + indexInjector.getInstance(AnalysisService.class).close(); + indexInjector.getInstance(IndexServiceManagement.class).close(); + + if (!internalClose) { + indexInjector.getInstance(IndexGateway.class).delete(); + } + indexInjector.getInstance(IndexGateway.class).close(); + + Injectors.close(injector); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java new file mode 100644 index 
00000000000..9baf94cd7c0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; + +/** + * @author kimchy (Shay Banon) + */ +public class InvalidIndexNameException extends IndexException { + + public InvalidIndexNameException(Index index, String name, String desc) { + super(index, "Invalid index name [" + name + "], " + desc); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/TypeMissingException.java new file mode 100644 index 00000000000..b6eeb8f2e7f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/TypeMissingException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; + +/** + * @author kimchy (Shay Banon) + */ +public class TypeMissingException extends IndexException { + + public TypeMissingException(Index index, String type) { + super(index, "type[" + type + "] missing"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java new file mode 100644 index 00000000000..9d1df932ceb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.cluster; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.action.index.NodeIndexCreatedAction; +import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexShardAlreadyExistsException; +import org.elasticsearch.index.gateway.IgnoreGatewayRecoveryException; +import org.elasticsearch.index.gateway.IndexShardGatewayService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.InternalIndexShard; +import 
org.elasticsearch.index.shard.recovery.IgnoreRecoveryException; +import org.elasticsearch.index.shard.recovery.RecoveryAction; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; +import java.util.Set; + +import static com.google.common.collect.Sets.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndicesClusterStateService extends AbstractComponent implements ClusterStateListener, LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final IndicesService indicesService; + + private final ClusterService clusterService; + + private final ThreadPool threadPool; + + private final ShardStateAction shardStateAction; + + private final NodeIndexCreatedAction nodeIndexCreatedAction; + + private final NodeIndexDeletedAction nodeIndexDeletedAction; + + @Inject public IndicesClusterStateService(Settings settings, IndicesService indicesService, ClusterService clusterService, + ThreadPool threadPool, ShardStateAction shardStateAction, + NodeIndexCreatedAction nodeIndexCreatedAction, NodeIndexDeletedAction nodeIndexDeletedAction) { + super(settings); + this.indicesService = indicesService; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.shardStateAction = shardStateAction; + this.nodeIndexCreatedAction = nodeIndexCreatedAction; + this.nodeIndexDeletedAction = nodeIndexDeletedAction; + } + + @Override public Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public IndicesClusterStateService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + clusterService.add(this); + return this; + } + + @Override public IndicesClusterStateService 
stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + clusterService.remove(this); + return this; + } + + @Override public void close() throws ElasticSearchException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + @Override public void clusterChanged(final ClusterChangedEvent event) { + if (!indicesService.changesAllowed()) + return; + + MetaData metaData = event.state().metaData(); + // first, go over and create and indices that needs to be created + for (final IndexMetaData indexMetaData : metaData) { + if (!indicesService.hasIndex(indexMetaData.index())) { + if (logger.isDebugEnabled()) { + logger.debug("Index [{}]: Creating", indexMetaData.index()); + } + indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), event.state().nodes().localNode().id()); + threadPool.execute(new Runnable() { + @Override public void run() { + nodeIndexCreatedAction.nodeIndexCreated(indexMetaData.index(), event.state().nodes().localNodeId()); + } + }); + } + } + + RoutingTable routingTable = event.state().routingTable(); + + RoutingNode routingNodes = event.state().routingNodes().nodesToShards().get(event.state().nodes().localNodeId()); + if (routingNodes != null) { + applyShards(routingNodes, routingTable, event.state().nodes()); + } + + // go over and update mappings + for (IndexMetaData indexMetaData : metaData) { + if (!indicesService.hasIndex(indexMetaData.index())) { + // we only create / update here + continue; + } + String index = indexMetaData.index(); + IndexService indexService = indicesService.indexServiceSafe(index); + MapperService mapperService = indexService.mapperService(); + ImmutableMap mappings = indexMetaData.mappings(); + // we don't support removing mappings for now ... 
+ for (Map.Entry entry : mappings.entrySet()) { + String mappingType = entry.getKey(); + String mappingSource = entry.getValue(); + + try { + if (!mapperService.hasMapping(mappingType)) { + if (logger.isDebugEnabled()) { + logger.debug("Index [" + index + "] Adding mapping [" + mappingType + "], source [" + mappingSource + "]"); + } + mapperService.add(mappingType, mappingSource); + } else { + DocumentMapper existingMapper = mapperService.documentMapper(mappingType); + if (!mappingSource.equals(existingMapper.mappingSource())) { + // mapping changed, update it + if (logger.isDebugEnabled()) { + logger.debug("Index [" + index + "] Updating mapping [" + mappingType + "], source [" + mappingSource + "]"); + } + mapperService.add(mappingType, mappingSource); + } + } + } catch (Exception e) { + logger.warn("Failed to add mapping [" + mappingType + "], source [" + mappingSource + "]", e); + } + } + } + + // go over and delete either all indices or specific shards + for (final String index : indicesService.indices()) { + if (metaData.index(index) == null) { + if (logger.isDebugEnabled()) { + logger.debug("Index [{}]: Deleting", index); + } + indicesService.deleteIndex(index); + threadPool.execute(new Runnable() { + @Override public void run() { + nodeIndexDeletedAction.nodeIndexDeleted(index, event.state().nodes().localNodeId()); + } + }); + } else if (routingNodes != null) { + // now, go over and delete shards that needs to get deleted + Set newShardIds = newHashSet(); + for (final ShardRouting shardRouting : routingNodes) { + if (shardRouting.index().equals(index)) { + newShardIds.add(shardRouting.id()); + } + } + final IndexService indexService = indicesService.indexService(index); + if (indexService == null) { + continue; + } + for (Integer existingShardId : indexService.shardIds()) { + if (!newShardIds.contains(existingShardId)) { + if (logger.isDebugEnabled()) { + logger.debug("Index [{}]: Deleting shard [{}]", index, existingShardId); + } + 
indexService.deleteShard(existingShardId); + } + } + } + } + } + + private void applyShards(final RoutingNode routingNodes, final RoutingTable routingTable, final Nodes nodes) throws ElasticSearchException { + if (!indicesService.changesAllowed()) + return; + + for (final ShardRouting shardRouting : routingNodes) { + final IndexService indexService = indicesService.indexServiceSafe(shardRouting.index()); + + final int shardId = shardRouting.id(); + + if (!indexService.hasShard(shardId) && shardRouting.started()) { + // the master thinks we are started, but we don't have this shard at all, mark it as failed + logger.warn("[" + shardRouting.index() + "][" + shardRouting.shardId().id() + "] Master " + nodes.masterNode() + " marked shard as started, but shard have not been created, mark shard as failed"); + shardStateAction.shardFailed(shardRouting); + continue; + } + + if (indexService.hasShard(shardId)) { + InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId); + if (!shardRouting.equals(indexShard.routingEntry())) { + indexShard.routingEntry(shardRouting); + indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class).routingStateChanged(); + } + } + + if (shardRouting.initializing()) { + applyInitializingShard(routingTable, nodes, shardRouting); + } + } + } + + private void applyInitializingShard(final RoutingTable routingTable, final Nodes nodes, final ShardRouting shardRouting) throws ElasticSearchException { + final IndexService indexService = indicesService.indexServiceSafe(shardRouting.index()); + final int shardId = shardRouting.id(); + + if (indexService.hasShard(shardId)) { + IndexShard indexShard = indexService.shardSafe(shardId); + if (indexShard.state() == IndexShardState.STARTED) { + // the master thinks we are initializing, but we are already started + // (either master failover, or a cluster event before we managed to tell the master we started), mark us as started + if (logger.isTraceEnabled()) { + 
logger.trace("[" + shardRouting.index() + "][" + shardRouting.shardId().id() + "] Master " + nodes.masterNode() + " marked shard as initializing, but shard already started, mark shard as started"); + } + shardStateAction.shardStarted(shardRouting); + return; + } else { + if (indexShard.ignoreRecoveryAttempt()) { + return; + } + } + } + // if there is no shard, create it + if (!indexService.hasShard(shardId)) { + try { + if (logger.isDebugEnabled()) { + logger.debug("Index [{}]: Creating shard [{}]", shardRouting.index(), shardId); + } + InternalIndexShard indexShard = (InternalIndexShard) indexService.createShard(shardId); + indexShard.routingEntry(shardRouting); + } catch (IndexShardAlreadyExistsException e) { + // ignore this, the method call can happen several times + } catch (Exception e) { + logger.warn("Failed to create shard for index [" + indexService.index().name() + "] and shard id [" + shardRouting.id() + "]", e); + try { + indexService.deleteShard(shardId); + } catch (Exception e1) { + logger.warn("Failed to delete shard after failed creation for index [" + indexService.index().name() + "] and shard id [" + shardRouting.id() + "]", e1); + } + shardStateAction.shardFailed(shardRouting); + return; + } + } + final InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(shardId); + + if (indexShard.ignoreRecoveryAttempt()) { + // we are already recovering (we can get to this state since the cluster event can happen several + // times while we recover) + return; + } + + threadPool.execute(new Runnable() { + @Override public void run() { + // recheck here, since the cluster event can be called + if (indexShard.ignoreRecoveryAttempt()) { + return; + } + try { + RecoveryAction recoveryAction = indexService.shardInjector(shardId).getInstance(RecoveryAction.class); + if (!shardRouting.primary()) { + // recovery from primary + IndexShardRoutingTable shardRoutingTable = routingTable.index(shardRouting.index()).shard(shardRouting.id()); + for 
(ShardRouting entry : shardRoutingTable) { + if (entry.primary() && entry.started()) { + // only recover from started primary, if we can't find one, we will do it next round + Node node = nodes.get(entry.currentNodeId()); + try { + // we are recovering a backup from a primary, so no need to mark it as relocated + recoveryAction.startRecovery(nodes.localNode(), node, false); + shardStateAction.shardStarted(shardRouting); + } catch (IgnoreRecoveryException e) { + // that's fine, since we might be called concurrently, just ignore this + break; + } + break; + } + } + } else { + if (shardRouting.relocatingNodeId() == null) { + // we are the first primary, recover from the gateway + IndexShardGatewayService shardGatewayService = indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class); + try { + shardGatewayService.recover(); + shardStateAction.shardStarted(shardRouting); + } catch (IgnoreGatewayRecoveryException e) { + // that's fine, we might be called concurrently, just ignore this, we already recovered + } + } else { + // relocating primaries, recovery from the relocating shard + Node node = nodes.get(shardRouting.relocatingNodeId()); + try { + // we mark the primary we are going to recover from as relocated + recoveryAction.startRecovery(nodes.localNode(), node, true); + shardStateAction.shardStarted(shardRouting); + } catch (IgnoreRecoveryException e) { + // that's fine, since we might be called concurrently, just ignore this, we are already recovering + } + } + } + } catch (Exception e) { + logger.warn("Failed to start shard for index [" + indexService.index().name() + "] and shard id [" + shardRouting.id() + "]", e); + if (indexService.hasShard(shardId)) { + try { + indexService.deleteShard(shardId); + } catch (Exception e1) { + logger.warn("Failed to delete shard after failed startup for index [" + indexService.index().name() + "] and shard id [" + shardRouting.id() + "]", e1); + } + } + try { + shardStateAction.shardFailed(shardRouting); 
+ } catch (Exception e1) { + logger.warn("Failed to mark shard as failed after a failed start for index [" + indexService.index().name() + "] and shard id [" + shardRouting.id() + "]", e); + } + } + } + }); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxClusterService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxClusterService.java new file mode 100644 index 00000000000..936b60b7b8f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxClusterService.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.jmx; + +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.jmx.action.GetJmxServiceUrlAction; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +import javax.management.MBeanServerConnection; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; +import java.util.concurrent.ExecutorService; + +import static java.util.concurrent.Executors.*; +import static org.elasticsearch.util.concurrent.DynamicExecutors.*; + +/** + * @author kimchy (Shay Banon) + */ +// TODO Disabled for now. Can be used to mbean proxy other nodes in the cluster from within the same console. Need the jmxruntime_optional jars though.., +public class JmxClusterService extends AbstractComponent { + + private final ClusterService clusterService; + + private final JmxService jmxService; + + private final GetJmxServiceUrlAction getJmxServiceUrlAction; + + private final ExecutorService clusterNodesJmxUpdater; + + public JmxClusterService(Settings settings, ClusterService clusterService, JmxService jmxService, final GetJmxServiceUrlAction getJmxServiceUrlAction) { + super(settings); + this.clusterService = clusterService; + this.jmxService = jmxService; + this.getJmxServiceUrlAction = getJmxServiceUrlAction; + + this.clusterNodesJmxUpdater = newSingleThreadExecutor(daemonThreadFactory(settings, "jmxService#updateTask")); + + if (jmxService.publishUrl() != null) { + clusterService.add(new JmxClusterEventListener()); + for (final Node node : clusterService.state().nodes()) { + clusterNodesJmxUpdater.execute(new Runnable() { + @Override public void run() { + String nodeServiceUrl = getJmxServiceUrlAction.obtainPublishUrl(node); + 
registerNode(node, nodeServiceUrl); + } + }); + } + } + } + + public void close() { + if (clusterNodesJmxUpdater != null) { + clusterNodesJmxUpdater.shutdownNow(); + } + } + + private void registerNode(Node node, String nodeServiceUrl) { + try { + JMXServiceURL jmxServiceURL = new JMXServiceURL(nodeServiceUrl); + JMXConnector jmxConnector = JMXConnectorFactory.connect(jmxServiceURL, null); + + MBeanServerConnection connection = jmxConnector.getMBeanServerConnection(); + +// for (ObjectName objectName : connection.queryNames(null, null)) { +// try { +// MBeanProxy mBeanProxy = new MBeanProxy(remoteName, connection); +// } catch (InstanceAlreadyExistsException e) { +// // ignore +// } catch (Exception e) { +// logger.warn("Failed to register proxy mbean", e); +// } +// } + } catch (Exception e) { + logger.warn("Failed to register node [" + node + "] with serviceUrl [" + nodeServiceUrl + "]", e); + } + } + + private class JmxClusterEventListener implements ClusterStateListener { + @Override public void clusterChanged(ClusterChangedEvent event) { + if (!event.nodesChanged()) { + return; + } + for (final Node node : event.nodesDelta().addedNodes()) { + clusterNodesJmxUpdater.execute(new Runnable() { + @Override public void run() { + String nodeServiceUrl = getJmxServiceUrlAction.obtainPublishUrl(node); + registerNode(node, nodeServiceUrl); + } + }); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxConnectorCreationException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxConnectorCreationException.java new file mode 100644 index 00000000000..e0bb4894b36 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxConnectorCreationException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +/** + * @author kimchy (Shay Banon) + */ +public class JmxConnectorCreationException extends JmxException { + + public JmxConnectorCreationException(String message) { + super(message); + } + + public JmxConnectorCreationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxException.java new file mode 100644 index 00000000000..0c71be220de --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class JmxException extends ElasticSearchException { + + public JmxException(String message) { + super(message); + } + + public JmxException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxModule.java new file mode 100644 index 00000000000..2ccc498eccd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxModule.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.jmx; + +import com.google.inject.AbstractModule; +import com.google.inject.TypeLiteral; +import com.google.inject.matcher.Matchers; +import com.google.inject.spi.InjectionListener; +import com.google.inject.spi.TypeEncounter; +import com.google.inject.spi.TypeListener; +import org.elasticsearch.jmx.action.GetJmxServiceUrlAction; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class JmxModule extends AbstractModule { + + private final Settings settings; + + public JmxModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + JmxService jmxService = new JmxService(Loggers.getLogger(JmxService.class, settings.get("name")), settings); + bind(JmxService.class).toInstance(jmxService); + bind(GetJmxServiceUrlAction.class).asEagerSingleton(); + bindListener(Matchers.any(), new JmxExporterTypeListener(jmxService)); + } + + private static class JmxExporterTypeListener implements TypeListener { + + private final JmxService jmxService; + + private JmxExporterTypeListener(JmxService jmxService) { + this.jmxService = jmxService; + } + + @Override public void hear(TypeLiteral typeLiteral, TypeEncounter typeEncounter) { + Class type = typeLiteral.getRawType(); + if (type.isAnnotationPresent(MBean.class)) { + typeEncounter.register(new JmxExporterInjectionListener(jmxService)); + } + } + } + + private static class JmxExporterInjectionListener implements InjectionListener { + + private final JmxService jmxService; + + private JmxExporterInjectionListener(JmxService jmxService) { + this.jmxService = jmxService; + } + + @Override public void afterInjection(I instance) { + jmxService.registerMBean(instance); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxRegistrationException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxRegistrationException.java 
new file mode 100644 index 00000000000..8b347c2662f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxRegistrationException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +/** + * @author kimchy (Shay Banon) + */ +public class JmxRegistrationException extends JmxException { + + public JmxRegistrationException(String message) { + super(message); + } + + public JmxRegistrationException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxService.java new file mode 100644 index 00000000000..834a9cde733 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/JmxService.java @@ -0,0 +1,241 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +import org.elasticsearch.util.io.HostResolver; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.transport.PortsRange; +import org.slf4j.Logger; + +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanServer; +import javax.management.ObjectName; +import javax.management.remote.JMXConnectorServer; +import javax.management.remote.JMXConnectorServerFactory; +import javax.management.remote.JMXServiceURL; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.rmi.registry.LocateRegistry; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.util.io.HostResolver.*; + +/** + * @author kimchy (Shay Banon) + */ +public class JmxService { + + public static class SettingsConstants { + + public static final String CREATE_CONNECTOR = "jmx.createConnector"; + } + + // we use {jmx.port} without prefix of $ since we don't want it to be resolved as a setting property + + public static final String JMXRMI_URI_PATTERN = "service:jmx:rmi:///jndi/rmi://:{jmx.port}/jmxrmi"; + + public static final String JMXRMI_PUBLISH_URI_PATTERN = "service:jmx:rmi:///jndi/rmi://{jmx.host}:{jmx.port}/jmxrmi"; + + private final Logger logger; + + private final Settings settings; + + private 
final String jmxDomain; + + private String serviceUrl; + + private String publishUrl; + + private final MBeanServer mBeanServer; + + private JMXConnectorServer connectorServer; + + private final CopyOnWriteArrayList constructionMBeans = new CopyOnWriteArrayList(); + + private final CopyOnWriteArrayList registeredMBeans = new CopyOnWriteArrayList(); + + private String nodeDescription; + + private volatile boolean started = false; + + public JmxService(Logger logger, final Settings settings) { + this.logger = logger; + this.settings = settings; + + this.jmxDomain = settings.get("jmx.domain", "{elasticsearch}"); + + this.mBeanServer = ManagementFactory.getPlatformMBeanServer(); + } + + public String serviceUrl() { + return this.serviceUrl; + } + + public String publishUrl() { + return this.publishUrl; + } + + public void connectAndRegister(String nodeDescription) { + if (started) { + return; + } + started = true; + this.nodeDescription = nodeDescription; + if (settings.getAsBoolean(SettingsConstants.CREATE_CONNECTOR, false)) { + final String port = settings.get("jmx.port", "9400-9500"); + + PortsRange portsRange = new PortsRange(port); + final AtomicReference lastException = new AtomicReference(); + boolean success = portsRange.iterate(new PortsRange.PortCallback() { + @Override public boolean onPortNumber(int portNumber) { + try { + LocateRegistry.createRegistry(portNumber); + serviceUrl = settings.get("jmx.serviceUrl", JMXRMI_URI_PATTERN).replace("{jmx.port}", Integer.toString(portNumber)); + // Create the JMX service URL. + JMXServiceURL url = new JMXServiceURL(serviceUrl); + // Create the connector server now. 
+ connectorServer = JMXConnectorServerFactory.newJMXConnectorServer(url, settings.getAsMap(), mBeanServer); + connectorServer.start(); + + // create the publish url + String publishHost = HostResolver.resultPublishHostAddress(settings.get("jmx.publishHost"), settings, LOCAL_IP).getHostAddress(); + publishUrl = settings.get("jmx.publishUrl", JMXRMI_PUBLISH_URI_PATTERN).replace("{jmx.port}", Integer.toString(portNumber)).replace("{jmx.host}", publishHost); + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + } + }); + if (!success) { + throw new JmxConnectorCreationException("Failed to bind to [" + port + "]", lastException.get()); + } + logger.info("boundAddress [{}], publishAddress [{}]", serviceUrl, publishUrl); + } + + for (ResourceDMBean resource : constructionMBeans) { + register(resource); + } + } + + public void registerMBean(Object instance) { + ResourceDMBean resourceDMBean = new ResourceDMBean(instance, logger); + if (!resourceDMBean.isManagedResource()) { + return; + } + if (!started) { + constructionMBeans.add(resourceDMBean); + return; + } + register(resourceDMBean); + } + + public void unregisterGroup(String groupName) { + for (ResourceDMBean resource : registeredMBeans) { + if (!groupName.equals(resource.getGroupName())) { + continue; + } + + registeredMBeans.remove(resource); + + String resourceName = resource.getFullObjectName(); + try { + ObjectName objectName = new ObjectName(getObjectName(resourceName)); + if (mBeanServer.isRegistered(objectName)) { + mBeanServer.unregisterMBean(objectName); + if (logger.isTraceEnabled()) { + logger.trace("Unregistered " + objectName); + } + } + } catch (Exception e) { + logger.warn("Failed to unregister " + resource.getFullObjectName()); + } + } + } + + + public void close() { + if (!started) { + return; + } + started = false; + // unregister mbeans + for (ResourceDMBean resource : registeredMBeans) { + String resourceName = resource.getFullObjectName(); + try { + ObjectName 
objectName = new ObjectName(getObjectName(resourceName)); + if (mBeanServer.isRegistered(objectName)) { + mBeanServer.unregisterMBean(objectName); + if (logger.isTraceEnabled()) { + logger.trace("Unregistered " + objectName); + } + } + } catch (Exception e) { + logger.warn("Failed to unregister " + resource.getFullObjectName()); + } + } + if (connectorServer != null) { + try { + connectorServer.stop(); + } catch (IOException e) { + logger.debug("Failed to close connector", e); + } + } + } + + private void register(ResourceDMBean resourceDMBean) throws JmxRegistrationException { + try { + String resourceName = resourceDMBean.getFullObjectName(); + ObjectName objectName = new ObjectName(getObjectName(resourceName)); + if (!mBeanServer.isRegistered(objectName)) { + try { + mBeanServer.registerMBean(resourceDMBean, objectName); + registeredMBeans.add(resourceDMBean); + if (logger.isTraceEnabled()) { + logger.trace("Registered " + resourceDMBean + " under " + objectName); + } + } catch (InstanceAlreadyExistsException e) { + //this might happen if multiple instances are trying to concurrently register same objectName + logger.warn("Could not register object with name:" + objectName + "(" + e.getMessage() + ")"); + } + } else { + logger.warn("Could not register object with name: " + objectName); + } + } catch (Exception e) { + logger.warn("Could not register object with name: " + resourceDMBean.getFullObjectName()); + } + } + + private String getObjectName(String resourceName) { + return getObjectName(jmxDomain, resourceName); + } + + private String getObjectName(String jmxDomain, String resourceName) { + String type; + if (settings.get("name") != null) { + type = settings.get("name") + " [" + nodeDescription + "]"; + } else { + type = nodeDescription; + } + type = type.replace(':', '_').replace('/', '_').replace('.', '_').replace(',', ' ').replace('\"', ' '); + return jmxDomain + ":" + "type=" + type + "," + resourceName; + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/MBean.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/MBean.java new file mode 100644 index 00000000000..2efd4d636a0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/MBean.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +import java.lang.annotation.*; + +/** + * @author kimchy (Shay Banon) + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE}) +@Inherited +public @interface MBean { + String description() default ""; + + String objectName() default ""; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedAttribute.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedAttribute.java new file mode 100644 index 00000000000..09948ee3f94 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedAttribute.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author kimchy (Shay Banon) + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD, ElementType.FIELD}) +public @interface ManagedAttribute { + String description() default ""; + + boolean writable() default false; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedGroupName.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedGroupName.java new file mode 100644 index 00000000000..e435a9cc144 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedGroupName.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author kimchy (Shay Banon) + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface ManagedGroupName { +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedOperation.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedOperation.java new file mode 100644 index 00000000000..b5c6aa12d46 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ManagedOperation.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.jmx; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author kimchy (Shay Banon) + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface ManagedOperation { + String description() default ""; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ResourceDMBean.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ResourceDMBean.java new file mode 100644 index 00000000000..37b5b0dfb96 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/ResourceDMBean.java @@ -0,0 +1,570 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.jmx; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.util.Classes; +import org.elasticsearch.util.MapBuilder; +import org.elasticsearch.util.Preconditions; +import org.elasticsearch.util.Strings; +import org.slf4j.Logger; + +import javax.management.*; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.elasticsearch.util.MapBuilder.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ResourceDMBean implements DynamicMBean { + private static final Class[] primitives = {int.class, byte.class, short.class, long.class, + float.class, double.class, boolean.class, char.class}; + + private final Logger logger; + + private final Object obj; + + private final String objectName; + + private final String groupName; + + private final String fullObjectName; + + private final String description; + + private final MBeanAttributeInfo[] attributesInfo; + + private final MBeanOperationInfo[] operationsInfo; + + private final MBeanInfo mBeanInfo; + + private final ImmutableMap attributes; + + private final ImmutableList operations; + + public ResourceDMBean(Object instance, Logger logger) { + Preconditions.checkNotNull(instance, "Cannot make an MBean wrapper for null instance"); + this.obj = instance; + this.logger = logger; + + MapBuilder attributesBuilder = newMapBuilder(); + List operationsBuilder = new ArrayList(); + + MBean mBean = obj.getClass().getAnnotation(MBean.class); + + this.groupName = findGroupName(); + + if (mBean != null && Strings.hasLength(mBean.objectName())) { + objectName = mBean.objectName(); + } else { + if (Strings.hasLength(groupName)) { + // we have something in the group object name, don't put anything in the object name + 
objectName = ""; + } else { + objectName = obj.getClass().getSimpleName(); + } + } + + StringBuilder sb = new StringBuilder(groupName); + if (Strings.hasLength(groupName) && Strings.hasLength(objectName)) { + sb.append(","); + } + sb.append(objectName); + this.fullObjectName = sb.toString(); + + this.description = findDescription(); + findFields(attributesBuilder); + findMethods(attributesBuilder, operationsBuilder); + + this.attributes = attributesBuilder.immutableMap(); + this.operations = ImmutableList.copyOf(operationsBuilder); + + attributesInfo = new MBeanAttributeInfo[attributes.size()]; + int i = 0; + + MBeanAttributeInfo info; + for (AttributeEntry entry : attributes.values()) { + info = entry.getInfo(); + attributesInfo[i++] = info; + if (logger.isInfoEnabled()) { + logger.trace("Attribute " + info.getName() + "[r=" + info.isReadable() + ",w=" + + info.isWritable() + ",is=" + info.isIs() + ",type=" + info.getType() + "]"); + } + } + + operationsInfo = new MBeanOperationInfo[operations.size()]; + operations.toArray(operationsInfo); + + if (logger.isTraceEnabled()) { + if (operations.size() > 0) + logger.trace("Operations are:"); + for (MBeanOperationInfo op : operationsInfo) { + logger.trace("Operation " + op.getReturnType() + " " + op.getName()); + } + } + + this.mBeanInfo = new MBeanInfo(getObject().getClass().getCanonicalName(), description, attributesInfo, null, operationsInfo, null); + } + + public MBeanInfo getMBeanInfo() { + return mBeanInfo; + } + + public synchronized Object getAttribute(String name) throws AttributeNotFoundException { + if (name == null || name.length() == 0) + throw new NullPointerException("Invalid attribute requested " + name); + + Attribute attr = getNamedAttribute(name); + if (attr == null) { + throw new AttributeNotFoundException("Unknown attribute '" + name + + "'. 
Known attributes names are: " + attributes.keySet()); + } + return attr.getValue(); + } + + public synchronized void setAttribute(Attribute attribute) { + if (attribute == null || attribute.getName() == null) + throw new NullPointerException("Invalid attribute requested " + attribute); + + setNamedAttribute(attribute); + } + + public synchronized AttributeList getAttributes(String[] names) { + AttributeList al = new AttributeList(); + for (String name : names) { + Attribute attr = getNamedAttribute(name); + if (attr != null) { + al.add(attr); + } else { + logger.warn("Did not find attribute " + name); + } + } + return al; + } + + public synchronized AttributeList setAttributes(AttributeList list) { + AttributeList results = new AttributeList(); + for (Object aList : list) { + Attribute attr = (Attribute) aList; + + if (setNamedAttribute(attr)) { + results.add(attr); + } else { + if (logger.isWarnEnabled()) { + logger.warn("Failed to update attribute name " + attr.getName() + " with value " + + attr.getValue()); + } + } + } + return results; + } + + public Object invoke(String name, Object[] args, String[] sig) throws MBeanException, + ReflectionException { + if (logger.isDebugEnabled()) { + logger.debug("Invoke method called on " + name); + } + + MBeanOperationInfo opInfo = null; + for (MBeanOperationInfo op : operationsInfo) { + if (op.getName().equals(name)) { + opInfo = op; + break; + } + } + + if (opInfo == null) { + final String msg = "Operation " + name + " not in ModelMBeanInfo"; + throw new MBeanException(new ServiceNotFoundException(msg), msg); + } + + try { + Class[] classes = new Class[sig.length]; + for (int i = 0; i < classes.length; i++) { + classes[i] = getClassForName(sig[i]); + } + Method method = getObject().getClass().getMethod(name, classes); + return method.invoke(getObject(), args); + } catch (Exception e) { + throw new MBeanException(e); + } + } + + Object getObject() { + return obj; + } + + private Class getClassForName(String name) throws 
ClassNotFoundException { + try { + return Classes.getDefaultClassLoader().loadClass(name); + } catch (ClassNotFoundException cnfe) { + // Could be a primitive - let's check + for (Class primitive : primitives) { + if (name.equals(primitive.getName())) { + return primitive; + } + } + } + throw new ClassNotFoundException("Class " + name + " cannot be found"); + } + + private String findGroupName() { + Class objClass = getObject().getClass(); + while (objClass != Object.class) { + Method[] methods = objClass.getDeclaredMethods(); + for (Method method : methods) { + if (method.isAnnotationPresent(ManagedGroupName.class)) { + try { + method.setAccessible(true); + return (String) method.invoke(getObject()); + } catch (Exception e) { + logger.warn("Failed to get group name for [" + getObject() + "]", e); + } + } + } + objClass = objClass.getSuperclass(); + } + return ""; + } + + private String findDescription() { + MBean mbean = getObject().getClass().getAnnotation(MBean.class); + if (mbean != null && mbean.description() != null && mbean.description().trim().length() > 0) { + return mbean.description(); + } + return ""; + } + + private void findMethods(MapBuilder attributesBuilder, List ops) { + // find all methods but don't include methods from Object class + List methods = new ArrayList(Arrays.asList(getObject().getClass().getMethods())); + List objectMethods = new ArrayList(Arrays.asList(Object.class.getMethods())); + methods.removeAll(objectMethods); + + for (Method method : methods) { + // does method have @ManagedAttribute annotation? 
+ ManagedAttribute attr = method.getAnnotation(ManagedAttribute.class); + if (attr != null) { + String methodName = method.getName(); + if (!methodName.startsWith("get") && !methodName.startsWith("set") && !methodName.startsWith("is")) { + if (logger.isWarnEnabled()) + logger.warn("method name " + methodName + + " doesn't start with \"get\", \"set\", or \"is\"" + + ", but is annotated with @ManagedAttribute: will be ignored"); + } else { + MBeanAttributeInfo info; + String attributeName = null; + boolean writeAttribute = false; + if (isSetMethod(method)) { // setter + attributeName = methodName.substring(3); + info = new MBeanAttributeInfo(attributeName, method.getParameterTypes()[0] + .getCanonicalName(), attr.description(), true, true, false); + writeAttribute = true; + } else { // getter + if (method.getParameterTypes().length == 0 + && method.getReturnType() != java.lang.Void.TYPE) { + boolean hasSetter = attributesBuilder.containsKey(attributeName); + // we found is method + if (methodName.startsWith("is")) { + attributeName = methodName.substring(2); + info = new MBeanAttributeInfo(attributeName, method.getReturnType() + .getCanonicalName(), attr.description(), true, hasSetter, true); + } else { + // this has to be get + attributeName = methodName.substring(3); + info = new MBeanAttributeInfo(attributeName, method.getReturnType() + .getCanonicalName(), attr.description(), true, hasSetter, false); + } + } else { + if (logger.isWarnEnabled()) { + logger.warn("Method " + method.getName() + + " must have a valid return type and zero parameters"); + } + continue; + } + } + + AttributeEntry ae = attributesBuilder.get(attributeName); + // is it a read method? 
+ if (!writeAttribute) { + // we already have annotated field as read + if (ae instanceof FieldAttributeEntry && ae.getInfo().isReadable()) { + logger.warn("not adding annotated method " + method + + " since we already have read attribute"); + } + // we already have annotated set method + else if (ae instanceof MethodAttributeEntry) { + MethodAttributeEntry mae = (MethodAttributeEntry) ae; + if (mae.hasSetMethod()) { + attributesBuilder.put(attributeName, new MethodAttributeEntry(mae.getInfo(), mae + .getSetMethod(), method)); + } + } // we don't have such entry + else { + attributesBuilder.put(attributeName, new MethodAttributeEntry(info, null, method)); + } + }// is it a set method? + else { + if (ae instanceof FieldAttributeEntry) { + // we already have annotated field as write + if (ae.getInfo().isWritable()) { + logger.warn("Not adding annotated method " + methodName + + " since we already have writable attribute"); + } else { + // we already have annotated field as read + // lets make the field writable + Field f = ((FieldAttributeEntry) ae).getField(); + MBeanAttributeInfo i = new MBeanAttributeInfo(ae.getInfo().getName(), + f.getType().getCanonicalName(), attr.description(), true, + !Modifier.isFinal(f.getModifiers()), false); + attributesBuilder.put(attributeName, new FieldAttributeEntry(i, f)); + } + } + // we already have annotated getOrIs method + else if (ae instanceof MethodAttributeEntry) { + MethodAttributeEntry mae = (MethodAttributeEntry) ae; + if (mae.hasIsOrGetMethod()) { + attributesBuilder.put(attributeName, new MethodAttributeEntry(info, method, mae + .getIsOrGetMethod())); + } + } // we don't have such entry + else { + attributesBuilder.put(attributeName, new MethodAttributeEntry(info, method, null)); + } + } + } + } else if (method.isAnnotationPresent(ManagedOperation.class)) { + ManagedOperation op = method.getAnnotation(ManagedOperation.class); + String attName = method.getName(); + if (isSetMethod(method) || isGetMethod(method)) { + 
attName = attName.substring(3); + } else if (isIsMethod(method)) { + attName = attName.substring(2); + } + // expose unless we already exposed matching attribute field + boolean isAlreadyExposed = attributesBuilder.containsKey(attName); + if (!isAlreadyExposed) { + ops.add(new MBeanOperationInfo(op != null ? op.description() : "", method)); + } + } + } + } + + private boolean isSetMethod(Method method) { + return (method.getName().startsWith("set") && method.getParameterTypes().length == 1 && method.getReturnType() == java.lang.Void.TYPE); + } + + private boolean isGetMethod(Method method) { + return (method.getParameterTypes().length == 0 && method.getReturnType() != java.lang.Void.TYPE && method.getName().startsWith("get")); + } + + private boolean isIsMethod(Method method) { + return (method.getParameterTypes().length == 0 && (method.getReturnType() == boolean.class || method.getReturnType() == Boolean.class) && method.getName().startsWith("is")); + } + + private void findFields(MapBuilder attributesBuilder) { + // traverse class hierarchy and find all annotated fields + for (Class clazz = getObject().getClass(); clazz != null; clazz = clazz.getSuperclass()) { + Field[] fields = clazz.getDeclaredFields(); + for (Field field : fields) { + ManagedAttribute attr = field.getAnnotation(ManagedAttribute.class); + if (attr != null) { + String fieldName = renameToJavaCodingConvention(field.getName()); + MBeanAttributeInfo info = new MBeanAttributeInfo(fieldName, field.getType().getCanonicalName(), + attr.description(), true, !Modifier.isFinal(field.getModifiers()) && attr.writable(), false); + attributesBuilder.put(fieldName, new FieldAttributeEntry(info, field)); + } + } + } + } + + private Attribute getNamedAttribute(String name) { + Attribute result = null; + AttributeEntry entry = attributes.get(name); + if (entry != null) { + MBeanAttributeInfo i = entry.getInfo(); + try { + result = new Attribute(name, entry.invoke(null)); + if (logger.isDebugEnabled()) + 
logger.debug("Attribute " + name + " has r=" + i.isReadable() + ",w=" + + i.isWritable() + ",is=" + i.isIs() + " and value " + + result.getValue()); + } catch (Exception e) { + logger.debug("Exception while reading value of attribute " + name, e); + } + } else { + logger.warn("Did not find queried attribute with name " + name); + } + return result; + } + + private boolean setNamedAttribute(Attribute attribute) { + boolean result = false; + if (logger.isDebugEnabled()) + logger.debug("Invoking set on attribute " + attribute.getName() + " with value " + attribute.getValue()); + + AttributeEntry entry = attributes.get(attribute.getName()); + if (entry != null) { + try { + entry.invoke(attribute); + result = true; + } catch (Exception e) { + logger.warn("Exception while writing value for attribute " + attribute.getName(), e); + } + } else { + logger.warn("Could not invoke set on attribute " + attribute.getName() + " with value " + + attribute.getValue()); + } + return result; + } + + private String renameToJavaCodingConvention(String fieldName) { + if (fieldName.contains("_")) { + Pattern p = Pattern.compile("_."); + Matcher m = p.matcher(fieldName); + StringBuffer sb = new StringBuffer(); + while (m.find()) { + m.appendReplacement(sb, fieldName.substring(m.end() - 1, m.end()).toUpperCase()); + } + m.appendTail(sb); + char first = sb.charAt(0); + if (Character.isLowerCase(first)) { + sb.setCharAt(0, Character.toUpperCase(first)); + } + return sb.toString(); + } else { + if (Character.isLowerCase(fieldName.charAt(0))) { + return fieldName.substring(0, 1).toUpperCase() + fieldName.substring(1); + } else { + return fieldName; + } + } + } + + private class MethodAttributeEntry implements AttributeEntry { + + final MBeanAttributeInfo info; + + final Method isOrGetmethod; + + final Method setMethod; + + public MethodAttributeEntry(final MBeanAttributeInfo info, final Method setMethod, + final Method isOrGetMethod) { + super(); + this.info = info; + this.setMethod = 
setMethod; + this.isOrGetmethod = isOrGetMethod; + } + + public Object invoke(Attribute a) throws Exception { + if (a == null && isOrGetmethod != null) + return isOrGetmethod.invoke(getObject()); + else if (a != null && setMethod != null) + return setMethod.invoke(getObject(), a.getValue()); + else + return null; + } + + public MBeanAttributeInfo getInfo() { + return info; + } + + public boolean hasIsOrGetMethod() { + return isOrGetmethod != null; + } + + public boolean hasSetMethod() { + return setMethod != null; + } + + public Method getIsOrGetMethod() { + return isOrGetmethod; + } + + public Method getSetMethod() { + return setMethod; + } + } + + private class FieldAttributeEntry implements AttributeEntry { + + private final MBeanAttributeInfo info; + + private final Field field; + + public FieldAttributeEntry(final MBeanAttributeInfo info, final Field field) { + super(); + this.info = info; + this.field = field; + if (!field.isAccessible()) { + field.setAccessible(true); + } + } + + public Field getField() { + return field; + } + + public Object invoke(Attribute a) throws Exception { + if (a == null) { + return field.get(getObject()); + } else { + field.set(getObject(), a.getValue()); + return null; + } + } + + public MBeanAttributeInfo getInfo() { + return info; + } + } + + private interface AttributeEntry { + public Object invoke(Attribute a) throws Exception; + + public MBeanAttributeInfo getInfo(); + } + + public boolean isManagedResource() { + return !attributes.isEmpty() || !operations.isEmpty(); + } + + public String getFullObjectName() { + return this.fullObjectName; + } + + public String getObjectName() { + return this.objectName; + } + + public String getGroupName() { + return this.groupName; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/action/GetJmxServiceUrlAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/action/GetJmxServiceUrlAction.java new file mode 100644 index 
00000000000..7f09ba1e8c1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/jmx/action/GetJmxServiceUrlAction.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.jmx.action; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.jmx.JmxService; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.FutureTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.io.StringStreamable; +import org.elasticsearch.util.io.VoidStreamable; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class GetJmxServiceUrlAction extends AbstractComponent { + + private final JmxService jmxService; + + private final TransportService transportService; + + private final ClusterService clusterService; + + @Inject public GetJmxServiceUrlAction(Settings 
settings, JmxService jmxService, + TransportService transportService, ClusterService clusterService) { + super(settings); + this.jmxService = jmxService; + this.transportService = transportService; + this.clusterService = clusterService; + + transportService.registerHandler(GetJmxServiceUrlTransportHandler.ACTION, new GetJmxServiceUrlTransportHandler()); + } + + public String obtainPublishUrl(final Node node) throws ElasticSearchException { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + return jmxService.publishUrl(); + } else { + return transportService.submitRequest(node, GetJmxServiceUrlTransportHandler.ACTION, VoidStreamable.INSTANCE, new FutureTransportResponseHandler() { + @Override public StringStreamable newInstance() { + return new StringStreamable(); + } + }).txGet().get(); + } + } + + private class GetJmxServiceUrlTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "jmx/publishUrl"; + + @Override public VoidStreamable newInstance() { + return VoidStreamable.INSTANCE; + } + + @Override public void messageReceived(VoidStreamable request, TransportChannel channel) throws Exception { + channel.sendResponse(new StringStreamable(jmxService.publishUrl())); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/MonitorModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/MonitorModule.java new file mode 100644 index 00000000000..0f7c7fe0fa9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/MonitorModule.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor; + +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; +import com.google.inject.assistedinject.FactoryProvider; +import com.google.inject.multibindings.MapBinder; +import org.elasticsearch.monitor.dump.DumpContributorFactory; +import org.elasticsearch.monitor.dump.DumpMonitorService; +import org.elasticsearch.monitor.dump.cluster.ClusterDumpContributor; +import org.elasticsearch.monitor.dump.heap.HeapDumpContributor; +import org.elasticsearch.monitor.dump.summary.SummaryDumpContributor; +import org.elasticsearch.monitor.dump.thread.ThreadDumpContributor; +import org.elasticsearch.monitor.jvm.JvmMonitorService; +import org.elasticsearch.monitor.memory.MemoryMonitor; +import org.elasticsearch.monitor.memory.MemoryMonitorService; +import org.elasticsearch.monitor.memory.alpha.AlphaMemoryMonitor; +import org.elasticsearch.util.settings.Settings; + +import java.util.Map; + +import static org.elasticsearch.monitor.dump.cluster.ClusterDumpContributor.*; +import static org.elasticsearch.monitor.dump.heap.HeapDumpContributor.*; +import static org.elasticsearch.monitor.dump.summary.SummaryDumpContributor.*; +import static org.elasticsearch.monitor.dump.thread.ThreadDumpContributor.*; + +/** + * @author kimchy (Shay Banon) + */ +public class MonitorModule extends AbstractModule { + + public static final class 
MonitorSettings { + public static final String MEMORY_MANAGER_TYPE = "monitor.memory.type"; + } + + private final Settings settings; + + public MonitorModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(MemoryMonitor.class) + .to(settings.getAsClass(MonitorSettings.MEMORY_MANAGER_TYPE, AlphaMemoryMonitor.class, "org.elasticsearch.monitor.memory.", "MemoryMonitor")) + .asEagerSingleton(); + bind(MemoryMonitorService.class).asEagerSingleton(); + + bind(JvmMonitorService.class).asEagerSingleton(); + + MapBinder tokenFilterBinder + = MapBinder.newMapBinder(binder(), String.class, DumpContributorFactory.class); + + Map dumpContSettings = settings.getGroups("monitor.dump"); + for (Map.Entry entry : dumpContSettings.entrySet()) { + String dumpContributorName = entry.getKey(); + Settings dumpContributorSettings = entry.getValue(); + + Class type = dumpContributorSettings.getAsClass("type", null, "org.elasticsearch.monitor.dump." + dumpContributorName + ".", "DumpContributor"); + if (type == null) { + throw new IllegalArgumentException("Dump Contributor [" + dumpContributorName + "] must have a type associated with it"); + } + tokenFilterBinder.addBinding(dumpContributorName).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, type)).in(Scopes.SINGLETON); + } + // add default + if (!dumpContSettings.containsKey(SUMMARY)) { + tokenFilterBinder.addBinding(SUMMARY).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, SummaryDumpContributor.class)).in(Scopes.SINGLETON); + } + if (!dumpContSettings.containsKey(THREAD_DUMP)) { + tokenFilterBinder.addBinding(THREAD_DUMP).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, ThreadDumpContributor.class)).in(Scopes.SINGLETON); + } + if (!dumpContSettings.containsKey(HEAP_DUMP)) { + tokenFilterBinder.addBinding(HEAP_DUMP).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, 
HeapDumpContributor.class)).in(Scopes.SINGLETON); + } + if (!dumpContSettings.containsKey(CLUSTER)) { + tokenFilterBinder.addBinding(CLUSTER).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, ClusterDumpContributor.class)).in(Scopes.SINGLETON); + } + + + bind(DumpMonitorService.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/MonitorService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/MonitorService.java new file mode 100644 index 00000000000..2602ab5f352 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/MonitorService.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.monitor;

import com.google.inject.Inject;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.monitor.jvm.JvmMonitorService;
import org.elasticsearch.monitor.memory.MemoryMonitorService;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.component.Lifecycle;
import org.elasticsearch.util.component.LifecycleComponent;
import org.elasticsearch.util.settings.Settings;

/**
 * Facade over the node-level monitors (memory and JVM), driving both through a
 * single start/stop/close lifecycle.
 *
 * @author kimchy (Shay Banon)
 */
public class MonitorService extends AbstractComponent implements LifecycleComponent {

    // Tracks the component state; moveTo*() returning false means the
    // transition does not apply (e.g. already started), treated as a no-op below.
    private final Lifecycle lifecycle = new Lifecycle();

    private final MemoryMonitorService memoryMonitorService;

    private final JvmMonitorService jvmMonitorService;

    @Inject public MonitorService(Settings settings, MemoryMonitorService memoryMonitorService, JvmMonitorService jvmMonitorService) {
        super(settings);
        this.memoryMonitorService = memoryMonitorService;
        this.jvmMonitorService = jvmMonitorService;
    }

    @Override public Lifecycle.State lifecycleState() {
        return lifecycle.state();
    }

    /**
     * Starts both child monitors (memory first, then JVM); no-op if already started.
     */
    @Override public MonitorService start() throws ElasticSearchException {
        if (!lifecycle.moveToStarted()) {
            return this;
        }
        memoryMonitorService.start();
        jvmMonitorService.start();
        return this;
    }

    /**
     * Stops both child monitors; no-op if not in a stoppable state.
     */
    @Override public MonitorService stop() throws ElasticSearchException {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        memoryMonitorService.stop();
        jvmMonitorService.stop();
        return this;
    }

    /**
     * Stops (if still started) and then closes both child monitors.
     */
    public void close() {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }
        memoryMonitorService.close();
        jvmMonitorService.close();
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.monitor.dump;

import com.google.common.collect.ImmutableMap;
import org.elasticsearch.util.Nullable;

import java.io.*;
import java.util.ArrayList;
import java.util.Map;

/**
 * Base {@link Dump} implementation: holds the dump metadata (timestamp, cause,
 * context) and tracks every file created for the dump, leaving the actual file
 * creation strategy to subclasses via {@link #doCreateFile(String)}.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class AbstractDump implements Dump {

    private final long timestamp;

    private final String cause;

    private final Map<String, Object> context;

    // every file handed out by createFile(), in creation order
    private final ArrayList<File> files = new ArrayList<File>();

    protected AbstractDump(long timestamp, String cause, @Nullable Map<String, Object> context) {
        this.timestamp = timestamp;
        this.cause = cause;
        // normalize a missing context to an empty immutable map
        this.context = context != null ? context : ImmutableMap.<String, Object>of();
    }

    @Override public long timestamp() {
        return timestamp;
    }

    @Override public Map<String, Object> context() {
        return this.context;
    }

    @Override public String cause() {
        return cause;
    }

    @Override public File[] files() {
        return files.toArray(new File[files.size()]);
    }

    @Override public File createFile(String name) throws DumpException {
        File created = doCreateFile(name);
        files.add(created);
        return created;
    }

    /** Creates the physical file for the given logical name; registration is handled by {@link #createFile}. */
    protected abstract File doCreateFile(String name) throws DumpException;

    @Override public OutputStream createFileOutputStream(String name) throws DumpException {
        try {
            return new FileOutputStream(createFile(name));
        } catch (FileNotFoundException e) {
            throw new DumpException("Failed to create file [" + name + "]", e);
        }
    }

    @Override public Writer createFileWriter(String name) throws DumpException {
        try {
            // NOTE(review): FileWriter uses the platform default charset — confirm that is intended
            return new FileWriter(createFile(name));
        } catch (IOException e) {
            throw new DumpException("Failed to create file [" + name + "]", e);
        }
    }
}
 */

package org.elasticsearch.monitor.dump;

import java.io.File;
import java.io.OutputStream;
import java.io.Writer;
import java.util.Map;

/**
 * A single dump in progress: carries metadata about why it was taken and acts
 * as a factory/registry for the files contributors write into it.
 *
 * @author kimchy (Shay Banon)
 */
public interface Dump {

    // time the dump was taken — presumably epoch millis; confirm with callers
    long timestamp();

    // arbitrary key/value metadata attached to the dump
    Map context();

    // human readable reason the dump was triggered
    String cause();

    /** Creates and registers a new file within the dump. */
    File createFile(String name) throws DumpException;

    /** Creates and registers a new file, returning a character writer to it. */
    Writer createFileWriter(String name) throws DumpException;

    /** Creates and registers a new file, returning a byte stream to it. */
    OutputStream createFileOutputStream(String name) throws DumpException;

    /** All files created for this dump so far. */
    File[] files();

    /** Marks the dump as complete. */
    void finish() throws DumpException;
}
 */

package org.elasticsearch.monitor.dump;

/**
 * Thrown when a single {@link DumpContributor} fails to add its content to a
 * dump; carries the name of the failing contributor.
 *
 * @author kimchy (Shay Banon)
 */
public class DumpContributionFailedException extends DumpException {

    // name of the contributor whose contribution failed
    private final String name;

    public DumpContributionFailedException(String name, String msg) {
        this(name, msg, null);
    }

    public DumpContributionFailedException(String name, String msg, Throwable cause) {
        // prefix the message with the contributor name for context
        super(name + ": " + msg, cause);
        this.name = name;
    }

    /** The name of the contributor that failed. */
    public String name() {
        return this.name;
    }
}
 */

package org.elasticsearch.monitor.dump;

/**
 * A named participant in dump generation; each contributor adds its own
 * content (files) to the {@link Dump} it is handed.
 *
 * @author kimchy (Shay Banon)
 */
public interface DumpContributor {

    /** The logical name of this contributor (the key it is registered under). */
    String getName();

    /**
     * Adds this contributor's content to the given dump.
     *
     * @throws DumpContributionFailedException if the contribution could not be produced
     */
    void contribute(Dump dump) throws DumpContributionFailedException;
}
 */

package org.elasticsearch.monitor.dump;

import org.elasticsearch.util.settings.Settings;

/**
 * Factory for {@link DumpContributor} instances; bound per contributor name by
 * the monitor module wiring.
 *
 * @author kimchy (Shay Banon)
 */
public interface DumpContributorFactory {

    /** Creates a contributor with the given name and its (group) settings. */
    DumpContributor create(String name, Settings settings);
}
 */

package org.elasticsearch.monitor.dump;

import org.elasticsearch.ElasticSearchException;

/**
 * Base exception for failures in the dump subsystem.
 *
 * @author kimchy (Shay Banon)
 */
public class DumpException extends ElasticSearchException {

    public DumpException(String msg) {
        super(msg);
    }

    public DumpException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
+ */ + +package org.elasticsearch.monitor.dump; + +/** + * @author kimchy (Shay Banon) + */ +public class DumpGenerationFailedException extends DumpException { + + public DumpGenerationFailedException(String msg) { + super(msg); + } + + public DumpGenerationFailedException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java new file mode 100644 index 00000000000..9fdf5270066 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.dump; + +import org.elasticsearch.util.Nullable; + +import java.io.File; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public interface DumpGenerator { + + Result generateDump(String cause, @Nullable Map context) throws DumpGenerationFailedException; + + Result generateDump(String cause, @Nullable Map context, String... 
contributors) throws DumpGenerationFailedException; + + static class Result { + private final File location; + private Iterable failedContributors; + + public Result(File location, Iterable failedContributors) { + this.location = location; + this.failedContributors = failedContributors; + } + + public String location() { + return location.toString(); + } + + public Iterable failedContributors() { + return failedContributors; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java new file mode 100644 index 00000000000..7997171a6ff --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.dump; + +import com.google.inject.Inject; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.env.Environment; +import org.elasticsearch.monitor.dump.heap.HeapDumpContributor; +import org.elasticsearch.monitor.dump.summary.SummaryDumpContributor; +import org.elasticsearch.monitor.dump.thread.ThreadDumpContributor; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.settings.Settings; + +import java.io.File; +import java.util.Map; + +import static com.google.common.collect.Maps.*; +import static org.elasticsearch.monitor.dump.heap.HeapDumpContributor.*; +import static org.elasticsearch.monitor.dump.summary.SummaryDumpContributor.*; +import static org.elasticsearch.monitor.dump.thread.ThreadDumpContributor.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DumpMonitorService extends AbstractComponent { + + private final String dumpLocation; + + private final DumpGenerator generator; + + private final ClusterService clusterService; + private final Map contSettings; + private final Map contributors; + private final File workFile; + + public DumpMonitorService() { + this(EMPTY_SETTINGS, new Environment(EMPTY_SETTINGS), null, null); + } + + @Inject public DumpMonitorService(Settings settings, Environment environment, + @Nullable ClusterService clusterService, @Nullable Map contributors) { + super(settings); + this.clusterService = clusterService; + this.contributors = contributors; + contSettings = settings.getGroups("monitor.dump"); + workFile = environment.workWithClusterFile(); + + this.dumpLocation = settings.get("dumpLocation"); + + File dumpLocationFile; + if (dumpLocation != null) { + dumpLocationFile = new File(dumpLocation); + } else { + dumpLocationFile = new File(workFile, "dump"); + } + boolean success = dumpLocationFile.mkdirs(); 
+ + Map contributorMap = newHashMap(); + if (contributors != null) { + for (Map.Entry entry : contributors.entrySet()) { + String contName = entry.getKey(); + DumpContributorFactory dumpContributorFactory = entry.getValue(); + + Settings analyzerSettings = contSettings.get(contName); + if (analyzerSettings == null) { + analyzerSettings = EMPTY_SETTINGS; + } + + DumpContributor analyzerFactory = dumpContributorFactory.create(contName, analyzerSettings); + contributorMap.put(contName, analyzerFactory); + } + } + if (!contributorMap.containsKey(SUMMARY)) { + contributorMap.put(SUMMARY, new SummaryDumpContributor(SUMMARY, EMPTY_SETTINGS)); + } + if (!contributorMap.containsKey(HEAP_DUMP)) { + contributorMap.put(HEAP_DUMP, new HeapDumpContributor(HEAP_DUMP, EMPTY_SETTINGS)); + } + if (!contributorMap.containsKey(THREAD_DUMP)) { + contributorMap.put(THREAD_DUMP, new ThreadDumpContributor(THREAD_DUMP, EMPTY_SETTINGS)); + } + generator = new SimpleDumpGenerator(dumpLocationFile, contributorMap); + } + + public DumpGenerator.Result generateDump(String cause, @Nullable Map context) throws DumpGenerationFailedException { + return generator.generateDump(cause, fillContextMap(context)); + } + + public DumpGenerator.Result generateDump(String cause, @Nullable Map context, String... 
contributors) throws DumpGenerationFailedException { + return generator.generateDump(cause, fillContextMap(context), contributors); + } + + private Map fillContextMap(Map context) { + if (context == null) { + context = newHashMap(); + } + if (clusterService != null) { + context.put("localNode", clusterService.state().nodes().localNode()); + } + return context; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java new file mode 100644 index 00000000000..810b73d0d15 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.dump; + +import org.elasticsearch.util.Nullable; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleDump extends AbstractDump { + + private final File location; + + public SimpleDump(long timestamp, String cause, @Nullable Map context, File location) throws FileNotFoundException { + super(timestamp, cause, context); + this.location = location; + } + + @Override protected File doCreateFile(String name) throws DumpException { + return new File(location, name); + } + + @Override public void finish() throws DumpException { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java new file mode 100644 index 00000000000..af97d7389b5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.dump; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.util.Nullable; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.ArrayList; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleDumpGenerator implements DumpGenerator { + + private final File dumpLocation; + + private final ImmutableMap contributors; + + public SimpleDumpGenerator(File dumpLocation, Map contributors) { + this.dumpLocation = dumpLocation; + this.contributors = ImmutableMap.copyOf(contributors); + } + + public Result generateDump(String cause, @Nullable Map context) throws DumpGenerationFailedException { + return generateDump(cause, context, contributors.keySet().toArray(new String[contributors.size()])); + } + + public Result generateDump(String cause, @Nullable Map context, String... contributors) throws DumpGenerationFailedException { + long timestamp = System.currentTimeMillis(); + String fileName = ""; + if (context.containsKey("localNode")) { + Node localNode = (Node) context.get("localNode"); + if (localNode.name() != null) { + fileName += localNode.name() + "-"; + } + fileName += localNode.id() + "-"; + } + File file = new File(dumpLocation, fileName + cause + "-" + timestamp); + file.mkdirs(); + SimpleDump dump; + try { + dump = new SimpleDump(System.currentTimeMillis(), cause, context, file); + } catch (FileNotFoundException e) { + throw new DumpGenerationFailedException("Failed to generate dump", e); + } + ArrayList failedContributors = new ArrayList(); + for (String name : contributors) { + DumpContributor contributor = this.contributors.get(name); + if (contributor == null) { + failedContributors.add(new DumpContributionFailedException(name, "No contributor")); + continue; + } + try { + contributor.contribute(dump); + } catch (DumpContributionFailedException e) { + failedContributors.add(e); + } catch 
(Exception e) { + failedContributors.add(new DumpContributionFailedException(contributor.getName(), "Failed", e)); + } + } + dump.finish(); + return new Result(file, failedContributors); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java new file mode 100644 index 00000000000..4f1e22ffbe0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.dump.cluster; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.monitor.dump.Dump; +import org.elasticsearch.monitor.dump.DumpContributionFailedException; +import org.elasticsearch.monitor.dump.DumpContributor; +import org.elasticsearch.util.settings.Settings; + +import java.io.PrintWriter; + +/** + * @author kimchy (Shay Banon) + */ +public class ClusterDumpContributor implements DumpContributor { + + public static final String CLUSTER = "cluster"; + + private final String name; + + private final ClusterService clusterService; + + @Inject public ClusterDumpContributor(ClusterService clusterService, @Assisted String name, @Assisted Settings settings) { + this.clusterService = clusterService; + this.name = name; + } + + @Override public String getName() { + return name; + } + + @Override public void contribute(Dump dump) throws DumpContributionFailedException { + ClusterState clusterState = clusterService.state(); + Nodes nodes = clusterState.nodes(); + RoutingTable routingTable = clusterState.routingTable(); + + PrintWriter writer = new PrintWriter(dump.createFileWriter("cluster.txt")); + + writer.println("===== CLUSTER NODES ======"); + writer.print(nodes.prettyPrint()); + + writer.println("===== ROUTING TABLE ======"); + writer.print(routingTable.prettyPrint()); + + writer.close(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java new file mode 100644 index 00000000000..6f2989ae8d5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java @@ 
-0,0 +1,75 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.dump.heap; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.elasticsearch.monitor.dump.Dump; +import org.elasticsearch.monitor.dump.DumpContributionFailedException; +import org.elasticsearch.monitor.dump.DumpContributor; +import org.elasticsearch.util.settings.Settings; + +import java.lang.reflect.Method; + +/** + * @author kimchy (Shay Banon) + */ +public class HeapDumpContributor implements DumpContributor { + + public static final String HEAP_DUMP = "heap"; + + private final Method heapDumpMethod; + private final Object diagnosticMBean; + + private final String name; + + @Inject public HeapDumpContributor(@Assisted String name, @Assisted Settings settings) { + this.name = name; + Method heapDumpMethod; + Object diagnosticMBean; + try { + Class managementFactoryClass = Class.forName("sun.management.ManagementFactory", true, HeapDumpContributor.class.getClassLoader()); + Method method = managementFactoryClass.getMethod("getDiagnosticMXBean"); + diagnosticMBean = method.invoke(null); + heapDumpMethod = diagnosticMBean.getClass().getMethod("dumpHeap", 
String.class, boolean.class); + } + catch (Exception _ex) { + heapDumpMethod = null; + diagnosticMBean = null; + } + this.heapDumpMethod = heapDumpMethod; + this.diagnosticMBean = diagnosticMBean; + } + + @Override public String getName() { + return name; + } + + @Override public void contribute(Dump dump) throws DumpContributionFailedException { + if (heapDumpMethod == null) { + throw new DumpContributionFailedException(getName(), "Heap dump not enalbed on this JVM"); + } + try { + heapDumpMethod.invoke(diagnosticMBean, dump.createFile("heap.hprof").getAbsolutePath(), true); + } catch (Exception e) { + throw new DumpContributionFailedException(getName(), "Failed to generate heap dump", e); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java new file mode 100644 index 00000000000..8d7020fbf1b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.dump.summary; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.elasticsearch.monitor.dump.Dump; +import org.elasticsearch.monitor.dump.DumpContributionFailedException; +import org.elasticsearch.monitor.dump.DumpContributor; +import org.elasticsearch.util.settings.Settings; + +import java.io.PrintWriter; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Collection; +import java.util.Date; + +/** + * @author kimchy (Shay Banon) + */ +public class SummaryDumpContributor implements DumpContributor { + + private final DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS"); + private final Object formatterLock = new Object(); + + public static final String SUMMARY = "summary"; + + private final String name; + + @Inject public SummaryDumpContributor(@Assisted String name, @Assisted Settings settings) { + this.name = name; + } + + public String getName() { + return name; + } + + public void contribute(Dump dump) throws DumpContributionFailedException { + PrintWriter writer = new PrintWriter(dump.createFileWriter("summary.txt")); + try { + processHeader(writer, dump.timestamp()); + processCause(writer, dump.cause()); + processThrowables(writer, dump); + } catch (Exception e) { + throw new DumpContributionFailedException(getName(), "Failed to generate", e); + } finally { + try { + writer.close(); + } catch (Exception e) { + // ignore + } + } + } + + private void processHeader(PrintWriter writer, long timestamp) { + synchronized (formatterLock) { + writer.println("===== TIME ====="); + writer.println(dateFormat.format(new Date(timestamp))); + writer.println(); + } + } + + private void processCause(PrintWriter writer, String cause) { + writer.println("===== CAUSE ====="); + writer.println(cause); + writer.println(); + } + + private void processThrowables(PrintWriter writer, Dump dump) { + writer.println("===== EXCEPTIONS ====="); + 
Object throwables = dump.context().get("throwables"); + if (throwables == null) { + return; + } + if (throwables instanceof Throwable[]) { + Throwable[] array = (Throwable[]) throwables; + for (Throwable t : array) { + writer.println(); + writer.println("---- Exception ----"); + t.printStackTrace(writer); + } + } else if (throwables instanceof Collection) { + Collection collection = (Collection) throwables; + for (Object o : collection) { + Throwable t = (Throwable) o; + writer.println(); + writer.println("---- Exception ----"); + t.printStackTrace(writer); + } + } else { + throw new DumpContributionFailedException(getName(), "Can't handle throwables type [" + throwables.getClass() + "]"); + } + writer.println(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java new file mode 100644 index 00000000000..f69482b19f5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.dump.thread; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.elasticsearch.monitor.dump.Dump; +import org.elasticsearch.monitor.dump.DumpContributionFailedException; +import org.elasticsearch.monitor.dump.DumpContributor; +import org.elasticsearch.util.settings.Settings; + +import java.io.PrintWriter; +import java.lang.management.ManagementFactory; +import java.lang.management.MonitorInfo; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; + +/** + * @author kimchy (Shay Banon) + */ +public class ThreadDumpContributor implements DumpContributor { + + private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); + + public static final String THREAD_DUMP = "thread"; + + private final String name; + + @Inject public ThreadDumpContributor(@Assisted String name, @Assisted Settings settings) { + this.name = name; + } + + @Override public String getName() { + return name; + } + + @Override public void contribute(Dump dump) throws DumpContributionFailedException { + PrintWriter writer = new PrintWriter(dump.createFileWriter("threads.txt")); + try { + processDeadlocks(writer); + processAllThreads(writer); + } catch (Exception e) { + throw new DumpContributionFailedException(getName(), "Failed to generate", e); + } finally { + try { + writer.close(); + } catch (Exception e) { + // ignore + } + } + } + + private void processDeadlocks(PrintWriter dump) { + dump.println("===== Deadlocked Threads ====="); + long deadlockedThreadIds[] = findDeadlockedThreads(); + if (deadlockedThreadIds != null) + dumpThreads(dump, getThreadInfo(deadlockedThreadIds)); + } + + private void processAllThreads(PrintWriter dump) { + dump.println(); + dump.println("===== All Threads ====="); + dumpThreads(dump, dumpAllThreads()); + } + + private void dumpThreads(PrintWriter dump, ThreadInfo infos[]) { + for (ThreadInfo info : infos) { + dump.println(); + 
write(info, dump); + } + } + + private ThreadInfo[] dumpAllThreads() { + return threadBean.dumpAllThreads(true, true); + } + + public long[] findDeadlockedThreads() { + return threadBean.findDeadlockedThreads(); + } + + public ThreadInfo[] getThreadInfo(long[] threadIds) { + return threadBean.getThreadInfo(threadIds, true, true); + } + + private void write(ThreadInfo threadInfo, PrintWriter writer) { + writer.print(String.format("\"%s\" Id=%s %s", threadInfo.getThreadName(), threadInfo.getThreadId(), threadInfo.getThreadState())); + if (threadInfo.getLockName() != null) { + writer.print(String.format(" on %s", threadInfo.getLockName())); + if (threadInfo.getLockOwnerName() != null) + writer.print(String.format(" owned by \"%s\" Id=%s", threadInfo.getLockOwnerName(), threadInfo.getLockOwnerId())); + } + if (threadInfo.isInNative()) + writer.println(" (in native)"); + else + writer.println(); + MonitorInfo[] lockedMonitors = threadInfo.getLockedMonitors(); + StackTraceElement stackTraceElements[] = threadInfo.getStackTrace(); + for (StackTraceElement stackTraceElement : stackTraceElements) { + writer.println(" at " + stackTraceElement); + MonitorInfo lockedMonitor = findLockedMonitor(stackTraceElement, lockedMonitors); + if (lockedMonitor != null) + writer.println((" - locked " + lockedMonitor.getClassName() + "@" + lockedMonitor.getIdentityHashCode())); + } + + } + + private static MonitorInfo findLockedMonitor(StackTraceElement stackTraceElement, MonitorInfo lockedMonitors[]) { + for (MonitorInfo monitorInfo : lockedMonitors) { + if (stackTraceElement.equals(monitorInfo.getLockedStackFrame())) + return monitorInfo; + } + + return null; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java new file mode 100644 index 00000000000..856a38ba3f7 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.jvm; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.util.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DeadlockAnalyzer { + + private static final Deadlock NULL_RESULT[] = new Deadlock[0]; + private final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); + + private static DeadlockAnalyzer INSTANCE = new DeadlockAnalyzer(); + + public static DeadlockAnalyzer deadlockAnalyzer() { + return INSTANCE; + } + + private DeadlockAnalyzer() { + + } + + public Deadlock[] findDeadlocks() { + long deadlockedThreads[] = threadBean.findMonitorDeadlockedThreads(); + if (deadlockedThreads == null || deadlockedThreads.length == 0) { + return NULL_RESULT; + } + ImmutableMap threadInfoMap = createThreadInfoMap(deadlockedThreads); + Set> cycles = calculateCycles(threadInfoMap); + Set> chains = 
calculateCycleDeadlockChains(threadInfoMap, cycles); + cycles.addAll(chains); + return createDeadlockDescriptions(cycles); + } + + + private Deadlock[] createDeadlockDescriptions(Set> cycles) { + Deadlock result[] = new Deadlock[cycles.size()]; + int count = 0; + for (LinkedHashSet cycle : cycles) { + ThreadInfo asArray[] = cycle.toArray(new ThreadInfo[cycle.size()]); + Deadlock d = new Deadlock(asArray); + result[count++] = d; + } + return result; + } + + + private Set> calculateCycles(ImmutableMap threadInfoMap) { + Set> cycles = new HashSet>(); + for (Map.Entry entry : threadInfoMap.entrySet()) { + LinkedHashSet cycle = new LinkedHashSet(); + for (ThreadInfo t = entry.getValue(); !cycle.contains(t); t = threadInfoMap.get(Long.valueOf(t.getLockOwnerId()))) + cycle.add(t); + + if (!cycles.contains(cycle)) + cycles.add(cycle); + } + return cycles; + } + + + private Set> calculateCycleDeadlockChains(ImmutableMap threadInfoMap, Set> cycles) { + ThreadInfo allThreads[] = threadBean.getThreadInfo(threadBean.getAllThreadIds()); + Set> deadlockChain = new HashSet>(); + Set knownDeadlockedThreads = threadInfoMap.keySet(); + for (ThreadInfo threadInfo : allThreads) { + Thread.State state = threadInfo.getThreadState(); + if (state == Thread.State.BLOCKED && !knownDeadlockedThreads.contains(threadInfo.getThreadId())) { + for (LinkedHashSet cycle : cycles) { + if (cycle.contains(threadInfoMap.get(Long.valueOf(threadInfo.getLockOwnerId())))) { + LinkedHashSet chain = new LinkedHashSet(); + for (ThreadInfo node = threadInfo; !chain.contains(node); node = threadInfoMap.get(Long.valueOf(node.getLockOwnerId()))) + chain.add(node); + + deadlockChain.add(chain); + } + } + + } + } + + return deadlockChain; + } + + + private ImmutableMap createThreadInfoMap(long threadIds[]) { + ThreadInfo threadInfos[] = threadBean.getThreadInfo(threadIds); + ImmutableMap.Builder threadInfoMap = ImmutableMap.builder(); + for (ThreadInfo threadInfo : threadInfos) { + 
threadInfoMap.put(threadInfo.getThreadId(), threadInfo); + } + return threadInfoMap.build(); + } + + + public static class Deadlock { + + private final ThreadInfo members[]; + private final String description; + private final ImmutableSet memberIds; + + public Deadlock(ThreadInfo[] members) { + this.members = members; + + ImmutableSet.Builder builder = ImmutableSet.builder(); + StringBuilder sb = new StringBuilder(); + for (int x = 0; x < members.length; x++) { + ThreadInfo ti = members[x]; + sb.append(ti.getThreadName()); + if (x < members.length) + sb.append(" > "); + if (x == members.length - 1) + sb.append(ti.getLockOwnerName()); + builder.add(ti.getThreadId()); + } + this.description = sb.toString(); + this.memberIds = builder.build(); + } + + public ThreadInfo[] members() { + return members; + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Deadlock deadlock = (Deadlock) o; + + if (memberIds != null ? !memberIds.equals(deadlock.memberIds) : deadlock.memberIds != null) return false; + + return true; + } + + @Override public int hashCode() { + int result = members != null ? Arrays.hashCode(members) : 0; + result = 31 * result + (description != null ? description.hashCode() : 0); + result = 31 * result + (memberIds != null ? memberIds.hashCode() : 0); + return result; + } + + @Override public String toString() { + return description; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmConfig.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmConfig.java new file mode 100644 index 00000000000..30697a20170 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmConfig.java @@ -0,0 +1,216 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.jvm; + +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.lang.management.RuntimeMXBean; +import java.util.HashMap; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class JvmConfig implements Streamable, Serializable { + + private static JvmConfig INSTANCE; + + static { + RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); + MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); + + // returns the @ + long pid; + String xPid = runtimeMXBean.getName(); + try { + xPid = xPid.split("@")[0]; + pid = Long.parseLong(xPid); + } catch (Exception e) { + pid = -1; + } + INSTANCE = new JvmConfig(pid, runtimeMXBean.getVmName(), System.getProperty("java.version"), System.getProperty("java.vendor"), + runtimeMXBean.getStartTime(), + memoryMXBean.getHeapMemoryUsage().getInit(), memoryMXBean.getHeapMemoryUsage().getMax(), + memoryMXBean.getNonHeapMemoryUsage().getInit(), memoryMXBean.getNonHeapMemoryUsage().getMax(), + 
runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]), runtimeMXBean.getBootClassPath(), runtimeMXBean.getClassPath(), runtimeMXBean.getSystemProperties()); + } + + public static JvmConfig jvmConfig() { + return INSTANCE; + } + + private long pid = -1; + + private String vmName = ""; + + private String vmVersion = ""; + + private String vmVendor = ""; + + private long startTime = -1; + + private long memoryHeapInit = -1; + + private long memoryHeapMax = -1; + + private long memoryNonHeapInit = -1; + + private long memoryNonHeapMax = -1; + + private String[] inputArguments; + + private String bootClassPath; + + private String classPath; + + private Map systemProperties; + + private JvmConfig() { + } + + public JvmConfig(long pid, String vmName, String vmVersion, String vmVendor, long startTime, + long memoryHeapInit, long memoryHeapMax, long memoryNonHeapInit, long memoryNonHeapMax, + String[] inputArguments, String bootClassPath, String classPath, Map systemProperties) { + this.pid = pid; + this.vmName = vmName; + this.vmVersion = vmVersion; + this.vmVendor = vmVendor; + this.startTime = startTime; + this.memoryHeapInit = memoryHeapInit; + this.memoryHeapMax = memoryHeapMax; + this.memoryNonHeapInit = memoryNonHeapInit; + this.memoryNonHeapMax = memoryNonHeapMax; + this.inputArguments = inputArguments; + this.bootClassPath = bootClassPath; + this.classPath = classPath; + this.systemProperties = systemProperties; + } + + public long pid() { + return this.pid; + } + + public String vmName() { + return vmName; + } + + public String vmVersion() { + return vmVersion; + } + + public String vmVendor() { + return vmVendor; + } + + public long startTime() { + return startTime; + } + + public SizeValue memoryHeapInit() { + return new SizeValue(memoryHeapInit); + } + + public SizeValue memoryHeapMax() { + return new SizeValue(memoryHeapMax); + } + + public SizeValue memoryNonHeapInit() { + return new SizeValue(memoryNonHeapInit); + } 
+ + public SizeValue memoryNonHeapMax() { + return new SizeValue(memoryNonHeapMax); + } + + public String[] inputArguments() { + return inputArguments; + } + + public String bootClassPath() { + return bootClassPath; + } + + public String classPath() { + return classPath; + } + + public Map systemProperties() { + return systemProperties; + } + + public static JvmConfig readJvmComing(DataInput in) throws IOException, ClassNotFoundException { + JvmConfig jvmConfig = new JvmConfig(); + jvmConfig.readFrom(in); + return jvmConfig; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + pid = in.readLong(); + vmName = in.readUTF(); + vmVersion = in.readUTF(); + vmVendor = in.readUTF(); + startTime = in.readLong(); + memoryHeapInit = in.readLong(); + memoryHeapMax = in.readLong(); + memoryNonHeapInit = in.readLong(); + memoryNonHeapMax = in.readLong(); + inputArguments = new String[in.readInt()]; + for (int i = 0; i < inputArguments.length; i++) { + inputArguments[i] = in.readUTF(); + } + bootClassPath = in.readUTF(); + classPath = in.readUTF(); + systemProperties = new HashMap(); + int size = in.readInt(); + for (int i = 0; i < size; i++) { + systemProperties.put(in.readUTF(), in.readUTF()); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(pid); + out.writeUTF(vmName); + out.writeUTF(vmVersion); + out.writeUTF(vmVendor); + out.writeLong(startTime); + out.writeLong(memoryHeapInit); + out.writeLong(memoryHeapMax); + out.writeLong(memoryNonHeapInit); + out.writeLong(memoryNonHeapMax); + out.writeInt(inputArguments.length); + for (String inputArgument : inputArguments) { + out.writeUTF(inputArgument); + } + out.writeUTF(bootClassPath); + out.writeUTF(classPath); + out.writeInt(systemProperties.size()); + for (Map.Entry entry : systemProperties.entrySet()) { + out.writeUTF(entry.getKey()); + out.writeUTF(entry.getValue()); + } + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java new file mode 100644 index 00000000000..cf14d1bf9ea --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.jvm; + +import com.google.common.collect.ImmutableSet; +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.monitor.dump.DumpGenerator; +import org.elasticsearch.monitor.dump.DumpMonitorService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.settings.Settings; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ScheduledFuture; + +import static org.elasticsearch.monitor.dump.summary.SummaryDumpContributor.*; +import static org.elasticsearch.monitor.dump.thread.ThreadDumpContributor.*; +import static org.elasticsearch.monitor.jvm.DeadlockAnalyzer.*; +import static org.elasticsearch.monitor.jvm.JvmStats.*; +import static org.elasticsearch.util.TimeValue.*; + +/** + * @author kimchy (Shay Banon) + */ +public class JvmMonitorService extends AbstractComponent implements LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final ThreadPool threadPool; + + private final DumpMonitorService dumpMonitorService; + + private final boolean enabled; + + private final TimeValue interval; + + private final TimeValue gcCollectionWarning; + + private volatile ScheduledFuture scheduledFuture; + + @Inject public JvmMonitorService(Settings settings, ThreadPool threadPool, DumpMonitorService dumpMonitorService) { + super(settings); + this.threadPool = threadPool; + this.dumpMonitorService = dumpMonitorService; + + this.enabled = componentSettings.getAsBoolean("enabled", true); + this.interval = componentSettings.getAsTime("interval", timeValueSeconds(10)); + this.gcCollectionWarning = componentSettings.getAsTime("gcCollectionWarning", timeValueSeconds(10)); + } + + @Override public 
Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public JvmMonitorService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + if (!enabled) { + return this; + } + scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(), interval); + return this; + } + + @Override public JvmMonitorService stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + if (!enabled) { + return this; + } + scheduledFuture.cancel(true); + return this; + } + + @Override public void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + private class JvmMonitor implements Runnable { + + private JvmStats lastJvmStats = jvmStats(); + + private final Set lastSeenDeadlocks = new HashSet(); + + public JvmMonitor() { + } + + @Override public void run() { + monitorDeadlock(); + monitorLongGc(); + } + + private void monitorLongGc() { + JvmStats currentJvmStats = jvmStats(); + long collectionTime = currentJvmStats.gcCollectionTime().millis() - lastJvmStats.gcCollectionTime().millis(); + if (collectionTime > gcCollectionWarning.millis()) { + logger.warn("Long GC collection occurred, took [" + new TimeValue(collectionTime) + "], breached threshold [" + gcCollectionWarning + "]"); + } + lastJvmStats = currentJvmStats; + } + + private void monitorDeadlock() { + DeadlockAnalyzer.Deadlock[] deadlocks = deadlockAnalyzer().findDeadlocks(); + if (deadlocks != null && deadlocks.length > 0) { + ImmutableSet asSet = new ImmutableSet.Builder().add(deadlocks).build(); + if (!asSet.equals(lastSeenDeadlocks)) { + DumpGenerator.Result genResult = dumpMonitorService.generateDump("deadlock", null, SUMMARY, THREAD_DUMP); + StringBuilder sb = new StringBuilder("Detected Deadlock(s)"); + for (DeadlockAnalyzer.Deadlock deadlock : asSet) { + sb.append("\n ----> ").append(deadlock); + } + sb.append("\nDump generated 
[").append(genResult.location()).append("]"); + logger.error(sb.toString()); + lastSeenDeadlocks.clear(); + lastSeenDeadlocks.addAll(asSet); + } + } else { + lastSeenDeadlocks.clear(); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java new file mode 100644 index 00000000000..a0b19d0c53b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.jvm; + +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; +import java.lang.management.*; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * @author kimchy (Shay Banon) + */ +public class JvmStats implements Streamable, Serializable { + + private static RuntimeMXBean runtimeMXBean; + private static MemoryMXBean memoryMXBean; + private static ThreadMXBean threadMXBean; + + static { + runtimeMXBean = ManagementFactory.getRuntimeMXBean(); + memoryMXBean = ManagementFactory.getMemoryMXBean(); + threadMXBean = ManagementFactory.getThreadMXBean(); + } + + public static JvmStats jvmStats() { + long gcCollectionCount = 0; + long gcCollectionTime = 0; + List gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans(); + for (GarbageCollectorMXBean gcMxBean : gcMxBeans) { + long tmp = gcMxBean.getCollectionCount(); + if (tmp != -1) { + gcCollectionCount += tmp; + } + tmp = gcMxBean.getCollectionTime(); + if (tmp != -1) { + gcCollectionTime += tmp; + } + } + return new JvmStats(System.currentTimeMillis(), runtimeMXBean.getUptime(), + memoryMXBean.getHeapMemoryUsage().getCommitted(), memoryMXBean.getHeapMemoryUsage().getUsed(), + memoryMXBean.getNonHeapMemoryUsage().getCommitted(), memoryMXBean.getNonHeapMemoryUsage().getUsed(), + threadMXBean.getThreadCount(), threadMXBean.getPeakThreadCount(), gcCollectionCount, gcCollectionTime); + } + + private long timestamp = -1; + + private long uptime; + + private long memoryHeapCommitted; + + private long memoryHeapUsed; + + private long memoryNonHeapCommitted; + + private long memoryNonHeapUsed; + + private int threadCount; + + private int peakThreadCount; + + private long gcCollectionCount; + + private long gcCollectionTime; + + private JvmStats() { + } + + public JvmStats(long 
timestamp, long uptime, + long memoryHeapCommitted, long memoryHeapUsed, long memoryNonHeapCommitted, long memoryNonHeapUsed, + int threadCount, int peakThreadCount, long gcCollectionCount, long gcCollectionTime) { + this.timestamp = timestamp; + this.uptime = uptime; + this.memoryHeapCommitted = memoryHeapCommitted; + this.memoryHeapUsed = memoryHeapUsed; + this.memoryNonHeapCommitted = memoryNonHeapCommitted; + this.memoryNonHeapUsed = memoryNonHeapUsed; + this.threadCount = threadCount; + this.peakThreadCount = peakThreadCount; + this.gcCollectionCount = gcCollectionCount; + this.gcCollectionTime = gcCollectionTime; + } + + public long timestamp() { + return timestamp; + } + + public long uptime() { + return uptime; + } + + public SizeValue memoryHeapCommitted() { + return new SizeValue(memoryHeapCommitted); + } + + public SizeValue memoryHeapUsed() { + return new SizeValue(memoryHeapUsed); + } + + public SizeValue memoryNonHeapCommitted() { + return new SizeValue(memoryNonHeapCommitted); + } + + public SizeValue memoryNonHeapUsed() { + return new SizeValue(memoryNonHeapUsed); + } + + public int threadCount() { + return threadCount; + } + + public int peakThreadCount() { + return peakThreadCount; + } + + public long gcCollectionCount() { + return gcCollectionCount; + } + + public TimeValue gcCollectionTime() { + return new TimeValue(gcCollectionTime, TimeUnit.MILLISECONDS); + } + + public static JvmStats readJvmStats(DataInput in) throws IOException, ClassNotFoundException { + JvmStats jvmStats = new JvmStats(); + jvmStats.readFrom(in); + return jvmStats; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + timestamp = in.readLong(); + uptime = in.readLong(); + memoryHeapCommitted = in.readLong(); + memoryHeapUsed = in.readLong(); + memoryNonHeapCommitted = in.readLong(); + memoryNonHeapUsed = in.readLong(); + threadCount = in.readInt(); + peakThreadCount = in.readInt(); + gcCollectionCount = in.readLong(); + 
gcCollectionTime = in.readLong(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(timestamp); + out.writeLong(uptime); + out.writeLong(memoryHeapCommitted); + out.writeLong(memoryHeapUsed); + out.writeLong(memoryNonHeapCommitted); + out.writeLong(memoryNonHeapUsed); + out.writeInt(threadCount); + out.writeInt(peakThreadCount); + out.writeLong(gcCollectionCount); + out.writeLong(gcCollectionTime); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/MemoryMonitor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/MemoryMonitor.java new file mode 100644 index 00000000000..8d5184b2b16 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/MemoryMonitor.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.memory; + +import org.elasticsearch.util.component.LifecycleComponent; + +/** + * @author kimchy (Shay Banon) + */ +public interface MemoryMonitor extends LifecycleComponent { + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/MemoryMonitorService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/MemoryMonitorService.java new file mode 100644 index 00000000000..467402c720a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/MemoryMonitorService.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.memory; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class MemoryMonitorService extends AbstractComponent implements LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final MemoryMonitor memoryMonitor; + + @Inject public MemoryMonitorService(Settings settings, MemoryMonitor memoryMonitor) { + super(settings); + this.memoryMonitor = memoryMonitor; + } + + @Override public Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public MemoryMonitorService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + memoryMonitor.start(); + return this; + } + + @Override public MemoryMonitorService stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + memoryMonitor.stop(); + return this; + } + + @Override public void close() throws ElasticSearchException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/alpha/AlphaMemoryMonitor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/alpha/AlphaMemoryMonitor.java new file mode 100644 index 00000000000..cb66f3f1b94 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/monitor/memory/alpha/AlphaMemoryMonitor.java @@ -0,0 +1,210 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.memory.alpha; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.indices.IndicesMemoryCleaner; +import org.elasticsearch.monitor.memory.MemoryMonitor; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.util.TimeValue.*; + +/** + * @author kimchy (Shay Banon) + */ +public class AlphaMemoryMonitor extends AbstractComponent implements MemoryMonitor { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final double upperMemoryThreshold; + + private final double lowerMemoryThreshold; + + private final TimeValue interval; + + private final int gcThreshold; + + private final int cleanThreshold; + + private final SizeValue minimumFlushableSizeToClean; + + private final int translogNumberOfOperationsThreshold; + + private final ThreadPool threadPool; + + private final IndicesMemoryCleaner indicesMemoryCleaner; + + 
private final Runtime runtime; + + private final SizeValue maxMemory; + + private final SizeValue totalMemory; + + private volatile ScheduledFuture scheduledFuture; + + private AtomicLong totalCleans = new AtomicLong(); + private AtomicLong totalGCs = new AtomicLong(); + + @Inject public AlphaMemoryMonitor(Settings settings, ThreadPool threadPool, IndicesMemoryCleaner indicesMemoryCleaner) { + super(settings); + this.threadPool = threadPool; + this.indicesMemoryCleaner = indicesMemoryCleaner; + + this.upperMemoryThreshold = componentSettings.getAsDouble("upperMemoryThreshold", 0.8); + this.lowerMemoryThreshold = componentSettings.getAsDouble("lowerMemoryThreshold", 0.5); + this.interval = componentSettings.getAsTime("interval", timeValueMillis(500)); + this.gcThreshold = componentSettings.getAsInt("gcThreshold", 5); + this.cleanThreshold = componentSettings.getAsInt("cleanThreshold", 10); + this.minimumFlushableSizeToClean = componentSettings.getAsSize("minimumFlushableSizeToClean", new SizeValue(5, SizeUnit.MB)); + this.translogNumberOfOperationsThreshold = componentSettings.getAsInt("translogNumberOfOperationsThreshold", 5000); + + logger.debug("Interval[" + interval + "], upperMemoryThreshold[" + upperMemoryThreshold + "], lowerMemoryThreshold[" + lowerMemoryThreshold + "], translogNumberOfOperationsThreshold[" + translogNumberOfOperationsThreshold + "]"); + + this.runtime = Runtime.getRuntime(); + this.maxMemory = new SizeValue(runtime.maxMemory()); + this.totalMemory = maxMemory.bytes() == runtime.totalMemory() ? new SizeValue(runtime.totalMemory()) : null; // Xmx==Xms when the JVM was started. 
+ } + + @Override public Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public MemoryMonitor start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + scheduledFuture = threadPool.scheduleWithFixedDelay(new MemoryCleaner(), interval); + return this; + } + + @Override public MemoryMonitor stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + scheduledFuture.cancel(true); + return this; + } + + public void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + private long freeMemory() { + return runtime.freeMemory(); + } + + private long totalMemory() { + return totalMemory == null ? runtime.totalMemory() : totalMemory.bytes(); + } + + private class MemoryCleaner implements Runnable { + + private int gcCounter; + + private boolean performedClean; + + private int cleanCounter; + + private StopWatch stopWatch = new StopWatch().keepTaskList(false); + + @Override public void run() { + // try and clean translog based on a threshold, since we don't want to get a very large transaction log + // which means recovery it will take a long time (since the target reindex all this data) + IndicesMemoryCleaner.TranslogCleanResult translogCleanResult = indicesMemoryCleaner.cleanTranslog(translogNumberOfOperationsThreshold); + if (translogCleanResult.cleanedShards() > 0) { + long totalClean = totalCleans.incrementAndGet(); + logger.debug("[" + totalClean + "] Translog Clean: " + translogCleanResult); + } + + // the logic is simple, if the used memory is above the upper threshold, we need to clean + // we clean down as much as we can to down to the lower threshold + + // in order not to get trashing, we only perform a clean after another clean if a the clean counter + // has expired. 
+ + // we also do the same for GC invocations + + long upperMemory = maxMemory.bytes(); + long totalMemory = totalMemory(); + long usedMemory = totalMemory - freeMemory(); + long upperThresholdMemory = (long) (upperMemory * upperMemoryThreshold); + + if (usedMemory - upperThresholdMemory <= 0) { + gcCounter = 0; + performedClean = false; + cleanCounter = 0; + return; + } + + if (performedClean) { + if (++cleanCounter < cleanThreshold) { + return; + } + } + + long totalClean = totalCleans.incrementAndGet(); + + long lowerThresholdMemory = (long) (upperMemory * lowerMemoryThreshold); + long memoryToClean = usedMemory - lowerThresholdMemory; + if (logger.isDebugEnabled()) { + StringBuilder sb = new StringBuilder(); + sb.append('[').append(totalClean).append("]: "); + sb.append("Cleaning, memoryToClean[").append(new SizeValue(memoryToClean)).append(']'); + sb.append(", lowerMemoryThreshold[").append(new SizeValue(lowerThresholdMemory)).append(']'); + sb.append(", upperMemoryThreshold[").append(new SizeValue(upperThresholdMemory)).append(']'); + sb.append(", usedMemory[").append(new SizeValue(usedMemory)).append(']'); + sb.append(", totalMemory[").append(new SizeValue(totalMemory)).append(']'); + sb.append(", maxMemory[").append(maxMemory).append(']'); + logger.debug(sb.toString()); + } + + IndicesMemoryCleaner.MemoryCleanResult memoryCleanResult = indicesMemoryCleaner.cleanMemory(memoryToClean, minimumFlushableSizeToClean); + if (logger.isDebugEnabled()) { + logger.debug("[" + totalClean + "] Memory Clean: " + memoryCleanResult); + } + performedClean = true; + cleanCounter = 0; + + if (++gcCounter >= gcThreshold) { + long totalGc = totalGCs.incrementAndGet(); + logger.debug("[" + totalGc + "]: Running GC after [" + gcCounter + "] memory clean swipes"); + System.gc(); + gcCounter = 0; + } + + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/Scroll.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/search/Scroll.java new file mode 100644 index 00000000000..1ec30e8182c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/Scroll.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search; + +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.util.TimeValue.*; + +/** + * @author kimchy (Shay Banon) + */ +public class Scroll implements Streamable { + + private TimeValue timeout; + + private Scroll() { + + } + + public Scroll(TimeValue timeout) { + this.timeout = timeout; + } + + public TimeValue timeout() { + return timeout; + } + + public static Scroll readScroll(DataInput in) throws IOException, ClassNotFoundException { + Scroll scroll = new Scroll(); + scroll.readFrom(in); + return scroll; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + timeout = readTimeValue(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + timeout.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/SearchContextMissingException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/SearchContextMissingException.java new file mode 100644 index 00000000000..21c4fa0a7af --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/SearchContextMissingException.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/**
 * Thrown when a search phase references a search context id that this node no
 * longer holds (it was freed, or was never created here).
 *
 * @author kimchy (Shay Banon)
 */
public class SearchContextMissingException extends ElasticSearchException {

    // The id of the missing search context.
    private final long id;

    public SearchContextMissingException(long id) {
        super("No search context found for id [" + id + "]");
        this.id = id;
    }

    /**
     * The id of the search context that could not be found.
     */
    public long id() {
        return this.id;
    }
}
/**
 * Base class for exceptions thrown while executing a search.
 *
 * @author kimchy (Shay Banon)
 */
public class SearchException extends ElasticSearchException {

    public SearchException(String msg) {
        super(msg);
    }

    public SearchException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
/**
 * A single hit (document) returned by a search.
 *
 * @author kimchy (Shay Banon)
 */
public interface SearchHit extends Streamable, ToJson {

    /**
     * The index the hit came from.
     */
    String index();

    /**
     * The id of the hit.
     */
    String id();

    /**
     * The type of the hit.
     */
    String type();

    /**
     * The source of the hit. NOTE(review): may be {@code null} — confirm
     * against implementations whether source is always loaded.
     */
    String source();

    /**
     * The Lucene score {@link Explanation} for the hit. NOTE(review): presumably
     * {@code null} when explanations were not requested — confirm.
     */
    Explanation explanation();

    /**
     * Fields of the hit by field name. NOTE(review): raw type in this snapshot —
     * presumably {@code Map<String, SearchHitField>}; generics appear stripped.
     */
    Map fields();

    /**
     * The shard target the hit was fetched from.
     */
    SearchShardTarget target();
}
/**
 * A single field of a {@link SearchHit}: a field name together with its
 * (possibly multiple) values.
 *
 * @author kimchy (Shay Banon)
 */
public interface SearchHitField extends Streamable {

    /**
     * The name of the field.
     */
    String name();

    /**
     * The values of the field. NOTE(review): raw type in this snapshot —
     * element type not visible here; generics appear stripped.
     */
    List values();
}
/**
 * The hits of a search: the total number of matching documents plus the
 * hits returned for this page.
 *
 * @author kimchy (Shay Banon)
 */
public interface SearchHits extends Streamable, ToJson {

    /**
     * The total number of hits that matched the search.
     */
    long totalHits();

    /**
     * The hits returned by the search.
     */
    SearchHit[] hits();
}
/**
 * Guice module that binds the search components: the search phases, the node
 * level {@link SearchService}, the phase results controller, and the transport
 * action used to invoke the search service across nodes.
 *
 * @author kimchy (Shay Banon)
 */
public class SearchModule extends AbstractModule {

    @Override protected void configure() {
        // Search phases.
        bind(DfsPhase.class).asEagerSingleton();
        bind(FacetsPhase.class).asEagerSingleton();
        bind(QueryPhase.class).asEagerSingleton();
        bind(FetchPhase.class).asEagerSingleton();
        bind(SearchService.class).asEagerSingleton();
        bind(SearchPhaseController.class).asEagerSingleton();

        // Transport layer entry point into the search service.
        bind(SearchServiceTransportAction.class).asEagerSingleton();
    }
}
/**
 * A parser for one named element of a search request source. The parser is
 * invoked with the json parser already advanced past the element's field name
 * (see how SearchService.parseSource dispatches elements).
 *
 * @author kimchy (Shay Banon)
 */
public interface SearchParseElement {

    /**
     * Parses this element's content from the json stream into the search context.
     *
     * @throws Exception on any parse failure; the caller wraps it in a
     *                   SearchParseException
     */
    void parse(JsonParser jp, SearchContext context) throws Exception;
}
/**
 * Thrown when the source of a search request cannot be parsed, or contains an
 * element that no registered parser knows how to handle.
 *
 * @author kimchy (Shay Banon)
 */
public class SearchParseException extends SearchException {

    public SearchParseException(String msg) {
        super(msg);
    }

    public SearchParseException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
/**
 * A phase of search execution (e.g. dfs, query, fetch).
 *
 * @author kimchy (Shay Banon)
 */
public interface SearchPhase {

    /**
     * The source elements this phase knows how to parse. NOTE(review): raw type
     * in this snapshot — presumably {@code Map<String, SearchParseElement>}
     * (SearchService aggregates these maps by element name); generics appear
     * stripped.
     */
    Map parseElements();

    /**
     * Executes this phase against the given search context.
     */
    void execute(SearchContext context) throws ElasticSearchException;
}
+ */ + +package org.elasticsearch.search; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.Inject; +import org.apache.lucene.search.TopDocs; +import org.codehaus.jackson.JsonFactory; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.dfs.CachedDfSource; +import org.elasticsearch.search.dfs.DfsPhase; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashMapLong; +import org.elasticsearch.util.io.FastStringReader; +import org.elasticsearch.util.json.Jackson; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchService extends AbstractComponent implements 
LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final JsonFactory jsonFactory = Jackson.defaultJsonFactory(); + + private final ClusterService clusterService; + + private final IndicesService indicesService; + + private final DfsPhase dfsPhase; + + private final QueryPhase queryPhase; + + private final FetchPhase fetchPhase; + + private final AtomicLong idGenerator = new AtomicLong(); + + private final NonBlockingHashMapLong activeContexts = new NonBlockingHashMapLong(); + + private final ImmutableMap elementParsers; + + @Inject public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, + DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase) { + super(settings); + this.clusterService = clusterService; + this.indicesService = indicesService; + this.dfsPhase = dfsPhase; + this.queryPhase = queryPhase; + this.fetchPhase = fetchPhase; + + Map elementParsers = new HashMap(); + elementParsers.putAll(dfsPhase.parseElements()); + elementParsers.putAll(queryPhase.parseElements()); + elementParsers.putAll(fetchPhase.parseElements()); + this.elementParsers = ImmutableMap.copyOf(elementParsers); + } + + @Override public Lifecycle.State lifecycleState() { + return lifecycle.state(); + } + + @Override public SearchService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + return this; + } + + @Override public SearchService stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + for (SearchContext context : activeContexts.values()) { + freeContext(context); + } + activeContexts.clear(); + return this; + } + + @Override public void close() throws ElasticSearchException { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + public DfsSearchResult executeDfsPhase(InternalSearchRequest request) throws ElasticSearchException { + SearchContext context = 
createContext(request); + activeContexts.put(context.id(), context); + dfsPhase.execute(context); + return context.dfsResult(); + } + + public QuerySearchResult executeQueryPhase(InternalSearchRequest request) throws ElasticSearchException { + SearchContext context = createContext(request); + activeContexts.put(context.id(), context); + queryPhase.execute(context); + return context.queryResult(); + } + + public QuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) throws ElasticSearchException { + SearchContext context = findContext(request.id()); + processScroll(request, context); + queryPhase.execute(context); + return context.queryResult(); + } + + public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticSearchException { + SearchContext context = findContext(request.id()); + try { + context.searcher().dfSource(new CachedDfSource(request.dfs(), context.similarityService().defaultSearchSimilarity())); + } catch (IOException e) { + throw new SearchException("Failed to set aggreagted df", e); + } + queryPhase.execute(context); + return context.queryResult(); + } + + public QueryFetchSearchResult executeFetchPhase(InternalSearchRequest request) throws ElasticSearchException { + SearchContext context = createContext(request); + queryPhase.execute(context); + shortcutDocIdsToLoad(context); + fetchPhase.execute(context); + if (context.scroll() != null) { + activeContexts.put(context.id(), context); + } + return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); + } + + public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) throws ElasticSearchException { + SearchContext context = findContext(request.id()); + try { + context.searcher().dfSource(new CachedDfSource(request.dfs(), context.similarityService().defaultSearchSimilarity())); + } catch (IOException e) { + throw new SearchException("Failed to set aggregated df", e); + } + queryPhase.execute(context); + 
shortcutDocIdsToLoad(context); + fetchPhase.execute(context); + if (context.scroll() != null) { + activeContexts.put(context.id(), context); + } + return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); + } + + public QueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) throws ElasticSearchException { + SearchContext context = findContext(request.id()); + processScroll(request, context); + queryPhase.execute(context); + shortcutDocIdsToLoad(context); + fetchPhase.execute(context); + if (context.scroll() == null) { + freeContext(request.id()); + } + return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); + } + + public FetchSearchResult executeFetchPhase(FetchSearchRequest request) throws ElasticSearchException { + SearchContext context = findContext(request.id()); + context.docIdsToLoad(request.docIds()); + fetchPhase.execute(context); + if (context.scroll() == null) { + freeContext(request.id()); + } + return context.fetchResult(); + } + + private SearchContext findContext(long id) throws SearchContextMissingException { + SearchContext context = activeContexts.get(id); + if (context == null) { + throw new SearchContextMissingException(id); + } + return context; + } + + private SearchContext createContext(InternalSearchRequest request) throws ElasticSearchException { + IndexService indexService = indicesService.indexServiceSafe(request.index()); + IndexShard indexShard = indexService.shardSafe(request.shardId()); + Engine.Searcher engineSearcher = indexShard.searcher(); + + SearchShardTarget shardTarget = new SearchShardTarget(clusterService.state().nodes().localNodeId(), request.index(), request.shardId()); + + SearchContext context = new SearchContext(idGenerator.incrementAndGet(), shardTarget, request.timeout(), + request.queryBoost(), request.source(), request.types(), engineSearcher, indexService); + + // init the from and size + context.from(request.from()); + 
context.size(request.size()); + + context.scroll(request.scroll()); + + parseSource(context); + + // if the from and size are still not set, default them + if (context.from() == -1) { + context.from(0); + } + if (context.size() == -1) { + context.size(10); + } + + return context; + } + + private void freeContext(long id) { + SearchContext context = activeContexts.remove(id); + if (context == null) { + return; + } + freeContext(context); + } + + private void freeContext(SearchContext context) { + context.release(); + } + + private void parseSource(SearchContext context) throws SearchParseException { + try { + JsonParser jp = jsonFactory.createJsonParser(new FastStringReader(context.source())); + JsonToken token; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + String fieldName = jp.getCurrentName(); + jp.nextToken(); + SearchParseElement element = elementParsers.get(fieldName); + if (element == null) { + throw new SearchParseException("No parser for element [" + fieldName + "]"); + } + element.parse(jp, context); + } else if (token == null) { + break; + } + } + } catch (Exception e) { + throw new SearchParseException("Failed to parse [" + context.source() + "]", e); + } + } + + private void shortcutDocIdsToLoad(SearchContext context) { + TopDocs topDocs = context.queryResult().topDocs(); + if (topDocs.scoreDocs.length < context.from()) { + // no more docs... 
+ context.docIdsToLoad(new int[0]); + return; + } + int totalSize = context.from() + context.size(); + int[] docIdsToLoad = new int[context.size()]; + int counter = 0; + for (int i = context.from(); i < totalSize; i++) { + if (i < topDocs.scoreDocs.length) { + docIdsToLoad[counter] = topDocs.scoreDocs[i].doc; + } else { + break; + } + counter++; + } + if (counter < context.size()) { + docIdsToLoad = Arrays.copyOfRange(docIdsToLoad, 0, counter); + } + context.docIdsToLoad(docIdsToLoad); + } + + private void processScroll(InternalScrollSearchRequest request, SearchContext context) { + // process scroll + context.from(context.from() + context.size()); + context.scroll(request.scroll()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/SearchShardTarget.java new file mode 100644 index 00000000000..71e29c12d54 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search; + +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * The target that the search request was executed on. + * + * @author kimchy (Shay Banon) + */ +public class SearchShardTarget implements Streamable { + + private String nodeId; + + private String index; + + private int shardId; + + private SearchShardTarget() { + + } + + public SearchShardTarget(String nodeId, String index, int shardId) { + this.nodeId = nodeId; + this.index = index; + this.shardId = shardId; + } + + public String nodeId() { + return nodeId; + } + + public String index() { + return index; + } + + public int shardId() { + return shardId; + } + + public static SearchShardTarget readSearchShardTarget(DataInput in) throws IOException, ClassNotFoundException { + SearchShardTarget result = new SearchShardTarget(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + nodeId = in.readUTF(); + index = in.readUTF(); + shardId = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(nodeId); + out.writeUTF(index); + out.writeInt(shardId); + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SearchShardTarget that = (SearchShardTarget) o; + + if (shardId != that.shardId) return false; + if (index != null ? !index.equals(that.index) : that.index != null) return false; + if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) return false; + + return true; + } + + @Override public int hashCode() { + int result = nodeId != null ? nodeId.hashCode() : 0; + result = 31 * result + (index != null ? 
index.hashCode() : 0); + result = 31 * result + shardId; + return result; + } + + @Override public String toString() { + return "[" + nodeId + "][" + index + "][" + shardId + "]"; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/action/SearchServiceListener.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/action/SearchServiceListener.java new file mode 100644 index 00000000000..2efc2e0d89e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/action/SearchServiceListener.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.action; + +/** + * @author kimchy (Shay Banon) + */ +public interface SearchServiceListener { + + void onResult(T result); + + void onFailure(Throwable t); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java new file mode 100644 index 00000000000..4dfa069f100 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java @@ -0,0 +1,417 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.action; + +import com.google.inject.Inject; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.transport.*; + +/** + * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through + * transport. + * + * @author kimchy (Shay Banon) + */ +public class SearchServiceTransportAction { + + private final TransportService transportService; + + private final ClusterService clusterService; + + private final SearchService searchService; + + @Inject public SearchServiceTransportAction(TransportService transportService, ClusterService clusterService, SearchService searchService) { + this.transportService = transportService; + this.clusterService = clusterService; + this.searchService = searchService; + + transportService.registerHandler(SearchDfsTransportHandler.ACTION, new SearchDfsTransportHandler()); + transportService.registerHandler(SearchQueryTransportHandler.ACTION, new SearchQueryTransportHandler()); + transportService.registerHandler(SearchQueryByIdTransportHandler.ACTION, new SearchQueryByIdTransportHandler()); + transportService.registerHandler(SearchQueryScrollTransportHandler.ACTION, new SearchQueryScrollTransportHandler()); + transportService.registerHandler(SearchQueryFetchTransportHandler.ACTION, new SearchQueryFetchTransportHandler()); + 
transportService.registerHandler(SearchQueryQueryFetchTransportHandler.ACTION, new SearchQueryQueryFetchTransportHandler()); + transportService.registerHandler(SearchQueryFetchScrollTransportHandler.ACTION, new SearchQueryFetchScrollTransportHandler()); + transportService.registerHandler(SearchFetchByIdTransportHandler.ACTION, new SearchFetchByIdTransportHandler()); + } + + public void sendExecuteDfs(Node node, final InternalSearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + DfsSearchResult result = searchService.executeDfsPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchDfsTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public DfsSearchResult newInstance() { + return new DfsSearchResult(); + } + + @Override public void handleResponse(DfsSearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteQuery(Node node, final InternalSearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + QuerySearchResult result = searchService.executeQueryPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchQueryTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public QuerySearchResult newInstance() { + return new QuerySearchResult(); + } + + @Override public void handleResponse(QuerySearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + 
listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteQuery(Node node, final QuerySearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + QuerySearchResult result = searchService.executeQueryPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchQueryByIdTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public QuerySearchResult newInstance() { + return new QuerySearchResult(); + } + + @Override public void handleResponse(QuerySearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteQuery(Node node, final InternalScrollSearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + QuerySearchResult result = searchService.executeQueryPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchQueryScrollTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public QuerySearchResult newInstance() { + return new QuerySearchResult(); + } + + @Override public void handleResponse(QuerySearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteFetch(Node node, final InternalSearchRequest request, final SearchServiceListener listener) { + if 
(clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + QueryFetchSearchResult result = searchService.executeFetchPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchQueryFetchTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public QueryFetchSearchResult newInstance() { + return new QueryFetchSearchResult(); + } + + @Override public void handleResponse(QueryFetchSearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteFetch(Node node, final QuerySearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + QueryFetchSearchResult result = searchService.executeFetchPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchQueryQueryFetchTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public QueryFetchSearchResult newInstance() { + return new QueryFetchSearchResult(); + } + + @Override public void handleResponse(QueryFetchSearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteFetch(Node node, final InternalScrollSearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + QueryFetchSearchResult result = searchService.executeFetchPhase(request); + listener.onResult(result); + } catch (Exception e) { + 
listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchQueryFetchScrollTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public QueryFetchSearchResult newInstance() { + return new QueryFetchSearchResult(); + } + + @Override public void handleResponse(QueryFetchSearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + public void sendExecuteFetch(Node node, final FetchSearchRequest request, final SearchServiceListener listener) { + if (clusterService.state().nodes().localNodeId().equals(node.id())) { + try { + FetchSearchResult result = searchService.executeFetchPhase(request); + listener.onResult(result); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + transportService.sendRequest(node, SearchFetchByIdTransportHandler.ACTION, request, new BaseTransportResponseHandler() { + + @Override public FetchSearchResult newInstance() { + return new FetchSearchResult(); + } + + @Override public void handleResponse(FetchSearchResult response) { + listener.onResult(response); + } + + @Override public void handleException(RemoteTransportException exp) { + listener.onFailure(exp); + } + + @Override public boolean spawn() { + return false; + } + }); + } + } + + + private class SearchDfsTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/dfs"; + + @Override public InternalSearchRequest newInstance() { + return new InternalSearchRequest(); + } + + @Override public void messageReceived(InternalSearchRequest request, TransportChannel channel) throws Exception { + DfsSearchResult result = searchService.executeDfsPhase(request); + channel.sendResponse(result); + } + } + + private class SearchQueryTransportHandler extends BaseTransportRequestHandler { + + static final String 
ACTION = "search/phase/query"; + + @Override public InternalSearchRequest newInstance() { + return new InternalSearchRequest(); + } + + @Override public void messageReceived(InternalSearchRequest request, TransportChannel channel) throws Exception { + QuerySearchResult result = searchService.executeQueryPhase(request); + channel.sendResponse(result); + } + } + + private class SearchQueryByIdTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/query/id"; + + @Override public QuerySearchRequest newInstance() { + return new QuerySearchRequest(); + } + + @Override public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception { + QuerySearchResult result = searchService.executeQueryPhase(request); + channel.sendResponse(result); + } + } + + private class SearchQueryScrollTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/query/scroll"; + + @Override public InternalScrollSearchRequest newInstance() { + return new InternalScrollSearchRequest(); + } + + @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { + QuerySearchResult result = searchService.executeQueryPhase(request); + channel.sendResponse(result); + } + } + + private class SearchQueryFetchTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/query+fetch"; + + @Override public InternalSearchRequest newInstance() { + return new InternalSearchRequest(); + } + + @Override public void messageReceived(InternalSearchRequest request, TransportChannel channel) throws Exception { + QueryFetchSearchResult result = searchService.executeFetchPhase(request); + channel.sendResponse(result); + } + } + + private class SearchQueryQueryFetchTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/queyr/query+fetch"; + + @Override public 
QuerySearchRequest newInstance() { + return new QuerySearchRequest(); + } + + @Override public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception { + QueryFetchSearchResult result = searchService.executeFetchPhase(request); + channel.sendResponse(result); + } + } + + private class SearchFetchByIdTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/fetch/id"; + + @Override public FetchSearchRequest newInstance() { + return new FetchSearchRequest(); + } + + @Override public void messageReceived(FetchSearchRequest request, TransportChannel channel) throws Exception { + FetchSearchResult result = searchService.executeFetchPhase(request); + channel.sendResponse(result); + } + } + + private class SearchQueryFetchScrollTransportHandler extends BaseTransportRequestHandler { + + static final String ACTION = "search/phase/query+fetch/scroll"; + + @Override public InternalScrollSearchRequest newInstance() { + return new InternalScrollSearchRequest(); + } + + @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { + QueryFetchSearchResult result = searchService.executeFetchPhase(request); + channel.sendResponse(result); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java new file mode 100644 index 00000000000..60ab3a34c4f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.builder; + +import org.elasticsearch.index.query.json.JsonQueryBuilder; +import org.elasticsearch.util.json.JsonBuilder; + +import java.util.ArrayList; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchSourceBuilder { + + public static SearchSourceBuilder searchSource() { + return new SearchSourceBuilder(); + } + + public static SearchSourceFacetsBuilder facets() { + return new SearchSourceFacetsBuilder(); + } + + private JsonQueryBuilder queryBuilder; + + private int from = -1; + + private int size = -1; + + private String queryParserName; + + private Boolean explain; + + private List sortFields; + + private List fieldNames; + + private SearchSourceFacetsBuilder facetsBuilder; + + public SearchSourceBuilder() { + } + + public SearchSourceBuilder query(JsonQueryBuilder query) { + this.queryBuilder = query; + return this; + } + + public SearchSourceBuilder from(int from) { + this.from = from; + return this; + } + + public SearchSourceBuilder size(int size) { + this.size = size; + return this; + } + + public SearchSourceBuilder queryParserName(String queryParserName) { + this.queryParserName = queryParserName; + return this; + } + + public SearchSourceBuilder explain(boolean explain) { + this.explain = explain; + return this; + } + + public 
SearchSourceBuilder sort(String name, boolean reverse) { + return sort(name, null, reverse); + } + + public SearchSourceBuilder sort(String name) { + return sort(name, null, false); + } + + public SearchSourceBuilder sort(String name, String type) { + return sort(name, type, false); + } + + public SearchSourceBuilder sort(String name, String type, boolean reverse) { + if (sortFields == null) { + sortFields = newArrayListWithCapacity(2); + } + sortFields.add(new SortTuple(name, reverse, type)); + return this; + } + + public SearchSourceBuilder facets(SearchSourceFacetsBuilder facetsBuilder) { + this.facetsBuilder = facetsBuilder; + return this; + } + + public SearchSourceBuilder fields(List fields) { + this.fieldNames = fields; + return this; + } + + public SearchSourceBuilder field(String name) { + if (fieldNames == null) { + fieldNames = new ArrayList(); + } + fieldNames.add(name); + return this; + } + + public String build() { + try { + JsonBuilder builder = JsonBuilder.cached(); + builder.startObject(); + + if (from != -1) { + builder.field("from", from); + } + if (size != -1) { + builder.field("size", size); + } + if (queryParserName != null) { + builder.field("queryParserName", queryParserName); + } + + builder.field("query"); + queryBuilder.toJson(builder); + + if (explain != null) { + builder.field("explain", explain); + } + + if (fieldNames != null) { + if (fieldNames.size() == 1) { + builder.field("fields", fieldNames.get(0)); + } else { + builder.startArray("fields"); + for (String fieldName : fieldNames) { + builder.string(fieldName); + } + builder.endArray(); + } + } + + if (sortFields != null) { + builder.field("sort"); + builder.startObject(); + for (SortTuple sortTuple : sortFields) { + builder.field(sortTuple.fieldName()); + builder.startObject(); + if (sortTuple.reverse) { + builder.field("reverse", true); + } + if (sortTuple.type != null) { + builder.field("type", sortTuple.type()); + } + builder.endObject(); + } + builder.endObject(); + } + + if 
(facetsBuilder != null) { + facetsBuilder.json(builder); + } + + builder.endObject(); + + return builder.string(); + } catch (Exception e) { + throw new SearchSourceBuilderException("Failed to build search source", e); + } + } + + private static class SortTuple { + private final String fieldName; + private final boolean reverse; + private final String type; + + private SortTuple(String fieldName, boolean reverse, String type) { + this.fieldName = fieldName; + this.reverse = reverse; + this.type = type; + } + + public String fieldName() { + return fieldName; + } + + public boolean reverse() { + return reverse; + } + + public String type() { + return type; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java new file mode 100644 index 00000000000..bd2453b3e37 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.builder; + +import org.elasticsearch.search.SearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchSourceBuilderException extends SearchException { + + public SearchSourceBuilderException(String msg) { + super(msg); + } + + public SearchSourceBuilderException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceFacetsBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceFacetsBuilder.java new file mode 100644 index 00000000000..dad72ae471d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/builder/SearchSourceFacetsBuilder.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.builder; + +import org.elasticsearch.index.query.json.JsonQueryBuilder; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchSourceFacetsBuilder { + + private String queryExecution; + + private List queryFacets; + + public SearchSourceFacetsBuilder queryExecution(String queryExecution) { + this.queryExecution = queryExecution; + return this; + } + + public SearchSourceFacetsBuilder facet(String name, JsonQueryBuilder query) { + if (queryFacets == null) { + queryFacets = newArrayListWithCapacity(2); + } + queryFacets.add(new FacetQuery(name, query)); + return this; + } + + void json(JsonBuilder builder) throws IOException { + if (queryExecution == null && queryFacets == null) { + return; + } + builder.field("facets"); + + builder.startObject(); + + if (queryExecution != null) { + builder.field("queryExecution", queryExecution); + } + if (queryFacets != null) { + for (FacetQuery facetQuery : queryFacets) { + builder.startObject(facetQuery.name()); + builder.field("query"); + facetQuery.queryBuilder().toJson(builder); + builder.endObject(); + } + } + + builder.endObject(); + } + + private static class FacetQuery { + private final String name; + private final JsonQueryBuilder queryBuilder; + + private FacetQuery(String name, JsonQueryBuilder queryBuilder) { + this.name = name; + this.queryBuilder = queryBuilder; + } + + public String name() { + return name; + } + + public JsonQueryBuilder queryBuilder() { + return queryBuilder; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java new file mode 100644 index 00000000000..d822ee7384d --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.controller; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.util.PriorityQueue; + +/** + *

Same as lucene {@link org.apache.lucene.search.HitQueue}. + * + * @author kimchy (Shay Banon) + */ +public class ScoreDocQueue extends PriorityQueue { + + public ScoreDocQueue(int size) { + initialize(size); + } + + protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) { + if (hitA.score == hitB.score) + return hitA.doc > hitB.doc; + else + return hitA.score < hitB.score; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java new file mode 100644 index 00000000000..ec3453f9876 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -0,0 +1,216 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.controller; + +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ShardFieldDocSortedHitQueue; +import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.facets.CountFacet; +import org.elasticsearch.search.facets.Facet; +import org.elasticsearch.search.facets.Facets; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.FetchSearchResultProvider; +import org.elasticsearch.search.internal.InternalSearchHit; +import org.elasticsearch.search.internal.InternalSearchHits; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.util.trove.ExtTIntArrayList; +import org.elasticsearch.util.trove.ExtTObjectIntHasMap; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class SearchPhaseController { + private static final ShardDoc[] EMPTY = new ShardDoc[0]; + + public AggregatedDfs aggregateDfs(Iterable results) { + ExtTObjectIntHasMap dfMap = new ExtTObjectIntHasMap().defaultReturnValue(-1); + int numDocs = 0; + for (DfsSearchResult result : results) { + for (int i = 0; i < result.freqs().length; i++) { + int freq = dfMap.get(result.terms()[i]); + if (freq == -1) { + freq = 
result.freqs()[i]; + } else { + freq += result.freqs()[i]; + } + dfMap.put(result.terms()[i], freq); + } + numDocs += result.numDocs(); + } + return new AggregatedDfs(dfMap, numDocs); + } + + public ShardDoc[] sortDocs(Collection results) { + if (Iterables.isEmpty(results)) { + return EMPTY; + } + + QuerySearchResultProvider queryResultProvider = Iterables.get(results, 0); + + int totalNumDocs = 0; + + int queueSize = queryResultProvider.queryResult().from() + queryResultProvider.queryResult().size(); + if (queryResultProvider.includeFetch()) { + // if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them... + queueSize *= results.size(); + } + PriorityQueue queue; + if (queryResultProvider.queryResult().topDocs() instanceof TopFieldDocs) { + // sorting ... + queue = new ShardFieldDocSortedHitQueue(((TopFieldDocs) queryResultProvider.queryResult().topDocs()).fields, queueSize); // we need to accumulate for all and then filter the from + for (QuerySearchResultProvider resultProvider : results) { + QuerySearchResult result = resultProvider.queryResult(); + ScoreDoc[] scoreDocs = result.topDocs().scoreDocs; + totalNumDocs += scoreDocs.length; + for (ScoreDoc doc : scoreDocs) { + ShardFieldDoc nodeFieldDoc = new ShardFieldDoc(result.shardTarget(), doc.doc, doc.score, ((FieldDoc) doc).fields); + if (queue.insertWithOverflow(nodeFieldDoc) == nodeFieldDoc) { + // filled the queue, break + break; + } + } + } + } else { + queue = new ScoreDocQueue(queueSize); // we need to accumulate for all and then filter the from + for (QuerySearchResultProvider resultProvider : results) { + QuerySearchResult result = resultProvider.queryResult(); + ScoreDoc[] scoreDocs = result.topDocs().scoreDocs; + totalNumDocs += scoreDocs.length; + for (ScoreDoc doc : scoreDocs) { + ShardScoreDoc nodeScoreDoc = new ShardScoreDoc(result.shardTarget(), doc.doc, doc.score); + if (queue.insertWithOverflow(nodeScoreDoc) == nodeScoreDoc) { + // filled 
the queue, break + break; + } + } + } + + } + + int resultDocsSize = queryResultProvider.queryResult().size(); + if (queryResultProvider.includeFetch()) { + // if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them... + resultDocsSize *= results.size(); + } + if (totalNumDocs < queueSize) { + resultDocsSize = totalNumDocs - queryResultProvider.queryResult().from(); + } + + if (resultDocsSize <= 0) { + return EMPTY; + } + + ShardDoc[] shardDocs = new ShardDoc[resultDocsSize]; + for (int i = resultDocsSize - 1; i >= 0; i--) // put docs in array + shardDocs[i] = (ShardDoc) queue.pop(); + return shardDocs; + } + + public Map docIdsToLoad(ShardDoc[] shardDocs) { + Map result = Maps.newHashMap(); + for (ShardDoc shardDoc : shardDocs) { + ExtTIntArrayList list = result.get(shardDoc.shardTarget()); + if (list == null) { + list = new ExtTIntArrayList(); + result.put(shardDoc.shardTarget(), list); + } + list.add(shardDoc.docId()); + } + return result; + } + + public InternalSearchResponse merge(ShardDoc[] sortedDocs, Map queryResults, Map fetchResults) { + // merge facets + Facets facets = null; + if (!queryResults.isEmpty()) { + // we rely on the fact that the order of facets is the same on all query results + QuerySearchResult queryResult = queryResults.values().iterator().next().queryResult(); + + if (queryResult.facets() != null && queryResult.facets().facets() != null && !queryResult.facets().facets().isEmpty()) { + List mergedFacets = Lists.newArrayListWithCapacity(2); + for (Facet facet : queryResult.facets().facets()) { + if (facet.type() == Facet.Type.COUNT) { + mergedFacets.add(new CountFacet(facet.name(), 0)); + } else { + throw new ElasticSearchIllegalStateException("Can't handle type [" + facet.type() + "]"); + } + } + for (QuerySearchResultProvider queryResultProvider : queryResults.values()) { + List queryFacets = queryResultProvider.queryResult().facets().facets(); + for (int i = 0; i < 
mergedFacets.size(); i++) { + Facet queryFacet = queryFacets.get(i); + Facet mergedFacet = mergedFacets.get(i); + if (queryFacet.type() == Facet.Type.COUNT) { + ((CountFacet) mergedFacet).increment(((CountFacet) queryFacet).count()); + } + } + } + facets = new Facets(mergedFacets); + } + } + + // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) + long totalHits = 0; + for (QuerySearchResultProvider queryResultProvider : queryResults.values()) { + totalHits += queryResultProvider.queryResult().topDocs().totalHits; + } + + // clean the fetch counter + for (FetchSearchResultProvider fetchSearchResultProvider : fetchResults.values()) { + fetchSearchResultProvider.fetchResult().initCounter(); + } + + // merge hits + List hits = new ArrayList(); + if (!fetchResults.isEmpty()) { + for (ShardDoc shardDoc : sortedDocs) { + FetchSearchResultProvider fetchResultProvider = fetchResults.get(shardDoc.shardTarget()); + if (fetchResultProvider == null) { + continue; + } + FetchSearchResult fetchResult = fetchResultProvider.fetchResult(); + int index = fetchResult.counterGetAndIncrement(); + SearchHit searchHit = fetchResult.hits().hits()[index]; + ((InternalSearchHit) searchHit).shard(fetchResult.shardTarget()); + hits.add(searchHit); + } + } + InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new SearchHit[hits.size()]), totalHits); + return new InternalSearchResponse(searchHits, facets); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardDoc.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardDoc.java new file mode 100644 index 00000000000..8e93b4e80d7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardDoc.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.controller; + +import org.elasticsearch.search.SearchShardTarget; + +/** + * @author kimchy (Shay Banon) + */ +public interface ShardDoc { + + SearchShardTarget shardTarget(); + + int docId(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardFieldDoc.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardFieldDoc.java new file mode 100644 index 00000000000..87f36367ed4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardFieldDoc.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.controller; + +import org.apache.lucene.search.FieldDoc; +import org.elasticsearch.search.SearchShardTarget; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardFieldDoc extends FieldDoc implements ShardDoc { + + private final SearchShardTarget shardTarget; + + public ShardFieldDoc(SearchShardTarget shardTarget, int doc, float score) { + super(doc, score); + this.shardTarget = shardTarget; + } + + public ShardFieldDoc(SearchShardTarget shardTarget, int doc, float score, Comparable[] fields) { + super(doc, score, fields); + this.shardTarget = shardTarget; + } + + @Override public SearchShardTarget shardTarget() { + return this.shardTarget; + } + + @Override public int docId() { + return this.doc; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardScoreDoc.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardScoreDoc.java new file mode 100644 index 00000000000..b5893740e85 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/controller/ShardScoreDoc.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.controller; + +import org.apache.lucene.search.ScoreDoc; +import org.elasticsearch.search.SearchShardTarget; + +/** + * @author kimchy (Shay Banon) + */ +public class ShardScoreDoc extends ScoreDoc implements ShardDoc { + + private final SearchShardTarget shardTarget; + + public ShardScoreDoc(SearchShardTarget shardTarget, int doc, float score) { + super(doc, score); + this.shardTarget = shardTarget; + } + + @Override public SearchShardTarget shardTarget() { + return this.shardTarget; + } + + @Override public int docId() { + return doc; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java new file mode 100644 index 00000000000..c7e62a1090f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.dfs; + +import org.apache.lucene.index.Term; +import org.elasticsearch.util.gnu.trove.TObjectIntProcedure; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.trove.ExtTObjectIntHasMap; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class AggregatedDfs implements Streamable { + + private ExtTObjectIntHasMap dfMap; + + private int numDocs; + + private AggregatedDfs() { + + } + + public AggregatedDfs(ExtTObjectIntHasMap dfMap, int numDocs) { + this.dfMap = dfMap.defaultReturnValue(-1); + this.numDocs = numDocs; + } + + public ExtTObjectIntHasMap dfMap() { + return dfMap; + } + + public int numDocs() { + return numDocs; + } + + public static AggregatedDfs readAggregatedDfs(DataInput in) throws IOException, ClassNotFoundException { + AggregatedDfs result = new AggregatedDfs(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + dfMap = new ExtTObjectIntHasMap(size).defaultReturnValue(-1); + for (int i = 0; i < size; i++) { + dfMap.put(new Term(in.readUTF(), in.readUTF()), in.readInt()); + } + numDocs = in.readInt(); + } + + @Override public void writeTo(final DataOutput out) throws IOException { + out.writeInt(dfMap.size()); + WriteToProcedure writeToProcedure = new WriteToProcedure(out); + if (!dfMap.forEachEntry(writeToProcedure)) { + throw writeToProcedure.exception; + } + 
out.writeInt(numDocs); + } + + private static class WriteToProcedure implements TObjectIntProcedure { + + private final DataOutput out; + + IOException exception; + + private WriteToProcedure(DataOutput out) { + this.out = out; + } + + @Override public boolean execute(Term a, int b) { + try { + out.writeUTF(a.field()); + out.writeUTF(a.text()); + out.writeInt(b); + return true; + } catch (IOException e) { + exception = e; + } + return false; + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java new file mode 100644 index 00000000000..4003073813a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.dfs; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.*; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class CachedDfSource extends Searcher { + + private final AggregatedDfs dfs; + + public CachedDfSource(AggregatedDfs dfs, Similarity similarity) throws IOException { + this.dfs = dfs; + setSimilarity(similarity); + } + + public int docFreq(Term term) { + int df = dfs.dfMap().get(term); + if (df == -1) { + throw new IllegalArgumentException("df for term " + term.text() + " not available"); + } + return df; + } + + public int[] docFreqs(Term[] terms) { + int[] result = new int[terms.length]; + for (int i = 0; i < terms.length; i++) { + result[i] = docFreq(terms[i]); + } + return result; + } + + public int maxDoc() { + return dfs.numDocs(); + } + + public Query rewrite(Query query) { + // this is a bit of a hack. We know that a query which + // creates a Weight based on this Dummy-Searcher is + // always already rewritten (see preparedWeight()). 
+ // Therefore we just return the unmodified query here + return query; + } + + public void close() { + throw new UnsupportedOperationException(); + } + + public Document doc(int i) { + throw new UnsupportedOperationException(); + } + + public Document doc(int i, FieldSelector fieldSelector) { + throw new UnsupportedOperationException(); + } + + public Explanation explain(Weight weight, int doc) { + throw new UnsupportedOperationException(); + } + + public void search(Weight weight, Filter filter, Collector results) { + throw new UnsupportedOperationException(); + } + + public TopDocs search(Weight weight, Filter filter, int n) { + throw new UnsupportedOperationException(); + } + + public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) { + throw new UnsupportedOperationException(); + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java new file mode 100644 index 00000000000..1b94fb58991 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.dfs; + +import com.google.common.collect.ImmutableMap; +import org.apache.lucene.index.Term; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.SearchPhase; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.util.gnu.trove.THashSet; + +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class DfsPhase implements SearchPhase { + + @Override public Map parseElements() { + return ImmutableMap.of(); + } + + public void execute(SearchContext context) { + try { + context.rewriteQuery(); + + THashSet termsSet = new THashSet(); + context.query().extractTerms(termsSet); + Term[] terms = termsSet.toArray(new Term[termsSet.size()]); + int[] freqs = context.searcher().docFreqs(terms); + + context.dfsResult().termsAndFreqs(terms, freqs); + context.dfsResult().numDocs(context.searcher().getIndexReader().numDocs()); + } catch (Exception e) { + throw new DfsPhaseExecutionException(context); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java new file mode 100644 index 00000000000..8c1d1c0d981 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.dfs; + +import org.elasticsearch.search.SearchException; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class DfsPhaseExecutionException extends SearchException { + + public DfsPhaseExecutionException(SearchContext context) { + super("Failed to execute dfs [" + context.query() + "]"); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java new file mode 100644 index 00000000000..6bcac391a25 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.dfs; + +import org.apache.lucene.index.Term; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.search.SearchShardTarget.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DfsSearchResult implements Streamable { + + private static Term[] EMPTY_TERMS = new Term[0]; + + private static int[] EMPTY_FREQS = new int[0]; + + private SearchShardTarget shardTarget; + + private long id; + + private Term[] terms; + + private int[] freqs; + + private int numDocs; + + public DfsSearchResult() { + + } + + public DfsSearchResult(long id, SearchShardTarget shardTarget) { + this.id = id; + this.shardTarget = shardTarget; + } + + public long id() { + return this.id; + } + + public SearchShardTarget shardTarget() { + return shardTarget; + } + + public DfsSearchResult numDocs(int numDocs) { + this.numDocs = numDocs; + return this; + } + + public int numDocs() { + return numDocs; + } + + public DfsSearchResult termsAndFreqs(Term[] terms, int[] freqs) { + this.terms = terms; + this.freqs = freqs; + return this; + } + + public Term[] terms() { + return terms; + } + + public int[] freqs() { + return freqs; + } + + public static DfsSearchResult readDfsSearchResult(DataInput in) throws IOException, ClassNotFoundException { + DfsSearchResult result = new DfsSearchResult(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readLong(); + shardTarget = readSearchShardTarget(in); + int termsSize = in.readInt(); + if (termsSize == 0) { + terms = EMPTY_TERMS; + } else { + terms = new Term[termsSize]; + for (int i = 0; i < terms.length; i++) { + terms[i] = new Term(in.readUTF(), 
in.readUTF()); + } + } + int freqsSize = in.readInt(); + if (freqsSize == 0) { + freqs = EMPTY_FREQS; + } else { + freqs = new int[freqsSize]; + for (int i = 0; i < freqs.length; i++) { + freqs[i] = in.readInt(); + } + } + numDocs = in.readInt(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(id); + shardTarget.writeTo(out); + out.writeInt(terms.length); + for (Term term : terms) { + out.writeUTF(term.field()); + out.writeUTF(term.text()); + } + out.writeInt(freqs.length); + for (int freq : freqs) { + out.writeInt(freq); + } + out.writeInt(numDocs); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/CountFacet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/CountFacet.java new file mode 100644 index 00000000000..520a41f0381 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/CountFacet.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.facets; + +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class CountFacet implements Facet { + + private String name; + + private long count; + + private CountFacet() { + + } + + public CountFacet(String name, long count) { + this.name = name; + this.count = count; + } + + @Override public Type type() { + return Type.COUNT; + } + + public String name() { + return name; + } + + public long count() { + return count; + } + + public void increment(long increment) { + count += increment; + } + + @Override public void toJson(JsonBuilder builder) throws IOException { + builder.field(name, count); + } + + public static CountFacet readCountFacet(DataInput in) throws IOException, ClassNotFoundException { + CountFacet result = new CountFacet(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + name = in.readUTF(); + count = in.readLong(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(name); + out.writeLong(count); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/Facet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/Facet.java new file mode 100644 index 00000000000..d93e9067c3b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/Facet.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.facets; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.json.ToJson; + +/** + * @author kimchy (Shay Banon) + */ +public interface Facet extends Streamable, ToJson { + + enum Type { + COUNT((byte) 0); + + byte id; + + Type(byte id) { + this.id = id; + } + + public byte id() { + return id; + } + + public static Type fromId(byte id) { + if (id == 0) { + return COUNT; + } else { + throw new ElasticSearchIllegalArgumentException("No match for id [" + id + "]"); + } + } + } + + String name(); + + Type type(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetPhaseExecutionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetPhaseExecutionException.java new file mode 100644 index 00000000000..49eadf9c78c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetPhaseExecutionException.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.facets; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class FacetPhaseExecutionException extends ElasticSearchException { + + public FacetPhaseExecutionException(String facetName, String msg, Throwable t) { + super("Facet [" + facetName + "]: " + msg, t); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/Facets.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/Facets.java new file mode 100644 index 00000000000..a82e493ad75 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/Facets.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.facets; + +import com.google.common.collect.ImmutableList; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.json.JsonBuilder; +import org.elasticsearch.util.json.ToJson; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; + +import static com.google.common.collect.Lists.*; +import static org.elasticsearch.search.facets.CountFacet.*; + +/** + * @author kimchy (Shay Banon) + */ +public class Facets implements Streamable, ToJson { + + private final List EMPTY = ImmutableList.of(); + + private List facets; + + private Facets() { + + } + + public Facets(List facets) { + this.facets = facets; + } + + public List facets() { + return facets; + } + + public CountFacet countFacet(String name) { + return (CountFacet) facet(name); + } + + public Facet facet(String name) { + if (facets == null) { + return null; + } + for (Facet facet : facets) { + if (facet.name().equals(name)) { + return facet; + } + } + return null; + } + + @Override public void toJson(JsonBuilder builder) throws IOException { + builder.startObject("facets"); + for (Facet facet : facets) { + facet.toJson(builder); + } + builder.endObject(); + } + + public static Facets readFacets(DataInput in) throws IOException, ClassNotFoundException { + Facets result = new Facets(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + int size = in.readInt(); + if (size == 0) { + facets = EMPTY; + } else { + facets = newArrayListWithCapacity(size); + for (int i = 0; i < size; i++) { + byte id = in.readByte(); + if (id == Facet.Type.COUNT.id()) { + facets.add(readCountFacet(in)); + } else { + throw new IOException("Can't handle facet type with id [" + id + "]"); + } + } + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.write(facets.size()); + for (Facet facet : facets) { + 
out.write(facet.type().id()); + facet.writeTo(out); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetsParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetsParseElement.java new file mode 100644 index 00000000000..3a996c412f3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetsParseElement.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.facets; + +import com.google.common.collect.Lists; +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.index.query.json.JsonIndexQueryParser; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.SearchContextFacets; + +import java.util.List; + +/** + *

+ * facets : {
+ *  queryExecution : "collect|idset",
+ *  facet1: {
+ *      query : { ... }
+ *  }
+ * }
+ * 
+ * + * @author kimchy (Shay Banon) + */ +public class FacetsParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + JsonToken token; + SearchContextFacets.QueryExecutionType queryExecutionType = SearchContextFacets.QueryExecutionType.COLLECT; + List queryFacets = null; + while ((token = jp.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + String topLevelFieldName = jp.getCurrentName(); + + if ("queryExecution".equals(topLevelFieldName)) { + jp.nextToken(); // move to value + String text = jp.getText(); + if ("collect".equals(text)) { + queryExecutionType = SearchContextFacets.QueryExecutionType.COLLECT; + } else if ("idset".equals(text)) { + queryExecutionType = SearchContextFacets.QueryExecutionType.IDSET; + } else { + throw new SearchParseException("Unsupported query type [" + text + "]"); + } + } else { + + jp.nextToken(); // move to START_OBJECT + + jp.nextToken(); // move to FIELD_NAME + String facetType = jp.getCurrentName(); + + if ("query".equals(facetType)) { + JsonIndexQueryParser indexQueryParser = (JsonIndexQueryParser) context.queryParser(); + Query facetQuery = indexQueryParser.parse(jp, context.source()); + + if (queryFacets == null) { + queryFacets = Lists.newArrayListWithCapacity(2); + } + queryFacets.add(new SearchContextFacets.QueryFacet(topLevelFieldName, facetQuery)); + } else { + throw new SearchParseException("Unsupported facet type [" + facetType + "] for facet name [" + topLevelFieldName + "]"); + } + jp.nextToken(); + } + } + } + + if (queryExecutionType == SearchContextFacets.QueryExecutionType.IDSET) { + // if we are using doc id sets, we need to enable the fact that we accumelate it + context.searcher().enabledDocIdSet(); + } + + context.facets(new SearchContextFacets(queryExecutionType, queryFacets)); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetsPhase.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetsPhase.java new file mode 100644 index 00000000000..26df99c4ed3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/facets/FacetsPhase.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.facets; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; +import org.apache.lucene.util.OpenBitSet; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.SearchPhase; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.SearchContextFacets; +import org.elasticsearch.util.lucene.Lucene; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class FacetsPhase implements SearchPhase { + + @Override public Map parseElements() { + return ImmutableMap.of("facets", new FacetsParseElement()); + } + + @Override public void execute(SearchContext context) throws ElasticSearchException { + if (context.facets() == null) { + return; + } + if (context.queryResult().facets() != null) { + // no need to compute the facets twice, they should be computed on a per conext basis + return; + } + + SearchContextFacets contextFacets = context.facets(); + + List facets = Lists.newArrayListWithCapacity(2); + if (contextFacets.queryFacets() != null) { + for (SearchContextFacets.QueryFacet queryFacet : contextFacets.queryFacets()) { + Filter facetFilter = new QueryWrapperFilter(queryFacet.query()); + facetFilter = context.filterCache().cache(facetFilter); + long count; + if (contextFacets.queryType() == SearchContextFacets.QueryExecutionType.COLLECT) { + count = executeQueryCollectorCount(context, queryFacet, facetFilter); + } else if (contextFacets.queryType() == SearchContextFacets.QueryExecutionType.IDSET) { + count = executeQueryIdSetCount(context, queryFacet, facetFilter); + } else { + throw new 
ElasticSearchIllegalStateException("No matching for type [" + contextFacets.queryType() + "]"); + } + facets.add(new CountFacet(queryFacet.name(), count)); + } + } + + context.queryResult().facets(new Facets(facets)); + } + + private long executeQueryIdSetCount(SearchContext context, SearchContextFacets.QueryFacet queryFacet, Filter facetFilter) { + try { + DocIdSet filterDocIdSet = facetFilter.getDocIdSet(context.searcher().getIndexReader()); + return OpenBitSet.intersectionCount(context.searcher().docIdSet(), (OpenBitSet) filterDocIdSet); + } catch (IOException e) { + throw new FacetPhaseExecutionException(queryFacet.name(), "Failed to bitset facets for query [" + queryFacet.query() + "]", e); + } + } + + private long executeQueryCollectorCount(SearchContext context, SearchContextFacets.QueryFacet queryFacet, Filter facetFilter) { + Lucene.CountCollector countCollector = new Lucene.CountCollector(-1.0f); + try { + context.searcher().search(context.query(), facetFilter, countCollector); + } catch (IOException e) { + throw new FacetPhaseExecutionException(queryFacet.name(), "Failed to collect facets for query [" + queryFacet.query() + "]", e); + } + return countCollector.count(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/ExplainParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/ExplainParseElement.java new file mode 100644 index 00000000000..b1f72e36880 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/ExplainParseElement.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class ExplainParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + context.explain(jp.getCurrentToken() == JsonToken.VALUE_TRUE); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java new file mode 100644 index 00000000000..0e185e51333 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import com.google.common.collect.ImmutableMap; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Fieldable; +import org.elasticsearch.index.mapper.*; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHitField; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.SearchPhase; +import org.elasticsearch.search.internal.InternalSearchHit; +import org.elasticsearch.search.internal.InternalSearchHitField; +import org.elasticsearch.search.internal.InternalSearchHits; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class FetchPhase implements SearchPhase { + + @Override public Map parseElements() { + return ImmutableMap.of("explain", new ExplainParseElement(), "fields", new FieldsParseElement()); + } + + public void execute(SearchContext context) { + FieldMappersFieldSelector fieldSelector = buildFieldSelectors(context); + + SearchHit[] hits = new SearchHit[context.docIdsToLoad().length]; + int index = 0; + for (int docId : context.docIdsToLoad()) { + Document doc = loadDocument(context, fieldSelector, docId); + Uid uid = extractUid(context, doc); + + DocumentMapper documentMapper = context.mapperService().type(uid.type()); + + String source = extractSource(doc, documentMapper); + + InternalSearchHit searchHit = new InternalSearchHit(uid.id(), uid.type(), source, null); + hits[index] = searchHit; + + for (Object oField : doc.getFields()) { + Fieldable field = (Fieldable) oField; + String name = field.name(); + Object value = null; + FieldMappers fieldMappers = documentMapper.mappers().indexName(field.name()); + if (fieldMappers != null) { + FieldMapper 
mapper = fieldMappers.mapper(); + if (mapper != null) { + name = mapper.name(); + value = mapper.valueForSearch(field); + } + } + if (value == null) { + if (field.isBinary()) { + value = field.getBinaryValue(); + } else { + value = field.stringValue(); + } + } + + if (searchHit.fields() == null) { + searchHit.fields(new HashMap(2)); + } + + SearchHitField hitField = searchHit.fields().get(name); + if (hitField == null) { + hitField = new InternalSearchHitField(name, new ArrayList(2)); + searchHit.fields().put(name, hitField); + } + hitField.values().add(value); + } + doExplanation(context, docId, searchHit); + + index++; + } + context.fetchResult().hits(new InternalSearchHits(hits, context.queryResult().topDocs().totalHits)); + } + + private void doExplanation(SearchContext context, int docId, InternalSearchHit searchHit) { + if (context.explain()) { + try { + searchHit.explanation(context.searcher().explain(context.query(), docId)); + } catch (IOException e) { + throw new FetchPhaseExecutionException(context, "Failed to explain doc [" + docId + "]", e); + } + } + } + + private String extractSource(Document doc, DocumentMapper documentMapper) { + String source = null; + Fieldable sourceField = doc.getFieldable(documentMapper.sourceMapper().indexName()); + if (sourceField != null) { + source = documentMapper.sourceMapper().valueAsString(sourceField); + doc.removeField(documentMapper.sourceMapper().indexName()); + } + return source; + } + + private Uid extractUid(SearchContext context, Document doc) { + Uid uid = null; + for (FieldMapper fieldMapper : context.mapperService().uidFieldMappers()) { + String sUid = doc.get(fieldMapper.indexName()); + if (sUid != null) { + uid = Uid.createUid(sUid); + doc.removeField(fieldMapper.indexName()); + break; + } + } + if (uid == null) { + // no type, nothing to do (should not really happen + throw new FetchPhaseExecutionException(context, "Failed to load uid from the index"); + } + return uid; + } + + private Document 
loadDocument(SearchContext context, FieldMappersFieldSelector fieldSelector, int docId) { + Document doc; + try { + doc = context.searcher().doc(docId, fieldSelector); + } catch (IOException e) { + throw new FetchPhaseExecutionException(context, "Failed to fetch doc id [" + docId + "]", e); + } + return doc; + } + + private FieldMappersFieldSelector buildFieldSelectors(SearchContext context) { + FieldMappersFieldSelector fieldSelector = new FieldMappersFieldSelector(); + if (context.fieldNames() != null) { + for (String fieldName : context.fieldNames()) { + FieldMappers x = context.mapperService().smartNameFieldMappers(fieldName); + if (x == null) { + throw new FetchPhaseExecutionException(context, "No mapping for field [" + fieldName + "]"); + } + fieldSelector.add(x); + } + } else { + fieldSelector.add(context.mapperService().sourceFieldMappers()); + } + // add the uids by default, so we can return the id/type + fieldSelector.add(context.mapperService().uidFieldMappers()); + return fieldSelector; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java new file mode 100644 index 00000000000..2172ed9156a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class FetchPhaseExecutionException extends ElasticSearchException { + + public FetchPhaseExecutionException(SearchContext context, String msg) { + this(context, msg, null); + } + + public FetchPhaseExecutionException(SearchContext context, String msg, Throwable t) { + super("Failed to fetch query [" + context.query() + "], sort [" + context.sort() + "], from [" + context.from() + "], size [" + context.size() + "], reason [" + msg + "]", t); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java new file mode 100644 index 00000000000..11e226fa073 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.trove.ExtTIntArrayList; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class FetchSearchRequest implements Streamable { + + private long id; + + private int[] docIds; + + private transient int size; + + public FetchSearchRequest() { + } + + public FetchSearchRequest(long id, ExtTIntArrayList list) { + this.id = id; + this.docIds = list.unsafeArray(); + this.size = list.size(); + } + + public FetchSearchRequest(long id, int[] docIds) { + this.id = id; + this.docIds = docIds; + this.size = docIds.length; + } + + public long id() { + return id; + } + + public int[] docIds() { + return docIds; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readLong(); + size = in.readInt(); + docIds = new int[size]; + for (int i = 0; i < docIds.length; i++) { + docIds[i] = in.readInt(); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(id); + out.writeInt(size); + for (int i = 0; i < size; i++) { + out.writeInt(docIds[i]); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java new file mode 100644 index 00000000000..cc3690b4987 --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.InternalSearchHits; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.search.SearchShardTarget.*; +import static org.elasticsearch.search.internal.InternalSearchHits.*; + +/** + * @author kimchy (Shay Banon) + */ +public class FetchSearchResult implements Streamable, FetchSearchResultProvider { + + private long id; + + private SearchShardTarget shardTarget; + + private InternalSearchHits hits; + + // client side counter + private transient int counter; + + public FetchSearchResult() { + + } + + public FetchSearchResult(long id, SearchShardTarget shardTarget) { + this.id = id; + this.shardTarget = shardTarget; + } + + @Override public FetchSearchResult fetchResult() { + return this; + } + + public long id() { + return this.id; + } + + public SearchShardTarget shardTarget() { + return this.shardTarget; + 
} + + public void hits(InternalSearchHits hits) { + this.hits = hits; + } + + public InternalSearchHits hits() { + return hits; + } + + public FetchSearchResult initCounter() { + counter = 0; + return this; + } + + public int counterGetAndIncrement() { + return counter++; + } + + public static FetchSearchResult readFetchSearchResult(DataInput in) throws IOException, ClassNotFoundException { + FetchSearchResult result = new FetchSearchResult(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readLong(); + shardTarget = readSearchShardTarget(in); + hits = readSearchHits(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(id); + shardTarget.writeTo(out); + hits.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java new file mode 100644 index 00000000000..1cb0892fd09 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import org.elasticsearch.search.SearchShardTarget; + +/** + * @author kimchy (Shay Banon) + */ +public interface FetchSearchResultProvider { + + long id(); + + SearchShardTarget shardTarget(); + + FetchSearchResult fetchResult(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java new file mode 100644 index 00000000000..064ef4fd605 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.fetch; + +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.internal.SearchContext; + +import java.util.ArrayList; + +/** + * @author kimchy (Shay Banon) + */ +public class FieldsParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + JsonToken token = jp.getCurrentToken(); + if (token == JsonToken.START_ARRAY) { + jp.nextToken(); + ArrayList fieldNames = new ArrayList(); + do { + fieldNames.add(jp.getText()); + } while ((token = jp.nextToken()) != JsonToken.END_ARRAY); + context.fieldNames(fieldNames.toArray(new String[fieldNames.size()])); + } else if (token == JsonToken.VALUE_STRING) { + context.fieldNames(new String[]{jp.getText()}); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java new file mode 100644 index 00000000000..224019e7296 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.fetch; + +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.search.fetch.FetchSearchResult.*; +import static org.elasticsearch.search.query.QuerySearchResult.*; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryFetchSearchResult implements Streamable, QuerySearchResultProvider, FetchSearchResultProvider { + + private QuerySearchResult queryResult; + + private FetchSearchResult fetchResult; + + public QueryFetchSearchResult() { + + } + + public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { + this.queryResult = queryResult; + this.fetchResult = fetchResult; + } + + public long id() { + return queryResult.id(); + } + + public SearchShardTarget shardTarget() { + return queryResult.shardTarget(); + } + + @Override public boolean includeFetch() { + return true; + } + + public QuerySearchResult queryResult() { + return queryResult; + } + + public FetchSearchResult fetchResult() { + return fetchResult; + } + + public static QueryFetchSearchResult readQueryFetchSearchResult(DataInput in) throws IOException, ClassNotFoundException { + QueryFetchSearchResult result = new QueryFetchSearchResult(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + queryResult = readQuerySearchResult(in); + fetchResult = readFetchSearchResult(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + queryResult.writeTo(out); + fetchResult.writeTo(out); + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java new file mode 100644 index 00000000000..0965ea88226 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.search.internal;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.*;
import org.apache.lucene.util.OpenBitSet;
import org.elasticsearch.search.dfs.CachedDfSource;
import org.elasticsearch.util.lucene.docidset.DocIdSetCollector;

import java.io.IOException;

/**
 * An {@link IndexSearcher} bound to a {@link SearchContext}. It can use a
 * {@link CachedDfSource} for distributed term statistics, enforce the
 * context's timeout, and optionally record the doc id set of the matched
 * documents for later phases.
 *
 * @author kimchy (Shay Banon)
 */
public class ContextIndexSearcher extends IndexSearcher {

    private SearchContext searchContext;

    // When set, term statistics come from the (distributed) cached df source
    // instead of the local reader.
    private CachedDfSource dfSource;

    // When true, search() captures the matched doc ids into docIdSet.
    private boolean docIdSetEnabled;

    // Lazily computed on the first search() after enabledDocIdSet(); reused
    // afterwards (same query is executed within one context).
    private OpenBitSet docIdSet;

    public ContextIndexSearcher(SearchContext searchContext, IndexReader r) {
        super(r);
        this.searchContext = searchContext;
    }

    public void dfSource(CachedDfSource dfSource) {
        this.dfSource = dfSource;
    }

    /** Enables capturing of the matched doc id set on the next search. */
    public void enabledDocIdSet() {
        docIdSetEnabled = true;
    }

    public OpenBitSet docIdSet() {
        return docIdSet;
    }

    // Route weight creation through the cached df source when present, so
    // scoring uses cluster-wide document frequencies.
    @Override protected Weight createWeight(Query query) throws IOException {
        if (dfSource == null) {
            return super.createWeight(query);
        }
        return query.weight(dfSource);
    }

    @Override public void search(Weight weight, Filter filter, Collector collector) throws IOException {
        // Wrapping order matters: the timeout wrapper is innermost, the
        // DocIdSetCollector outermost, so the cast at the bottom is valid.
        if (searchContext.timeout() != null) {
            collector = new TimeLimitingCollector(collector, searchContext.timeout().millis());
        }
        // we only compute the doc id set once since within a context, we execute the same query always...
        if (docIdSetEnabled && docIdSet == null) {
            collector = new DocIdSetCollector(collector, getIndexReader());
        }
        if (searchContext.timeout() != null) {
            // A timeout is reported via the query result flag rather than by
            // propagating the exception.
            searchContext.queryResult().searchTimedOut(false);
            try {
                super.search(weight, filter, collector);
            } catch (TimeLimitingCollector.TimeExceededException e) {
                searchContext.queryResult().searchTimedOut(true);
            }
        } else {
            super.search(weight, filter, collector);
        }
        if (docIdSetEnabled && docIdSet == null) {
            this.docIdSet = ((DocIdSetCollector) collector).docIdSet();
        }
    }
}
\ No newline at end of file
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java
new file mode 100644
index 00000000000..ce7f7b1ee3e
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java
@@ -0,0 +1,76 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.internal;

import org.elasticsearch.search.Scroll;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import static org.elasticsearch.search.Scroll.*;

/**
 * Shard-level request to continue a scrolled search, identified by the id of
 * the search context kept on the shard, with an optional scroll keep-alive.
 *
 * @author kimchy (Shay Banon)
 */
public class InternalScrollSearchRequest implements Streamable {

    // Id of the search context to continue.
    private long id;

    // Optional; null means no new scroll keep-alive is set.
    private Scroll scroll;

    // No-arg constructor for deserialization via readFrom().
    public InternalScrollSearchRequest() {
    }

    public InternalScrollSearchRequest(long id) {
        this.id = id;
    }

    public long id() {
        return id;
    }

    public Scroll scroll() {
        return scroll;
    }

    public InternalScrollSearchRequest scroll(Scroll scroll) {
        this.scroll = scroll;
        return this;
    }

    // Wire format: id, then a presence flag followed by the scroll (if any).
    // Must mirror writeTo() exactly.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        id = in.readLong();
        if (in.readBoolean()) {
            scroll = readScroll(in);
        }
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeLong(id);
        if (scroll == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            scroll.writeTo(out);
        }
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
new file mode 100644
index 00000000000..f9aeefe0857
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
@@ -0,0 +1,247 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.internal; + +import com.google.common.collect.ImmutableMap; +import org.apache.lucene.search.Explanation; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHitField; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.search.SearchShardTarget.*; +import static org.elasticsearch.search.internal.InternalSearchHitField.*; +import static org.elasticsearch.util.lucene.Lucene.*; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalSearchHit implements SearchHit { + + private String id; + + private String type; + + private String source; + + private Map fields; + + private Explanation explanation; + + @Nullable private SearchShardTarget shard; + + private InternalSearchHit() { + + } + + public InternalSearchHit(String id, String type, String source, Map fields) { + this.id = id; + this.type = type; + this.source = source; + this.fields = fields; + } + + @Override public String index() { + return shard.index(); + } + + public String id() { + return id; + } + + public String type() { + return type; + } + + public String source() { + return source; + } + + public Map fields() { + return fields; + } + + public void fields(Map fields) { + this.fields = fields; + } + + public Explanation explanation() { + return explanation; + } + + public void 
explanation(Explanation explanation) { + this.explanation = explanation; + } + + public SearchShardTarget shard() { + return shard; + } + + public void shard(SearchShardTarget target) { + this.shard = target; + } + + @Override public SearchShardTarget target() { + return null; + } + + @Override public void toJson(JsonBuilder builder) throws IOException { + builder.startObject(); + builder.field("_index", shard.index()); +// builder.field("_shard", shard.shardId()); +// builder.field("_node", shard.nodeId()); + builder.field("_type", type()); + builder.field("_id", id()); + if (source() != null) { + builder.raw(", \"_source\" : "); + builder.raw(source()); + } + if (fields() != null) { + for (SearchHitField field : fields().values()) { + if (field.values().isEmpty()) { + continue; + } + if (field.values().size() == 1) { + builder.field(field.name(), field.values().get(0)); + } else { + builder.field(field.name()); + builder.startArray(); + for (Object value : field.values()) { + builder.value(value); + } + builder.endArray(); + } + } + } + if (explanation() != null) { + builder.field("_explanation"); + buildExplanation(builder, explanation()); + } + builder.endObject(); + } + + private void buildExplanation(JsonBuilder builder, Explanation explanation) throws IOException { + builder.startObject(); + builder.field("value", explanation.getValue()); + builder.field("description", explanation.getDescription()); + Explanation[] innerExps = explanation.getDetails(); + if (innerExps != null) { + builder.startArray("details"); + for (Explanation exp : innerExps) { + buildExplanation(builder, exp); + } + builder.endArray(); + } + builder.endObject(); + } + + public static InternalSearchHit readSearchHit(DataInput in) throws IOException, ClassNotFoundException { + InternalSearchHit hit = new InternalSearchHit(); + hit.readFrom(in); + return hit; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readUTF(); + type = 
in.readUTF(); + if (in.readBoolean()) { + source = in.readUTF(); + } + if (in.readBoolean()) { + explanation = readExplanation(in); + } + int size = in.readInt(); + if (size == 0) { + fields = ImmutableMap.of(); + } else if (size == 1) { + SearchHitField hitField = readSearchHitField(in); + fields = ImmutableMap.of(hitField.name(), hitField); + } else if (size == 2) { + SearchHitField hitField1 = readSearchHitField(in); + SearchHitField hitField2 = readSearchHitField(in); + fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2); + } else if (size == 3) { + SearchHitField hitField1 = readSearchHitField(in); + SearchHitField hitField2 = readSearchHitField(in); + SearchHitField hitField3 = readSearchHitField(in); + fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2, hitField3.name(), hitField3); + } else if (size == 4) { + SearchHitField hitField1 = readSearchHitField(in); + SearchHitField hitField2 = readSearchHitField(in); + SearchHitField hitField3 = readSearchHitField(in); + SearchHitField hitField4 = readSearchHitField(in); + fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2, hitField3.name(), hitField3, hitField4.name(), hitField4); + } else if (size == 5) { + SearchHitField hitField1 = readSearchHitField(in); + SearchHitField hitField2 = readSearchHitField(in); + SearchHitField hitField3 = readSearchHitField(in); + SearchHitField hitField4 = readSearchHitField(in); + SearchHitField hitField5 = readSearchHitField(in); + fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2, hitField3.name(), hitField3, hitField4.name(), hitField4, hitField5.name(), hitField5); + } else { + ImmutableMap.Builder builder = ImmutableMap.builder(); + for (int i = 0; i < size; i++) { + SearchHitField hitField = readSearchHitField(in); + builder.put(hitField.name(), hitField); + } + fields = builder.build(); + } + if (in.readBoolean()) { + shard = 
readSearchShardTarget(in); + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(id); + out.writeUTF(type); + if (source == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeUTF(source); + } + if (explanation == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + writeExplanation(out, explanation); + } + if (fields == null) { + out.writeInt(0); + } else { + out.writeInt(fields.size()); + for (SearchHitField hitField : fields().values()) { + hitField.writeTo(out); + } + } + if (shard == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + shard.writeTo(out); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java new file mode 100644 index 00000000000..38d0eb8d0b7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.search.internal;

import org.elasticsearch.search.SearchHitField;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * A named field of a search hit with its list of values. Values are
 * serialized with a one-byte type tag per value; supported types are
 * String(0), Integer(1), Long(2), Float(3), Double(4), Boolean(5) and
 * byte[](6).
 *
 * @author kimchy (Shay Banon)
 */
public class InternalSearchHitField implements SearchHitField {

    private String name;

    private List values;

    // For deserialization via readSearchHitField().
    private InternalSearchHitField() {

    }

    public InternalSearchHitField(String name, List values) {
        this.name = name;
        this.values = values;
    }

    public String name() {
        return name;
    }

    public List values() {
        return values;
    }

    /**
     * Deserialization factory: reads a new field from the stream.
     */
    public static InternalSearchHitField readSearchHitField(DataInput in) throws IOException, ClassNotFoundException {
        InternalSearchHitField result = new InternalSearchHitField();
        result.readFrom(in);
        return result;
    }

    // Wire format: name, value count, then (type tag, value) per entry.
    // The tag numbers must mirror writeTo() exactly.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        name = in.readUTF();
        int size = in.readInt();
        values = new ArrayList(size);
        for (int i = 0; i < size; i++) {
            Object value;
            byte type = in.readByte();
            if (type == 0) {
                value = in.readUTF();
            } else if (type == 1) {
                value = in.readInt();
            } else if (type == 2) {
                value = in.readLong();
            } else if (type == 3) {
                value = in.readFloat();
            } else if (type == 4) {
                value = in.readDouble();
            } else if (type == 5) {
                value = in.readBoolean();
            } else if (type == 6) {
                // byte[] values are length-prefixed.
                int bytesSize = in.readInt();
                value = new byte[bytesSize];
                in.readFully(((byte[]) value));
            } else {
                throw new IOException("Can't read unknown type [" + type + "]");
            }
            values.add(value);
        }
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(name);
        out.writeInt(values.size());
        for (Object obj : values) {
            // Dispatch on the runtime class to pick the one-byte type tag.
            Class type = obj.getClass();
            if (type == String.class) {
                out.write(0);
                out.writeUTF((String) obj);
            } else if (type == Integer.class) {
                out.write(1);
                out.writeInt((Integer) obj);
            } else if (type == Long.class) {
                out.write(2);
                out.writeLong((Long) obj);
            } else if (type == Float.class) {
                out.write(3);
                out.writeFloat((Float) obj);
            } else if (type == Double.class) {
                out.write(4);
                out.writeDouble((Double) obj);
            } else if (type == Boolean.class) {
                out.write(5);
                out.writeBoolean((Boolean) obj);
            } else if (type == byte[].class) {
                out.write(6);
                out.writeInt(((byte[]) obj).length);
                out.write(((byte[]) obj));
            } else {
                throw new IOException("Can't write type [" + type + "]");
            }
        }
    }
}
\ No newline at end of file
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
new file mode 100644
index 00000000000..694b4768f9f
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
@@ -0,0 +1,98 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
+ */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.util.json.JsonBuilder; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.search.internal.InternalSearchHit.*; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalSearchHits implements SearchHits { + + private static final SearchHit[] EMPTY = new SearchHit[0]; + + private SearchHit[] hits; + + private long totalHits; + + private InternalSearchHits() { + + } + + public InternalSearchHits(SearchHit[] hits, long totalHits) { + this.hits = hits; + this.totalHits = totalHits; + } + + public long totalHits() { + return totalHits; + } + + public SearchHit[] hits() { + return this.hits; + } + + public static InternalSearchHits readSearchHits(DataInput in) throws IOException, ClassNotFoundException { + InternalSearchHits hits = new InternalSearchHits(); + hits.readFrom(in); + return hits; + } + + @Override public void toJson(JsonBuilder builder) throws IOException { + builder.startObject("hits"); + builder.field("total", totalHits); + builder.field("hits"); + builder.startArray(); + for (SearchHit hit : hits) { + hit.toJson(builder); + } + builder.endArray(); + builder.endObject(); + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + totalHits = in.readLong(); + int size = in.readInt(); + if (size == 0) { + hits = EMPTY; + } else { + hits = new SearchHit[size]; + for (int i = 0; i < hits.length; i++) { + hits[i] = readSearchHit(in); + } + } + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(totalHits); + out.writeInt(hits.length); + for (SearchHit hit : hits) { + hit.writeTo(out); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchRequest.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchRequest.java
new file mode 100644
index 00000000000..5321e7ca0af
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchRequest.java
@@ -0,0 +1,202 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.internal;

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.util.Strings;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import static org.elasticsearch.search.Scroll.*;
import static org.elasticsearch.util.TimeValue.*;

/**
 * Shard-level search request: targets a single shard of a single index and
 * carries the raw JSON search source plus request-level options.
 *
 * Source structure:
 * <pre>
 * {
 *  from : 0, size : 20, (optional, can be set on the request)
 *  sort : { "name.first" : {}, "name.last" : { reverse : true } }
 *  fields : [ "name.first", "name.last" ]
 *  queryParserName : "",
 *  query : { ... }
 *  facets : {
 *      "facet1" : {
 *          query : { ... }
 *      }
 *  }
 * }
 * </pre>
 *
 * @author kimchy (Shay Banon)
 */
public class InternalSearchRequest implements Streamable {

    private String index;

    private int shardId;

    // Optional scroll keep-alive; null when not scrolling.
    private Scroll scroll;

    // -1 means "not set here, take it from the source".
    private int from = -1;

    // -1 means "not set here, take it from the source".
    private int size = -1;

    private float queryBoost = 1.0f;

    // Optional per-request timeout; null means no timeout.
    private TimeValue timeout;

    // Document types to restrict the search to; empty means all types.
    private String[] types = Strings.EMPTY_ARRAY;

    // The raw JSON search source.
    private String source;

    // No-arg constructor for deserialization via readFrom().
    public InternalSearchRequest() {
    }

    public InternalSearchRequest(ShardRouting shardRouting, String source) {
        this(shardRouting.index(), shardRouting.id(), source);
    }

    public InternalSearchRequest(String index, int shardId, String source) {
        this.index = index;
        this.shardId = shardId;
        this.source = source;
    }

    public String index() {
        return index;
    }

    public int shardId() {
        return shardId;
    }

    public String source() {
        return this.source;
    }

    public Scroll scroll() {
        return scroll;
    }

    public InternalSearchRequest scroll(Scroll scroll) {
        this.scroll = scroll;
        return this;
    }

    public int from() {
        return from;
    }

    public InternalSearchRequest from(int from) {
        this.from = from;
        return this;
    }

    public TimeValue timeout() {
        return timeout;
    }

    public void timeout(TimeValue timeout) {
        this.timeout = timeout;
    }

    /**
     * Allows to set a dynamic query boost on an index level query. Very handy when, for example, each user has
     * his own index, and friends matter more than friends of friends.
     */
    public float queryBoost() {
        return queryBoost;
    }

    public InternalSearchRequest queryBoost(float queryBoost) {
        this.queryBoost = queryBoost;
        return this;
    }

    public int size() {
        return size;
    }

    public InternalSearchRequest size(int size) {
        this.size = size;
        return this;
    }

    public String[] types() {
        return types;
    }

    public void types(String[] types) {
        this.types = types;
    }

    // Wire format: index, shardId, [scroll], from, size, [timeout], source,
    // queryBoost, type count + types. Must mirror writeTo() exactly.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        index = in.readUTF();
        shardId = in.readInt();
        if (in.readBoolean()) {
            scroll = readScroll(in);
        }
        from = in.readInt();
        size = in.readInt();
        if (in.readBoolean()) {
            timeout = readTimeValue(in);
        }
        source = in.readUTF();
        queryBoost = in.readFloat();
        int typesSize = in.readInt();
        if (typesSize > 0) {
            types = new String[typesSize];
            for (int i = 0; i < typesSize; i++) {
                types[i] = in.readUTF();
            }
        }
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeUTF(index);
        out.writeInt(shardId);
        if (scroll == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            scroll.writeTo(out);
        }
        out.writeInt(from);
        out.writeInt(size);
        if (timeout == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            timeout.writeTo(out);
        }
        out.writeUTF(source);
        out.writeFloat(queryBoost);
        out.writeInt(types.length);
        for (String type : types) {
            out.writeUTF(type);
        }
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
new file mode 100644
index 00000000000..969201e3461
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -0,0 +1,89 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.
 See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.internal;

import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.facets.Facets;
import org.elasticsearch.util.io.Streamable;
import org.elasticsearch.util.json.JsonBuilder;
import org.elasticsearch.util.json.ToJson;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import static org.elasticsearch.search.facets.Facets.*;
import static org.elasticsearch.search.internal.InternalSearchHits.*;

/**
 * The search response body: the hits plus optional facet results.
 *
 * @author kimchy (Shay Banon)
 */
public class InternalSearchResponse implements Streamable, ToJson {

    private InternalSearchHits hits;

    // Optional; null when the request had no facets.
    private Facets facets;

    // For deserialization via readInternalSearchResponse().
    private InternalSearchResponse() {
    }

    public InternalSearchResponse(InternalSearchHits hits, Facets facets) {
        this.hits = hits;
        this.facets = facets;
    }

    public SearchHits hits() {
        return hits;
    }

    public Facets facets() {
        return facets;
    }

    /**
     * Deserialization factory: reads a new response from the stream.
     */
    public static InternalSearchResponse readInternalSearchResponse(DataInput in) throws IOException, ClassNotFoundException {
        InternalSearchResponse response = new InternalSearchResponse();
        response.readFrom(in);
        return response;
    }

    @Override public void toJson(JsonBuilder builder) throws IOException {
        hits.toJson(builder);
        if (facets != null) {
            facets.toJson(builder);
        }
    }

    // Wire format: hits, then a presence flag followed by the facets (if
    // any). Must mirror writeTo() exactly.
    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        hits = readSearchHits(in);
        if (in.readBoolean()) {
            facets = readFacets(in);
        }
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        hits.writeTo(out);
        if (facets == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            facets.writeTo(out);
        }
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/SearchContext.java
new file mode 100644
index 00000000000..45b53c93451
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -0,0 +1,289 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.internal;

import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParser;
import org.elasticsearch.index.query.IndexQueryParserMissingException;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.lease.Releasable;

import java.io.IOException;

/**
 * Per-request, per-shard search state. Holds the engine searcher for the
 * duration of the request, the parsed query and options, and the result
 * holders for the dfs, query and fetch phases. Released via {@link #release()}
 * when the request (or scroll) is done.
 *
 * @author kimchy (Shay Banon)
 */
public class SearchContext implements Releasable {

    // Unique id of this context; used to continue scroll/fetch phases.
    private final long id;

    // The raw JSON search source.
    private final String source;

    // Engine-level searcher held open for the lifetime of this context.
    private final Engine.Searcher engineSearcher;

    private final IndexService indexService;

    // Context-aware wrapper around the engine searcher's reader.
    private final ContextIndexSearcher searcher;

    private final DfsSearchResult dfsResult;

    private final QuerySearchResult queryResult;

    private final FetchSearchResult fetchResult;

    // Optional request timeout; null means none.
    private final TimeValue timeout;

    private final float queryBoost;


    private Scroll scroll;

    private boolean explain;

    private String[] fieldNames;

    // -1 means "not set".
    private int from = -1;

    // -1 means "not set".
    private int size = -1;

    private String[] types;

    private Sort sort;

    // Name of a specific query parser to use; null selects the default.
    private String queryParserName;

    private Query query;

    // Doc ids selected by the query phase for the fetch phase to load.
    private int[] docIdsToLoad;

    private SearchContextFacets facets;


    // Tracks whether `query` has already been rewritten against the reader.
    private boolean queryRewritten;

    public SearchContext(long id, SearchShardTarget shardTarget, TimeValue timeout, float queryBoost, String source,
                         String[] types, Engine.Searcher engineSearcher, IndexService indexService) {
        this.id = id;
        this.timeout = timeout;
        this.queryBoost = queryBoost;
        this.source = source;
        this.types = types;
        this.engineSearcher = engineSearcher;
        this.dfsResult = new DfsSearchResult(id, shardTarget);
        this.queryResult = new QuerySearchResult(id, shardTarget);
        this.fetchResult = new FetchSearchResult(id, shardTarget);
        this.indexService = indexService;

        this.searcher = new ContextIndexSearcher(this, engineSearcher.reader());
    }

    // Closes the context searcher, then releases the underlying engine
    // searcher back to the engine.
    @Override public boolean release() throws ElasticSearchException {
        try {
            searcher.close();
        } catch (IOException e) {
            // ignore this exception
        }
        engineSearcher.release();
        return true;
    }

    public long id() {
        return this.id;
    }

    public String source() {
        return source;
    }

    public String[] types() {
        return types;
    }

    public float queryBoost() {
        return queryBoost;
    }

    public Scroll scroll() {
        return this.scroll;
    }

    public SearchContext scroll(Scroll scroll) {
        this.scroll = scroll;
        return this;
    }

    public SearchContextFacets facets() {
        return facets;
    }

    public SearchContext facets(SearchContextFacets facets) {
        this.facets = facets;
        return this;
    }

    public Engine.Searcher engineSearcher() {
        return this.engineSearcher;
    }

    public ContextIndexSearcher searcher() {
        return this.searcher;
    }

    /**
     * Resolves the query parser to use: the named one when set (failing if
     * missing), otherwise the index default.
     */
    public IndexQueryParser queryParser() throws IndexQueryParserMissingException {
        if (queryParserName != null) {
            IndexQueryParser queryParser = queryParserService().indexQueryParser(queryParserName);
            if (queryParser == null) {
                throw new IndexQueryParserMissingException(queryParserName);
            }
            return queryParser;
        }
        return queryParserService().defaultIndexQueryParser();
    }

    public MapperService mapperService() {
        return indexService.mapperService();
    }

    public IndexQueryParserService queryParserService() {
        return indexService.queryParserService();
    }

    public SimilarityService similarityService() {
        return indexService.similarityService();
    }

    public FilterCache filterCache() {
        return indexService.filterCache();
    }

    public TimeValue timeout() {
        return timeout;
    }

    public SearchContext sort(Sort sort) {
        this.sort = sort;
        return this;
    }

    public Sort sort() {
        return this.sort;
    }

    public String queryParserName() {
        return queryParserName;
    }

    public SearchContext queryParserName(String queryParserName) {
        this.queryParserName = queryParserName;
        return this;
    }

    // Setting a new (non-null) query marks it as not-yet-rewritten.
    // NOTE(review): the null branch leaves queryRewritten unchanged — looks
    // intentional only if a null query is never rewritten; confirm.
    public SearchContext query(Query query) {
        if (query == null) {
            this.query = query;
            return this;
        }
        queryRewritten = false;
        this.query = query;
        return this;
    }

    public Query query() {
        return this.query;
    }

    public int from() {
        return from;
    }

    public SearchContext from(int from) {
        this.from = from;
        return this;
    }

    public int size() {
        return size;
    }

    public SearchContext size(int size) {
        this.size = size;
        return this;
    }

    public String[] fieldNames() {
        return fieldNames;
    }

    public SearchContext fieldNames(String[] fieldNames) {
        this.fieldNames = fieldNames;
        return this;
    }

    public boolean explain() {
        return explain;
    }

    public void explain(boolean explain) {
        this.explain = explain;
    }

    /**
     * Rewrites the query against the reader once per context; subsequent
     * calls are no-ops until a new query is set.
     */
    public SearchContext rewriteQuery() throws IOException {
        if (queryRewritten) {
            return this;
        }
        query = query.rewrite(searcher.getIndexReader());
        queryRewritten = true;
        return this;
    }

    public int[] docIdsToLoad() {
        return docIdsToLoad;
    }

    public SearchContext docIdsToLoad(int[] docIdsToLoad) {
        this.docIdsToLoad = docIdsToLoad;
        return this;
    }

    public DfsSearchResult dfsResult() {
        return dfsResult;
    }

    public QuerySearchResult queryResult() {
        return queryResult;
    }

    public FetchSearchResult fetchResult() {
        return fetchResult;
    }
}
diff --git
package org.elasticsearch.search.internal;

import org.apache.lucene.search.Query;

import java.util.List;

/**
 * Facet-related state parsed from the search request source: how query facets
 * should be executed and the list of named query facets to compute.
 *
 * @author kimchy (Shay Banon)
 */
public class SearchContextFacets {

    /** Strategy for executing query facets against the index. */
    public static enum QueryExecutionType {
        COLLECT,
        IDSET
    }

    private final QueryExecutionType queryExecutionType;

    private final List<QueryFacet> queryFacets;

    public SearchContextFacets(QueryExecutionType queryExecutionType, List<QueryFacet> queryFacets) {
        this.queryExecutionType = queryExecutionType;
        this.queryFacets = queryFacets;
    }

    /** How the query facets should be executed. */
    public QueryExecutionType queryType() {
        return this.queryExecutionType;
    }

    /** The query facets to compute; may be {@code null} when none were requested. */
    public List<QueryFacet> queryFacets() {
        return queryFacets;
    }

    /** A named facet defined by an arbitrary Lucene query. */
    public static class QueryFacet {
        private final String name;
        private final Query query;

        public QueryFacet(String name, Query query) {
            this.name = name;
            this.query = query;
        }

        public String name() {
            return name;
        }

        public Query query() {
            return query;
        }
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.codehaus.jackson.JsonParser; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class FromParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + if (context.from() != -1) { + // it was externally set + return; + } + context.from(jp.getIntValue()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryParseElement.java new file mode 100644 index 00000000000..a386bb11762 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryParseElement.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.search.Query; +import org.codehaus.jackson.JsonParser; +import org.elasticsearch.index.query.json.JsonIndexQueryParser; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + JsonIndexQueryParser indexQueryParser = (JsonIndexQueryParser) context.queryParser(); + Query query = indexQueryParser.parse(jp, context.source()); + query.setBoost(query.getBoost() * context.queryBoost()); + context.query(query); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryParserNameParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryParserNameParseElement.java new file mode 100644 index 00000000000..1ecca8816a0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryParserNameParseElement.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.codehaus.jackson.JsonParser; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryParserNameParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + context.queryParserName(jp.getText()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryPhase.java new file mode 100644 index 00000000000..8a26ab3ea4a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
package org.elasticsearch.search.query;

import com.google.common.collect.ImmutableMap;
import com.google.inject.Inject;
import org.apache.lucene.search.*;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.facets.FacetsPhase;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.util.lucene.search.TermFilter;

import java.util.Map;

/**
 * The query phase of a search: executes the parsed query (restricted to the
 * requested mapping types) against the shard's searcher and stores the
 * resulting top docs on the context, then runs the facets phase.
 *
 * @author kimchy (Shay Banon)
 */
public class QueryPhase implements SearchPhase {

    private final FacetsPhase facetsPhase;

    @Inject public QueryPhase(FacetsPhase facetsPhase) {
        this.facetsPhase = facetsPhase;
    }

    /** Source elements this phase understands, plus the facets phase's elements. */
    @Override public Map<String, ? extends SearchParseElement> parseElements() {
        ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
        parseElements.put("from", new FromParseElement()).put("size", new SizeParseElement())
                .put("queryParserName", new QueryParserNameParseElement())
                .put("query", new QueryParseElement())
                .put("sort", new SortParseElement())
                .putAll(facetsPhase.parseElements());
        return parseElements.build();
    }

    /**
     * Executes the query and records the top docs on the context's query result.
     *
     * @throws QueryPhaseExecutionException wrapping any failure, with the root
     *                                      cause preserved for debugging
     */
    public void execute(SearchContext searchContext) throws QueryPhaseExecutionException {
        try {
            searchContext.queryResult().from(searchContext.from());
            searchContext.queryResult().size(searchContext.size());

            Query query = wrapWithTypeFilter(searchContext, searchContext.query());

            // Collect enough hits to cover the requested window (from + size).
            int docsToCollect = searchContext.from() + searchContext.size();
            TopDocs topDocs;
            if (searchContext.sort() != null) {
                topDocs = searchContext.searcher().search(query, null, docsToCollect, searchContext.sort());
            } else {
                topDocs = searchContext.searcher().search(query, docsToCollect);
            }
            searchContext.queryResult().topDocs(topDocs);
        } catch (Exception e) {
            // Bug fix: previously the cause was dropped, losing the root stack trace.
            QueryPhaseExecutionException failure = new QueryPhaseExecutionException(searchContext);
            failure.initCause(e);
            throw failure;
        }

        facetsPhase.execute(searchContext);
    }

    /**
     * Restricts the query to the context's mapping types. No-op when no types
     * are requested; a single type uses a plain filtered query, several types
     * are OR-ed together.
     */
    private Query wrapWithTypeFilter(SearchContext searchContext, Query query) {
        if (searchContext.types().length == 0) {
            return query;
        }
        if (searchContext.types().length == 1) {
            return new FilteredQuery(query, cachedTypeFilter(searchContext, searchContext.types()[0]));
        }
        BooleanFilter booleanFilter = new BooleanFilter();
        for (String type : searchContext.types()) {
            booleanFilter.add(new FilterClause(cachedTypeFilter(searchContext, type), BooleanClause.Occur.SHOULD));
        }
        return new FilteredQuery(query, booleanFilter);
    }

    /** Builds a term filter on the type field, cached in the index filter cache. */
    private Filter cachedTypeFilter(SearchContext searchContext, String type) {
        DocumentMapper docMapper = searchContext.mapperService().documentMapper(type);
        Filter typeFilter = new TermFilter(docMapper.typeMapper().term(docMapper.type()));
        return searchContext.filterCache().cache(typeFilter);
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.elasticsearch.search.SearchException; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class QueryPhaseExecutionException extends SearchException { + + public QueryPhaseExecutionException(SearchContext context) { + super("Failed to execute query [" + context.query() + "], sort [" + context.sort() + "], from [" + context.from() + "], size [" + context.size() + "]"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java new file mode 100644 index 00000000000..e2e4c139ff9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.elasticsearch.search.dfs.AggregatedDfs.*; + +/** + * @author kimchy (Shay Banon) + */ +public class QuerySearchRequest implements Streamable { + + private long id; + + private AggregatedDfs dfs; + + public QuerySearchRequest() { + } + + public QuerySearchRequest(long id, AggregatedDfs dfs) { + this.id = id; + this.dfs = dfs; + } + + public long id() { + return id; + } + + public AggregatedDfs dfs() { + return dfs; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readLong(); + dfs = readAggregatedDfs(in); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(id); + dfs.writeTo(out); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java new file mode 100644 index 00000000000..90dd9bbcf41 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -0,0 +1,153 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.elasticsearch.search.query;

import org.apache.lucene.search.TopDocs;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.facets.Facets;
import org.elasticsearch.util.io.Streamable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import static org.elasticsearch.search.SearchShardTarget.*;
import static org.elasticsearch.search.facets.Facets.*;
import static org.elasticsearch.util.lucene.Lucene.*;

/**
 * Shard-level result of the query phase: the top docs (and optional facets)
 * produced by one shard, tagged with the context id and shard target so the
 * coordinating node can merge results and drive the fetch phase.
 *
 * Wire format (readFrom/writeTo must stay symmetric): long id, shard target,
 * int from, int size, top docs, boolean facets-present flag (+ facets),
 * boolean timed-out flag.
 *
 * @author kimchy (Shay Banon)
 */
public class QuerySearchResult implements Streamable, QuerySearchResultProvider {

    // Id of the search context that produced this result.
    private long id;

    // The shard this result came from.
    private SearchShardTarget shardTarget;

    // Result window requested for this shard.
    private int from;

    private int size;

    // Top hits collected by the query phase.
    private TopDocs topDocs;

    // Optional facet results; null when no facets were requested.
    private Facets facets;

    // Whether the search hit its time limit on this shard.
    private boolean searchTimedOut;

    // No-arg constructor for deserialization via readFrom().
    public QuerySearchResult() {

    }

    public QuerySearchResult(long id, SearchShardTarget shardTarget) {
        this.id = id;
        this.shardTarget = shardTarget;
    }

    // A pure query result never bundles fetch data (see QuerySearchResultProvider).
    @Override public boolean includeFetch() {
        return false;
    }

    @Override public QuerySearchResult queryResult() {
        return this;
    }

    public long id() {
        return this.id;
    }

    public SearchShardTarget shardTarget() {
        return shardTarget;
    }

    public void searchTimedOut(boolean searchTimedOut) {
        this.searchTimedOut = searchTimedOut;
    }

    public boolean searchTimedOut() {
        return searchTimedOut;
    }

    public TopDocs topDocs() {
        return topDocs;
    }

    public void topDocs(TopDocs topDocs) {
        this.topDocs = topDocs;
    }

    public Facets facets() {
        return facets;
    }

    public void facets(Facets facets) {
        this.facets = facets;
    }

    public int from() {
        return from;
    }

    public QuerySearchResult from(int from) {
        this.from = from;
        return this;
    }

    public int size() {
        return size;
    }

    public QuerySearchResult size(int size) {
        this.size = size;
        return this;
    }

    // Factory mirror of readFrom() for use at call sites.
    public static QuerySearchResult readQuerySearchResult(DataInput in) throws IOException, ClassNotFoundException {
        QuerySearchResult result = new QuerySearchResult();
        result.readFrom(in);
        return result;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        id = in.readLong();
        shardTarget = readSearchShardTarget(in);
        from = in.readInt();
        size = in.readInt();
        topDocs = readTopDocs(in);
        // Facets are optional; a boolean flag precedes the payload.
        if (in.readBoolean()) {
            facets = readFacets(in);
        }
        searchTimedOut = in.readBoolean();
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        out.writeLong(id);
        shardTarget.writeTo(out);
        out.writeInt(from);
        out.writeInt(size);
        // NOTE(review): third argument is 0 — presumably a doc-id offset; confirm
        // against Lucene util's writeTopDocs before changing.
        writeTopDocs(out, topDocs, 0);
        if (facets == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            facets.writeTo(out);
        }
        out.writeBoolean(searchTimedOut);
    }
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.elasticsearch.search.SearchShardTarget; + +/** + * @author kimchy (Shay Banon) + */ +public interface QuerySearchResultProvider { + + /** + * If both query and fetch happened on the same call. + */ + boolean includeFetch(); + + long id(); + + SearchShardTarget shardTarget(); + + QuerySearchResult queryResult(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/SizeParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/SizeParseElement.java new file mode 100644 index 00000000000..43aa65c84ca --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/SizeParseElement.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.codehaus.jackson.JsonParser; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.internal.SearchContext; + +/** + * @author kimchy (Shay Banon) + */ +public class SizeParseElement implements SearchParseElement { + + @Override public void parse(JsonParser jp, SearchContext context) throws Exception { + if (context.size() != -1) { + // it was externally set + return; + } + context.size(jp.getIntValue()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/SortParseElement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/SortParseElement.java new file mode 100644 index 00000000000..f334f1216ab --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/query/SortParseElement.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.elasticsearch.search.query;

import com.google.common.collect.Lists;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.elasticsearch.index.mapper.FieldMappers;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.util.gnu.trove.TObjectIntHashMap;
import org.elasticsearch.util.trove.ExtTObjectIntHasMap;

import java.util.List;

/**
 * Parses the {@code sort} element of a search source into a Lucene
 * {@link Sort}. Each entry maps a field name to an object with optional
 * {@code reverse} (boolean) and {@code type} (string) properties. The
 * special field names {@code score} and {@code doc} sort by relevance and
 * document order respectively; other names are resolved through the mapper
 * service, which also supplies the sort type when none is given explicitly.
 *
 * @author kimchy (Shay Banon)
 */
public class SortParseElement implements SearchParseElement {

    // Maps the textual "type" values to Lucene SortField constants; -1 is the
    // sentinel for "unknown type" (defaultReturnValue below).
    private final TObjectIntHashMap sortFieldTypesMapper = new ExtTObjectIntHasMap().defaultReturnValue(-1);

    // Shared, stateless sort fields for the special "score"/"doc" names.
    private static final SortField SORT_SCORE = new SortField(null, SortField.SCORE);
    private static final SortField SORT_SCORE_REVERSE = new SortField(null, SortField.SCORE, true);
    private static final SortField SORT_DOC = new SortField(null, SortField.DOC);
    private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.DOC, true);

    public SortParseElement() {
        sortFieldTypesMapper.put("string", SortField.STRING);
        sortFieldTypesMapper.put("int", SortField.INT);
        sortFieldTypesMapper.put("float", SortField.FLOAT);
        sortFieldTypesMapper.put("long", SortField.LONG);
        sortFieldTypesMapper.put("double", SortField.DOUBLE);
        sortFieldTypesMapper.put("short", SortField.SHORT);
        sortFieldTypesMapper.put("byte", SortField.BYTE);
        sortFieldTypesMapper.put("string_val", SortField.STRING_VAL);
    }

    @Override public void parse(JsonParser jp, SearchContext context) throws Exception {
        JsonToken token;
        List sortFields = Lists.newArrayListWithCapacity(2);
        // Outer loop: one iteration per sort entry ("fieldName": { ... }).
        while ((token = jp.nextToken()) != JsonToken.END_OBJECT) {
            if (token == JsonToken.FIELD_NAME) {
                String fieldName = jp.getCurrentName();
                boolean reverse = false;
                String innerJsonName = null;
                int type = -1;
                // Inner loop: consume the per-field options object. innerJsonName
                // tracks the most recent property name so value tokens can be
                // matched to it.
                while ((token = jp.nextToken()) != JsonToken.END_OBJECT) {
                    if (token == JsonToken.FIELD_NAME) {
                        innerJsonName = jp.getCurrentName();
                    } else if (token == JsonToken.VALUE_TRUE) {
                        if ("reverse".equals(innerJsonName)) {
                            reverse = true;
                        }
                    } else {
                        if ("type".equals(innerJsonName)) {
                            type = sortFieldTypesMapper.get(jp.getText());
                            if (type == -1) {
                                throw new SearchParseException("No sort type for [" + jp.getText() + "] with field [" + fieldName + "]");
                            }
                        }
                    }
                }
                if ("score".equals(fieldName)) {
                    // Relevance sort: use the shared pre-built instances.
                    if (reverse) {
                        sortFields.add(SORT_SCORE_REVERSE);
                    } else {
                        sortFields.add(SORT_SCORE);
                    }
                } else if ("doc".equals(fieldName)) {
                    // Document-order sort: use the shared pre-built instances.
                    if (reverse) {
                        sortFields.add(SORT_DOC_REVERSE);
                    } else {
                        sortFields.add(SORT_DOC);
                    }
                } else {
                    // Resolve the field through the mappers: prefer the indexed
                    // name and the mapper-declared sort type over the raw input.
                    FieldMappers fieldMappers = context.mapperService().smartNameFieldMappers(fieldName);
                    if (fieldMappers == null || fieldMappers.mappers().isEmpty()) {
                        // Unmapped field: only legal when an explicit type was given.
                        if (type == -1) {
                            throw new SearchParseException("No built in mapping found for [" + fieldName + "], and no explicit type defined");
                        }
                    } else {
                        fieldName = fieldMappers.mappers().get(0).indexName();
                        if (type == -1) {
                            type = fieldMappers.mappers().get(0).sortType();
                        }
                    }
                    sortFields.add(new SortField(fieldName, type, reverse));
                }
            }
        }
        // Only install a sort when at least one field was parsed.
        if (!sortFields.isEmpty()) {
            context.sort(new Sort(sortFields.toArray(new SortField[sortFields.size()])));
        }
    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/server/Server.java new file mode 100644 index 00000000000..cedaab6bf64 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/server/Server.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.server; + +import org.elasticsearch.client.Client; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public interface Server { + + Settings settings(); + + Client client(); + + Server start(); + + Server stop(); + + void close(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/server/ServerBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/server/ServerBuilder.java new file mode 100644 index 00000000000..2adfee6f2b3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/server/ServerBuilder.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.server; + +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ServerBuilder { + + private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS; + + private boolean loadConfigSettings = true; + + public static ServerBuilder serverBuilder() { + return new ServerBuilder(); + } + + public ServerBuilder settings(Settings.Builder settings) { + return settings(settings.build()); + } + + public ServerBuilder settings(Settings settings) { + this.settings = settings; + return this; + } + + public ServerBuilder loadConfigSettings(boolean loadConfigSettings) { + this.loadConfigSettings = loadConfigSettings; + return this; + } + + /** + * Builds the server without starting it. + */ + public Server build() { + return new InternalServer(settings, loadConfigSettings); + } + + /** + * {@link #build()}s and starts the server. 
+ */ + public Server server() { + return build().start(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/server/internal/InternalServer.java b/modules/elasticsearch/src/main/java/org/elasticsearch/server/internal/InternalServer.java new file mode 100644 index 00000000000..b75d1a099cc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/server/internal/InternalServer.java @@ -0,0 +1,246 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
// File: modules/elasticsearch/src/main/java/org/elasticsearch/server/internal/InternalServer.java
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.server.internal;

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.TransportActionModule;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.server.ServerClientModule;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterNameModule;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerModule;
import org.elasticsearch.index.store.fs.FsStores;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.jmx.JmxModule;
import org.elasticsearch.jmx.JmxService;
import org.elasticsearch.monitor.MonitorModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.server.Server;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.util.Tuple;
import org.elasticsearch.util.component.Lifecycle;
import org.elasticsearch.util.guice.Injectors;
import org.elasticsearch.util.io.FileSystemUtils;
import org.elasticsearch.util.logging.Loggers;
import org.elasticsearch.util.settings.Settings;
import org.elasticsearch.util.settings.SettingsModule;
import org.slf4j.Logger;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.util.settings.ImmutableSettings.*;

/**
 * Default {@link Server} implementation. Wires all subsystem modules into a
 * Guice injector at construction time, then starts/stops/closes the individual
 * services in a fixed order.
 *
 * @author kimchy (Shay Banon)
 */
public final class InternalServer implements Server {

    // Tracks initialized -> started -> stopped -> closed transitions and makes
    // the lifecycle methods idempotent.
    private final Lifecycle lifecycle = new Lifecycle();

    private final Injector injector;

    private final Settings settings;

    private final Environment environment;

    private final Client client;

    public InternalServer() throws ElasticSearchException {
        this(Builder.EMPTY_SETTINGS, true);
    }

    /**
     * Prepares settings (optionally from config files), assembles all Guice
     * modules, and creates the injector. The server is initialized but not
     * started.
     *
     * @param pSettings          explicit settings, layered over config files
     * @param loadConfigSettings whether to consult on-disk config files
     */
    public InternalServer(Settings pSettings, boolean loadConfigSettings) throws ElasticSearchException {
        Tuple<Settings, Environment> tuple = InternalSettingsPerparer.prepareSettings(pSettings, loadConfigSettings);
        this.settings = tuple.v1();
        this.environment = tuple.v2();

        Logger logger = Loggers.getLogger(Server.class, settings.get("name"));
        logger.info("{{}}: Initializing ...", Version.full());

        List<Module> modules = new ArrayList<Module>();
        modules.add(new JmxModule(settings));
        modules.add(new EnvironmentModule(environment));
        modules.add(new ClusterNameModule(settings));
        modules.add(new SettingsModule(settings));
        modules.add(new ThreadPoolModule(settings));
        modules.add(new DiscoveryModule(settings));
        modules.add(new ClusterModule(settings));
        modules.add(new TransportModule(settings));
        // The HTTP layer is optional and controlled by a single flag; the same
        // flag is consulted again in start/stop/close.
        if (settings.getAsBoolean("http.enabled", true)) {
            modules.add(new HttpServerModule(settings));
        }
        modules.add(new IndicesModule(settings));
        modules.add(new SearchModule());
        modules.add(new TransportActionModule());
        modules.add(new MonitorModule(settings));
        modules.add(new GatewayModule(settings));
        modules.add(new ServerClientModule());

        injector = Guice.createInjector(modules);

        client = injector.getInstance(Client.class);

        logger.info("{{}}: Initialized", Version.full());
    }

    @Override public Settings settings() {
        return this.settings;
    }

    @Override public Client client() {
        return client;
    }

    @Override public Server start() {
        if (!lifecycle.moveToStarted()) {
            return this;
        }

        Logger logger = Loggers.getLogger(Server.class, settings.get("name"));
        logger.info("{{}}: Starting ...", Version.full());

        injector.getInstance(IndicesService.class).start();
        injector.getInstance(GatewayService.class).start();
        injector.getInstance(ClusterService.class).start();
        injector.getInstance(RoutingService.class).start();
        injector.getInstance(SearchService.class).start();
        injector.getInstance(MonitorService.class).start();
        injector.getInstance(TransportService.class).start();
        DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).start();
        }
        // JMX registration uses the discovery-provided node description.
        injector.getInstance(JmxService.class).connectAndRegister(discoService.nodeDescription());

        logger.info("{{}}: Started", Version.full());

        return this;
    }

    @Override public Server stop() {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        Logger logger = Loggers.getLogger(Server.class, settings.get("name"));
        // Message made consistent with the other lifecycle log lines
        // (was "{ElasticSearch/{}}: Stopping ...").
        logger.info("{{}}: Stopping ...", Version.full());

        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).stop();
        }
        injector.getInstance(RoutingService.class).stop();
        injector.getInstance(ClusterService.class).stop();
        injector.getInstance(DiscoveryService.class).stop();
        injector.getInstance(MonitorService.class).stop();
        injector.getInstance(GatewayService.class).stop();
        injector.getInstance(SearchService.class).stop();
        injector.getInstance(IndicesService.class).stop();
        injector.getInstance(TransportService.class).stop();
        injector.getInstance(JmxService.class).close();

        // Not pretty, but here we go: best-effort removal of this node's local
        // index storage; any failure is deliberately ignored.
        try {
            FileSystemUtils.deleteRecursively(new File(new File(environment.workWithClusterFile(), FsStores.DEFAULT_INDICES_LOCATION),
                    injector.getInstance(ClusterService.class).state().nodes().localNodeId()));
        } catch (Exception e) {
            // ignore
        }

        Injectors.close(injector);

        logger.info("{{}}: Stopped", Version.full());

        return this;
    }

    @Override public void close() {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }

        Logger logger = Loggers.getLogger(Server.class, settings.get("name"));
        logger.info("{{}}: Closing ...", Version.full());

        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).close();
        }
        injector.getInstance(Client.class).close();
        injector.getInstance(RoutingService.class).close();
        injector.getInstance(ClusterService.class).close();
        injector.getInstance(DiscoveryService.class).close();
        injector.getInstance(MonitorService.class).close();
        injector.getInstance(GatewayService.class).close();
        injector.getInstance(SearchService.class).close();
        injector.getInstance(IndicesService.class).close();
        injector.getInstance(TransportService.class).close();

        // Orderly thread pool termination: shutdown, bounded wait, then force.
        ThreadPool threadPool = injector.getInstance(ThreadPool.class);
        threadPool.shutdown();
        try {
            threadPool.awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // re-assert the interrupt and keep closing
            Thread.currentThread().interrupt();
        }
        try {
            threadPool.shutdownNow();
        } catch (Exception e) {
            // ignore
        }

        logger.info("{{}}: Closed", Version.full());
    }

    public Injector injector() {
        return this.injector;
    }

    public static void main(String[] args) throws Exception {
        final InternalServer server = new InternalServer();
        // Register the hook before starting so an interrupt during startup
        // still triggers an orderly close.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override public void run() {
                server.close();
            }
        });
        server.start();
    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/server/internal/InternalSettingsPerparer.java new file mode 100644 index 00000000000..a5a78ccafc1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/server/internal/InternalSettingsPerparer.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.server.internal; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.FailedToResolveConfigException; +import org.elasticsearch.util.Names; +import org.elasticsearch.util.Tuple; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.util.Strings.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +/** + * @author kimchy (Shay Banon) + */ +public class InternalSettingsPerparer { + + public static Tuple prepareSettings(Settings pSettings, boolean loadConfigSettings) { + // just create enough settings to build the environment + ImmutableSettings.Builder settingsBuilder = settingsBuilder() + .putAll(pSettings) + .putProperties("elasticsearch.", System.getProperties()) + .putProperties("es.", System.getProperties()) + .replacePropertyPlaceholders(); + + Environment environment = new Environment(settingsBuilder.build()); + + // put back the env settings + settingsBuilder = settingsBuilder().putAll(pSettings); + settingsBuilder.put("path.home", cleanPath(environment.homeFile().getAbsolutePath())); + settingsBuilder.put("path.work", cleanPath(environment.workFile().getAbsolutePath())); + settingsBuilder.put("path.workWithCluster", cleanPath(environment.workWithClusterFile().getAbsolutePath())); + settingsBuilder.put("path.logs", cleanPath(environment.logsFile().getAbsolutePath())); + + if (loadConfigSettings) { + try { + settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.yml")); + } catch (FailedToResolveConfigException e) { + // ignore + } catch (NoClassDefFoundError e) { + // ignore, no yaml + } + try { + settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.json")); + } catch (FailedToResolveConfigException e) { + // ignore + } + try { + settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.properties")); + } 
catch (FailedToResolveConfigException e) { + // ignore + } + if (System.getProperty("es.config") != null) { + settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("es.config"))); + } + if (System.getProperty("elasticsearch.config") != null) { + settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("elasticsearch.config"))); + } + } + + settingsBuilder.putAll(pSettings) + .putProperties("elasticsearch.", System.getProperties()) + .putProperties("es.", System.getProperties()) + .replacePropertyPlaceholders(); + + // generate the name + if (settingsBuilder.get("name") == null) { + String name = System.getProperty("name"); + if (name == null || name.isEmpty()) + name = Names.randomNodeName(environment.resolveConfig("names.txt")); + + if (name != null) { + settingsBuilder.put("name", name); + } + } + + // put the cluster name + if (settingsBuilder.get(ClusterName.SETTING) == null) { + settingsBuilder.put(ClusterName.SETTING, ClusterName.DEFAULT.value()); + } + + return new Tuple(settingsBuilder.build(), environment); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/FutureListener.java b/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/FutureListener.java new file mode 100644 index 00000000000..7b9cf5b6d8e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/FutureListener.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.threadpool; + +/** + * @author kimchy (Shay Banon) + */ +public interface FutureListener { + + void onResult(T result); + + void onException(Exception e); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/ThreadPool.java new file mode 100644 index 00000000000..d48d445a32e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
// File: modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool;

import org.elasticsearch.util.TimeValue;

import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;

/**
 * The server-wide thread pool. Extends {@link ScheduledExecutorService} with
 * listener-aware submission (the {@link FutureListener} is notified when the
 * task completes) and a {@link TimeValue}-based fixed-delay scheduler.
 *
 * @author kimchy (Shay Banon)
 */
public interface ThreadPool extends ScheduledExecutorService {

    /** {@code true} once the pool has been started and not yet shut down. */
    boolean isStarted();

    /** Submits a callable; the listener is notified with its result/failure. */
    <T> Future<T> submit(Callable<T> task, FutureListener<T> listener);

    /**
     * Submits a runnable; on success the listener receives the given
     * {@code result} value.
     */
    <T> Future<T> submit(Runnable task, T result, FutureListener<T> listener);

    /** Submits a runnable; on success the listener receives {@code null}. */
    Future<?> submit(Runnable task, FutureListener<?> listener);

    // Note: the redundant "public" modifier was dropped (all interface
    // members are implicitly public).
    /** Schedules {@code command} with a fixed delay equal to {@code interval}. */
    ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, TimeValue interval);
}
+ */ + +package org.elasticsearch.threadpool; + +import com.google.inject.AbstractModule; +import org.elasticsearch.threadpool.cached.CachedThreadPool; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class ThreadPoolModule extends AbstractModule { + + private final Settings settings; + + public ThreadPoolModule(Settings settings) { + this.settings = settings; + } + + @Override protected void configure() { + bind(ThreadPool.class) + .to(settings.getAsClass("threadpool.type", CachedThreadPool.class, "org.elasticsearch.threadpool.", "ThreadPool")).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/cached/CachedThreadPool.java b/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/cached/CachedThreadPool.java new file mode 100644 index 00000000000..fa47f5d45d3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/cached/CachedThreadPool.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
// File: modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/cached/CachedThreadPool.java
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool.cached;

import com.google.inject.Inject;
import org.elasticsearch.threadpool.support.AbstractThreadPool;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.concurrent.DynamicExecutors;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.util.TimeValue.*;
import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*;

/**
 * An unbounded, cached thread pool: threads are created on demand (no upper
 * bound) and reclaimed after {@code keepAlive} of idleness — the same policy
 * as {@link Executors#newCachedThreadPool()}. A separate fixed-size scheduler
 * of {@code scheduledSize} threads handles delayed/periodic tasks.
 *
 * <p>Settings (under this component's prefix): {@code keepAlive} (default
 * 60s), {@code scheduledSize} (default 20).
 *
 * @author kimchy (Shay Banon)
 */
public class CachedThreadPool extends AbstractThreadPool {

    // Idle time after which a worker thread is reclaimed.
    private final TimeValue keepAlive;

    // Number of threads dedicated to the scheduled executor.
    private final int scheduledSize;

    public CachedThreadPool() {
        this(EMPTY_SETTINGS);
    }

    @Inject public CachedThreadPool(Settings settings) {
        super(settings);
        this.scheduledSize = componentSettings.getAsInt("scheduledSize", 20);
        this.keepAlive = componentSettings.getAsTime("keepAlive", timeValueSeconds(60));
        logger.debug("Initializing {} thread pool with keepAlive[{}], scheduledSize[{}]", new Object[]{getType(), keepAlive, scheduledSize});
        // core=0, max=unbounded, SynchronousQueue: each submit hands off
        // directly to a (possibly new) thread. Fix: the queue is now properly
        // parameterized (was a raw SynchronousQueue).
        executorService = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
                keepAlive.millis(), TimeUnit.MILLISECONDS,
                new SynchronousQueue<Runnable>(),
                DynamicExecutors.daemonThreadFactory(settings, "[tp]"));
        scheduledExecutorService = Executors.newScheduledThreadPool(scheduledSize, DynamicExecutors.daemonThreadFactory(settings, "[sc]"));
        started = true;
    }

    @Override public String getType() {
        return "cached";
    }
}
// File: modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/dynamic/DynamicThreadPool.java
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool.dynamic;

import com.google.inject.Inject;
import org.elasticsearch.threadpool.support.AbstractThreadPool;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.concurrent.DynamicExecutors;
import org.elasticsearch.util.settings.Settings;

import java.util.concurrent.Executors;

import static org.elasticsearch.util.TimeValue.*;
import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*;

/**
 * A bounded, scaling thread pool: the worker pool grows from {@code min} up
 * to {@code max} threads, reclaiming idle threads after {@code keepAlive}.
 * A separate fixed-size scheduler of {@code scheduledSize} threads handles
 * delayed/periodic tasks.
 *
 * <p>Settings (under this component's prefix): {@code min} (default 1),
 * {@code max} (default 100), {@code keepAlive} (default 60s),
 * {@code scheduledSize} (default 20).
 *
 * @author kimchy (Shay Banon)
 */
public class DynamicThreadPool extends AbstractThreadPool {

    // Lower and upper bounds on the worker pool size.
    private final int min;
    private final int max;

    // Idle time after which an above-minimum worker is reclaimed.
    private final TimeValue keepAlive;

    // Number of threads dedicated to the scheduled executor.
    private final int scheduledSize;

    public DynamicThreadPool() {
        this(EMPTY_SETTINGS);
    }

    @Inject public DynamicThreadPool(Settings settings) {
        super(settings);
        min = componentSettings.getAsInt("min", 1);
        max = componentSettings.getAsInt("max", 100);
        keepAlive = componentSettings.getAsTime("keepAlive", timeValueSeconds(60));
        scheduledSize = componentSettings.getAsInt("scheduledSize", 20);
        logger.debug("Initializing {} thread pool with min[{}], max[{}], keepAlive[{}], scheduledSize[{}]", new Object[]{getType(), min, max, keepAlive, scheduledSize});
        executorService = DynamicExecutors.newScalingThreadPool(
                min, max, keepAlive.millis(),
                DynamicExecutors.daemonThreadFactory(settings, "[tp]"));
        scheduledExecutorService = Executors.newScheduledThreadPool(
                scheduledSize,
                DynamicExecutors.daemonThreadFactory(settings, "[sc]"));
        started = true;
    }

    @Override public String getType() {
        return "dynamic";
    }
}
// File: modules/elasticsearch/src/main/java/org/elasticsearch/threadpool/support/AbstractThreadPool.java
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool.support;

import org.elasticsearch.threadpool.FutureListener;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.util.TimeValue;
import org.elasticsearch.util.component.AbstractComponent;
import org.elasticsearch.util.settings.Settings;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.*;

/**
 * Base {@link ThreadPool} implementation that delegates to two executors a
 * subclass must initialize in its constructor: {@link #executorService} for
 * plain submissions and {@link #scheduledExecutorService} for scheduling.
 * Listener-aware submissions wrap the task so the {@link FutureListener} is
 * invoked on completion.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class AbstractThreadPool extends AbstractComponent implements ThreadPool {

    // Set by subclass constructors once both executors are initialized;
    // cleared on shutdown.
    protected volatile boolean started;

    protected ExecutorService executorService;

    protected ScheduledExecutorService scheduledExecutorService;

    protected AbstractThreadPool(Settings settings) {
        super(settings);
    }

    /** A short identifier for the pool implementation (e.g. "cached"). */
    public abstract String getType();

    @Override public boolean isStarted() {
        return started;
    }

    @Override public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
        return scheduledExecutorService.schedule(command, delay, unit);
    }

    @Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
        return scheduledExecutorService.schedule(callable, delay, unit);
    }

    @Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
        return scheduledExecutorService.scheduleAtFixedRate(command, initialDelay, period, unit);
    }

    @Override public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
        return scheduledExecutorService.scheduleWithFixedDelay(command, initialDelay, delay, unit);
    }

    @Override public void shutdown() {
        started = false;
        logger.debug("Shutting down {} thread pool", getType());
        executorService.shutdown();
        scheduledExecutorService.shutdown();
    }

    @Override public List<Runnable> shutdownNow() {
        started = false;
        List<Runnable> result = new ArrayList<Runnable>();
        result.addAll(executorService.shutdownNow());
        result.addAll(scheduledExecutorService.shutdownNow());
        return result;
    }

    @Override public boolean isShutdown() {
        return executorService.isShutdown() || scheduledExecutorService.isShutdown();
    }

    @Override public boolean isTerminated() {
        return executorService.isTerminated() || scheduledExecutorService.isTerminated();
    }

    @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
        // Fix: previously both executors were given the full timeout, so the
        // call could block for up to twice the requested time. Use a single
        // deadline shared by both waits.
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        boolean result = executorService.awaitTermination(timeout, unit);
        long remaining = deadline - System.nanoTime();
        result &= scheduledExecutorService.awaitTermination(Math.max(0L, remaining), TimeUnit.NANOSECONDS);
        return result;
    }

    @Override public <T> Future<T> submit(Callable<T> task) {
        return executorService.submit(task);
    }

    @Override public <T> Future<T> submit(Callable<T> task, FutureListener<T> listener) {
        return executorService.submit(new FutureCallable<T>(task, listener));
    }

    @Override public <T> Future<T> submit(Runnable task, T result) {
        return executorService.submit(task, result);
    }

    @Override public <T> Future<T> submit(Runnable task, T result, FutureListener<T> listener) {
        return executorService.submit(new FutureRunnable<T>(task, result, listener), result);
    }

    @Override public Future<?> submit(Runnable task) {
        return executorService.submit(task);
    }

    @SuppressWarnings({"unchecked", "rawtypes"})
    @Override public Future<?> submit(Runnable task, FutureListener<?> listener) {
        // The listener is only ever handed null as the result here, so the
        // unchecked narrowing is safe.
        return executorService.submit(new FutureRunnable(task, null, (FutureListener) listener));
    }

    @Override public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, TimeValue interval) {
        return scheduleWithFixedDelay(command, interval.millis(), interval.millis(), TimeUnit.MILLISECONDS);
    }

    @Override public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
        return executorService.invokeAll(tasks);
    }

    @Override public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException {
        return executorService.invokeAll(tasks, timeout, unit);
    }

    @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
        return executorService.invokeAny(tasks);
    }

    @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return executorService.invokeAny(tasks, timeout, unit);
    }

    @Override public void execute(Runnable command) {
        executorService.execute(command);
    }

    /** Wraps a callable so the listener observes its result or failure. */
    protected static class FutureCallable<T> implements Callable<T> {

        private final Callable<T> callable;

        private final FutureListener<T> listener;

        public FutureCallable(Callable<T> callable, FutureListener<T> listener) {
            this.callable = callable;
            this.listener = listener;
        }

        @Override public T call() throws Exception {
            try {
                T result = callable.call();
                listener.onResult(result);
                return result;
            } catch (Exception e) {
                listener.onException(e);
                throw e;
            }
        }
    }

    /** Wraps a runnable so the listener observes completion or failure. */
    protected static class FutureRunnable<T> implements Runnable {

        private final Runnable runnable;

        private final T result;

        private final FutureListener<T> listener;

        private FutureRunnable(Runnable runnable, T result, FutureListener<T> listener) {
            this.runnable = runnable;
            this.result = result;
            this.listener = listener;
        }

        @Override public void run() {
            try {
                runnable.run();
                listener.onResult(result);
            } catch (Exception e) {
                listener.onException(e);
                // Runnable.run cannot throw checked exceptions; re-throw only
                // the unchecked ones so the executor sees the failure.
                if (e instanceof RuntimeException) {
                    throw (RuntimeException) e;
                }
            }
        }
    }
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class ActionNotFoundTransportException extends TransportException { + + public ActionNotFoundTransportException(String message) { + super(message); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ActionTransportRequestHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ActionTransportRequestHandler.java new file mode 100644 index 00000000000..417f28a3b9a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ActionTransportRequestHandler.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +/** + * @author kimchy (Shay Banon) + */ +public interface ActionTransportRequestHandler extends TransportRequestHandler { + + String action(); +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java new file mode 100644 index 00000000000..4150ba6c7ce --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +/** + * A simple based class that always spawns. + * + * @author kimchy (Shay Banon) + */ +public abstract class BaseTransportRequestHandler implements TransportRequestHandler { + + @Override public boolean spawn() { + return true; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java new file mode 100644 index 00000000000..27dc6111b65 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +/** + * A simple based class that always spawns. 
+ * + * @author kimchy (Shay Banon) + */ +public abstract class BaseTransportResponseHandler implements TransportResponseHandler { + + @Override public boolean spawn() { + return true; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BindTransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BindTransportException.java new file mode 100644 index 00000000000..9f5dc05e0da --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/BindTransportException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class BindTransportException extends TransportException { + + public BindTransportException(String message) { + super(message); + } + + public BindTransportException(String message, Throwable cause) { + super(message, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ConnectTransportException.java new file mode 100644 index 00000000000..05a661db429 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.cluster.node.Node; + +/** + * @author kimchy (Shay Banon) + */ +public class ConnectTransportException extends TransportException { + + private final Node node; + + public ConnectTransportException(Node node, String msg) { + this(node, msg, null); + } + + public ConnectTransportException(Node node, String msg, Throwable cause) { + super(node + ": " + msg, cause); + this.node = node; + } + + public Node node() { + return node; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/FailedCommunicationException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/FailedCommunicationException.java new file mode 100644 index 00000000000..fe2c5e9108c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/FailedCommunicationException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class FailedCommunicationException extends TransportException { + + public FailedCommunicationException(String message) { + super(message); + } + + public FailedCommunicationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java new file mode 100644 index 00000000000..2d49ce67c0a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +/** + * A response handler to be used when all interaction will be done through the {@link TransportFuture}. 
+ * + * @author kimchy (Shay Banon) + */ +public abstract class FutureTransportResponseHandler extends BaseTransportResponseHandler { + + @Override public void handleResponse(T response) { + } + + @Override public void handleException(RemoteTransportException exp) { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java new file mode 100644 index 00000000000..3ca65ddef5b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class NotSerializableTransportException extends TransportException { + + public NotSerializableTransportException(Throwable t) { + super(buildMessage(t)); + } + + @Override public Throwable fillInStackTrace() { + return null; + } + + private static String buildMessage(Throwable t) { + StringBuilder sb = new StringBuilder(); + sb.append("[").append(t.getClass().getName()).append("] "); + while (t != null) { + sb.append(t.getMessage()).append("; "); + t = t.getCause(); + } + return sb.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java new file mode 100644 index 00000000000..59df36486c8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchInterruptedException; +import org.elasticsearch.util.io.Streamable; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * @author kimchy (Shay Banon) + */ +public class PlainTransportFuture implements TransportFuture, TransportResponseHandler { + + private final CountDownLatch latch; + private final TransportResponseHandler handler; + private volatile boolean done; + private volatile boolean canceled; + private volatile V result; + private volatile Exception exp; + + public PlainTransportFuture(TransportResponseHandler handler) { + this.handler = handler; + latch = new CountDownLatch(1); + } + + @Override public boolean cancel(boolean mayInterruptIfRunning) { + if (done) + return true; + + canceled = true; + latch.countDown(); + return true; + } + + @Override public boolean isCancelled() { + return canceled; + } + + @Override public boolean isDone() { + return done; + } + + @Override public V get() throws InterruptedException, ExecutionException { + latch.await(); + + if (!done || canceled) { + throw new InterruptedException("future was interrupted"); + } + + if (exp != null) { + throw new ExecutionException(exp.getMessage(), exp); + } + + return this.result; + } + + @Override public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + latch.await(timeout, unit); + + if (!done || canceled) { + throw new TimeoutException("response did not arrive"); + } + + if (exp != null) { + throw new ExecutionException(exp.getMessage(), exp); + } + + return this.result; + } + + @Override public V txGet() throws ElasticSearchException { + try { + return get(); + } catch (InterruptedException e) { + throw new ElasticSearchInterruptedException(e.getMessage()); + 
} catch (ExecutionException e) { + if (e.getCause() instanceof ElasticSearchException) { + throw (ElasticSearchException) e.getCause(); + } else { + throw new TransportException("Failed execution", e); + } + } + } + + @Override public V txGet(long timeout, TimeUnit unit) throws ElasticSearchException, TimeoutException { + try { + return get(timeout, unit); + } catch (InterruptedException e) { + throw new ElasticSearchInterruptedException(e.getMessage()); + } catch (ExecutionException e) { + if (e.getCause() instanceof ElasticSearchException) { + throw (ElasticSearchException) e.getCause(); + } else { + throw new TransportException("Failed execution", e); + } + } + } + + @Override public V newInstance() { + return handler.newInstance(); + } + + @Override public void handleResponse(V response) { + this.done = true; + this.result = response; + + if (canceled) + return; + + handler.handleResponse(response); + latch.countDown(); + } + + @Override public void handleException(RemoteTransportException exp) { + this.done = true; + this.exp = exp; + + if (canceled) + return; + + handler.handleException(exp); + latch.countDown(); + } + + @Override public boolean spawn() { + return handler.spawn(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/RemoteTransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/RemoteTransportException.java new file mode 100644 index 00000000000..9d11feec84d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/RemoteTransportException.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ElasticSearchWrapperException; +import org.elasticsearch.util.transport.TransportAddress; + +/** + * @author kimchy (Shay Banon) + */ +public class RemoteTransportException extends TransportException implements ElasticSearchWrapperException { + + private TransportAddress address; + + private String action; + + public RemoteTransportException(String msg, Throwable cause) { + super(msg, cause); + } + + public RemoteTransportException(String name, TransportAddress address, String action, Throwable cause) { + super(buildMessage(name, address, action), cause); + this.address = address; + this.action = action; + } + + public TransportAddress address() { + return address; + } + + public String action() { + return action; + } + + @Override public Throwable fillInStackTrace() { + // no need for stack trace here, we always have cause + return null; + } + + private static String buildMessage(String name, TransportAddress address, String action) { + StringBuilder sb = new StringBuilder(); + if (name != null) { + sb.append('[').append(name).append(']'); + } + if (address != null) { + sb.append('[').append(address).append(']'); + } + if (action != null) { + sb.append('[').append(action).append(']'); + } + return sb.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java new file mode 100644 index 
00000000000..79b8a19ea80 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class ResponseHandlerFailureTransportException extends TransportException { + + public ResponseHandlerFailureTransportException(String msg) { + super(msg); + } + + public ResponseHandlerFailureTransportException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java new file mode 100644 index 00000000000..ea69b42acaf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class ResponseHandlerNotFoundTransportException extends TransportException { + + private final long requestId; + + public ResponseHandlerNotFoundTransportException(long requestId) { + super("Transport response handler not found of id [" + requestId + "]"); + this.requestId = requestId; + } + + public long requestId() { + return requestId; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/Transport.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/Transport.java new file mode 100644 index 00000000000..51e9802baa4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/Transport.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.transport.BoundTransportAddress; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public interface Transport extends LifecycleComponent { + + class Helper { + public static final byte TRANSPORT_TYPE = 1; + public static final byte RESPONSE_TYPE = 1 << 1; + + public static boolean isRequest(byte value) { + return (value & TRANSPORT_TYPE) == 0; + } + + public static byte setRequest(byte value) { + value &= ~TRANSPORT_TYPE; + return value; + } + + public static byte setResponse(byte value) { + value |= TRANSPORT_TYPE; + return value; + } + + public static boolean isError(byte value) { + return (value & RESPONSE_TYPE) != 0; + } + + public static byte setError(byte value) { + value |= RESPONSE_TYPE; + return value; + } + + } + + void transportServiceAdapter(TransportServiceAdapter service); + + BoundTransportAddress boundAddress(); + + void nodesAdded(Iterable nodes); + + void nodesRemoved(Iterable nodes); + + void sendRequest(Node node, long requestId, String action, + Streamable message, TransportResponseHandler handler) throws IOException, TransportException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportChannel.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportChannel.java new file mode 100644 index 00000000000..2288bc8b099 
--- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportChannel.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public interface TransportChannel { + String action(); + + void sendResponse(Streamable message) throws IOException; + + void sendResponse(Throwable error) throws IOException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportException.java new file mode 100644 index 00000000000..6da3521530e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportException extends ElasticSearchException { + + public TransportException(String msg) { + super(msg); + } + + public TransportException(String msg, Throwable cause) { + super(msg, cause); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportFuture.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportFuture.java new file mode 100644 index 00000000000..2bc21916603 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportFuture.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ElasticSearchException; + +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * @author kimchy (Shay Banon) + */ +public interface TransportFuture extends Future { + + /** + * Waits if necessary for the computation to complete, and then + * retrieves its result. + */ + V txGet() throws ElasticSearchException; + + /** + * Waits if necessary for at most the given time for the computation + * to complete, and then retrieves its result, if available. + */ + V txGet(long timeout, TimeUnit unit) throws ElasticSearchException, TimeoutException; +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportModule.java new file mode 100644 index 00000000000..e15295d6971 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportModule.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import com.google.inject.AbstractModule; +import com.google.inject.Module; +import org.elasticsearch.util.Classes; +import org.elasticsearch.util.settings.Settings; + +import static org.elasticsearch.util.guice.ModulesFactory.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportModule extends AbstractModule { + + private final Settings settings; + + public TransportModule(Settings settings) { + this.settings = settings; + } + + @Override + protected void configure() { + bind(TransportService.class).asEagerSingleton(); + bind(TransportServiceManagement.class).asEagerSingleton(); + + Class defaultTransportModule = null; + try { + Classes.getDefaultClassLoader().loadClass("org.elasticsearch.transport.netty.NettyTransport"); + defaultTransportModule = (Class) Classes.getDefaultClassLoader().loadClass("org.elasticsearch.transport.netty.NettyTransportModule"); + } catch (ClassNotFoundException e) { + // TODO default to the local one + } + + Class moduleClass = settings.getAsClass("transport.type", defaultTransportModule, "org.elasticsearch.transport.", "TransportModule"); + createModule(moduleClass, settings).configure(binder()); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java new file mode 100644 index 00000000000..0cf719e518b --- /dev/null +++ 
b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +/** + * @author kimchy (Shay Banon) + */ +public interface TransportRequestHandler { + + T newInstance(); + + void messageReceived(T request, TransportChannel channel) throws Exception; + + boolean spawn(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java new file mode 100644 index 00000000000..97f74e1f429 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.Streamable; + +/** + * @author kimchy (Shay Banon) + */ +public interface TransportResponseHandler { + + /** + * creates a new instance of the return type from the remote call. + * called by the infra before deserializing the response. + * + * @return a new reponse copy. + */ + T newInstance(); + + void handleResponse(T response); + + void handleException(RemoteTransportException exp); + + boolean spawn(); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportSerializationException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportSerializationException.java new file mode 100644 index 00000000000..897c66c4ce3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportSerializationException.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportSerializationException extends TransportException { + + public TransportSerializationException(String msg) { + super(msg); + } + + public TransportSerializationException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportService.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportService.java new file mode 100644 index 00000000000..b1b2935960b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportService.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.component.LifecycleComponent; +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashMapLong; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.transport.BoundTransportAddress; + +import java.io.IOException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.util.concurrent.ConcurrentMaps.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportService extends AbstractComponent implements LifecycleComponent { + + private final Lifecycle lifecycle = new Lifecycle(); + + private final Transport transport; + + private final ConcurrentMap serverHandlers = newConcurrentMap(); + + private final NonBlockingHashMapLong clientHandlers = new NonBlockingHashMapLong(); + + final AtomicLong requestIds = new AtomicLong(); + + public TransportService(Transport transport) { + this(EMPTY_SETTINGS, transport); + } + + @Inject public TransportService(Settings settings, Transport transport) { + super(settings); + this.transport = transport; + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + public TransportService start() throws ElasticSearchException { + if (!lifecycle.moveToStarted()) { + return this; + } + // register us as an adapter for the transport service + transport.transportServiceAdapter(new TransportServiceAdapter() { + @Override public TransportRequestHandler handler(String action) { + return serverHandlers.get(action); + } + + @Override public 
TransportResponseHandler remove(long requestId) { + return clientHandlers.remove(requestId); + } + }); + transport.start(); + if (transport.boundAddress() != null && logger.isInfoEnabled()) { + logger.info("{}", transport.boundAddress()); + } + return this; + } + + public TransportService stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + transport.stop(); + return this; + } + + public void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + transport.close(); + } + + public BoundTransportAddress boundAddress() { + return transport.boundAddress(); + } + + public void nodesAdded(Iterable nodes) { + try { + transport.nodesAdded(nodes); + } catch (Exception e) { + logger.warn("Failed add nodes [" + nodes + "] to transport", e); + } + } + + public void nodesRemoved(Iterable nodes) { + try { + transport.nodesRemoved(nodes); + } catch (Exception e) { + logger.warn("Failed to remove nodes[" + nodes + "] from transport", e); + } + } + + public TransportFuture submitRequest(Node node, String action, Streamable message, + TransportResponseHandler handler) throws TransportException { + PlainTransportFuture futureHandler = new PlainTransportFuture(handler); + sendRequest(node, action, message, futureHandler); + return futureHandler; + } + + public void sendRequest(Node node, String action, Streamable message, + TransportResponseHandler handler) throws TransportException { + try { + final long requestId = newRequestId(); + clientHandlers.put(requestId, handler); + transport.sendRequest(node, requestId, action, message, handler); + } catch (IOException e) { + throw new TransportException("Can't serialize request", e); + } + } + + private long newRequestId() { + return requestIds.getAndIncrement(); + } + + public void registerHandler(ActionTransportRequestHandler handler) { + registerHandler(handler.action(), handler); + } + + public void registerHandler(String action, 
TransportRequestHandler handler) { + serverHandlers.put(action, handler); + } + + public void removeHandler(String action) { + serverHandlers.remove(action); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java new file mode 100644 index 00000000000..63810ccdc74 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +/** + * @author kimchy (Shay Banon) + */ +public interface TransportServiceAdapter { + + TransportRequestHandler handler(String action); + + TransportResponseHandler remove(long requestId); +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportServiceManagement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportServiceManagement.java new file mode 100644 index 00000000000..b9ab8c7baaf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/TransportServiceManagement.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import com.google.inject.Inject; +import org.elasticsearch.jmx.MBean; +import org.elasticsearch.jmx.ManagedAttribute; + +/** + * @author kimchy (Shay Banon) + */ +@MBean(objectName = "service=transport", description = "Transport") +public class TransportServiceManagement { + + private final TransportService transportService; + + @Inject public TransportServiceManagement(TransportService transportService) { + this.transportService = transportService; + } + + @ManagedAttribute(description = "Transport address published to other nodes") + public String getPublishAddress() { + return transportService.boundAddress().publishAddress().toString(); + } + + @ManagedAttribute(description = "Transport address bounded on") + public String getBoundAddress() { + return transportService.boundAddress().boundAddress().toString(); + } + + @ManagedAttribute(description = "Total number of transport requests sent") + public long getTotalNumberOfRequests() { + return transportService.requestIds.get(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/VoidTransportResponseHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/VoidTransportResponseHandler.java new file mode 100644 index 00000000000..04930cb8b1f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/VoidTransportResponseHandler.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.util.io.VoidStreamable; + +/** + * @author kimchy (Shay Banon) + */ +public class VoidTransportResponseHandler implements TransportResponseHandler { + + public static final VoidTransportResponseHandler INSTANCE = new VoidTransportResponseHandler(true); + public static final VoidTransportResponseHandler INSTANCE_NOSPAWN = new VoidTransportResponseHandler(false); + + private boolean spawn; + + public VoidTransportResponseHandler() { + this(true); + } + + public VoidTransportResponseHandler(boolean spawn) { + this.spawn = spawn; + } + + @Override public VoidStreamable newInstance() { + return VoidStreamable.INSTANCE; + } + + @Override public void handleResponse(VoidStreamable response) { + } + + @Override public void handleException(RemoteTransportException exp) { + } + + @Override public boolean spawn() { + return spawn; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java new file mode 100644 index 00000000000..6acbc221d86 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.netty; + +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.io.DataInputInputStream; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.io.ThrowableObjectInputStream; +import org.jboss.netty.buffer.ChannelBufferInputStream; +import org.jboss.netty.channel.*; +import org.slf4j.Logger; + +import java.io.IOException; + +import static org.elasticsearch.transport.Transport.Helper.*; + +/** + * @author kimchy (Shay Banon) + */ +@ChannelPipelineCoverage("one") +public class MessageChannelHandler extends SimpleChannelUpstreamHandler { + + private final Logger logger; + + private final ThreadPool threadPool; + + private final TransportServiceAdapter transportServiceAdapter; + + private final NettyTransport transport; + + public MessageChannelHandler(NettyTransport transport, Logger logger) { + this.threadPool = transport.threadPool(); + this.transportServiceAdapter = transport.transportServiceAdapter(); + this.transport = transport; + this.logger = logger; + } + + @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent event) throws Exception { + ChannelBufferInputStream buffer = (ChannelBufferInputStream) event.getMessage(); + + long requestId = buffer.readLong(); + byte status = buffer.readByte(); + boolean isRequest = 
isRequest(status); + + if (isRequest) { + handleRequest(event, buffer, requestId); + } else { + final TransportResponseHandler handler = transportServiceAdapter.remove(requestId); + if (handler == null) { + throw new ResponseHandlerNotFoundTransportException(requestId); + } + if (isError(status)) { + handlerResponseError(buffer, handler); + } else { + handleResponse(buffer, handler); + } + } + } + + private void handleResponse(ChannelBufferInputStream buffer, final TransportResponseHandler handler) { + final Streamable streamable = handler.newInstance(); + try { + streamable.readFrom(buffer); + } catch (Exception e) { + handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + streamable.getClass().getName() + "]", e)); + return; + } + if (handler.spawn()) { + threadPool.execute(new Runnable() { + @SuppressWarnings({"unchecked"}) @Override public void run() { + try { + handler.handleResponse(streamable); + } catch (Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException("Failed to handler response", e)); + } + } + }); + } else { + try { + //noinspection unchecked + handler.handleResponse(streamable); + } catch (Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException("Failed to handler response", e)); + } + } + } + + private void handlerResponseError(ChannelBufferInputStream buffer, final TransportResponseHandler handler) { + Throwable error; + try { + ThrowableObjectInputStream ois = new ThrowableObjectInputStream(new DataInputInputStream(buffer)); + error = (Throwable) ois.readObject(); + } catch (Exception e) { + error = new TransportSerializationException("Failed to deserialize exception response from stream", e); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException("None remote 
transport exception", error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + if (handler.spawn()) { + threadPool.execute(new Runnable() { + @Override public void run() { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error("Failed to handle exception response", e); + } + } + }); + } else { + handler.handleException(rtx); + } + } + + private void handleRequest(MessageEvent event, ChannelBufferInputStream buffer, long requestId) throws IOException { + final String action = buffer.readUTF(); + + final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, action, event.getChannel(), requestId); + try { + final TransportRequestHandler handler = transportServiceAdapter.handler(action); + if (handler == null) { + throw new ActionNotFoundTransportException("Action [" + action + "] not found"); + } + final Streamable streamable = handler.newInstance(); + streamable.readFrom(buffer); + if (handler.spawn()) { + threadPool.execute(new Runnable() { + @SuppressWarnings({"unchecked"}) @Override public void run() { + try { + handler.messageReceived(streamable, transportChannel); + } catch (Throwable e) { + try { + transportChannel.sendResponse(e); + } catch (IOException e1) { + logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Actual Exception", e); + } + } + } + }); + } else { + //noinspection unchecked + handler.messageReceived(streamable, transportChannel); + } + } catch (Exception e) { + try { + transportChannel.sendResponse(e); + } catch (IOException e1) { + logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Actual Exception", e1); + } + } + } + + @Override public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { + transport.exceptionCaught(ctx, e); + } +} diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java new file mode 100644 index 00000000000..9292c519db5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -0,0 +1,575 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.netty; + +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.component.AbstractComponent; +import org.elasticsearch.util.component.Lifecycle; +import org.elasticsearch.util.io.ByteArrayDataOutputStream; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.transport.BoundTransportAddress; +import org.elasticsearch.util.transport.InetSocketTransportAddress; +import org.elasticsearch.util.transport.PortsRange; +import org.elasticsearch.util.transport.TransportAddress; +import org.jboss.netty.bootstrap.ClientBootstrap; +import org.jboss.netty.bootstrap.ServerBootstrap; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.channel.*; +import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; +import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; +import org.jboss.netty.logging.InternalLogger; +import org.jboss.netty.logging.InternalLoggerFactory; +import org.jboss.netty.logging.Slf4JLoggerFactory; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import 
static org.elasticsearch.transport.Transport.Helper.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.elasticsearch.util.concurrent.ConcurrentMaps.*; +import static org.elasticsearch.util.concurrent.DynamicExecutors.*; +import static org.elasticsearch.util.io.HostResolver.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.elasticsearch.util.transport.NetworkExceptionHelper.*; + +/** + * @author kimchy (Shay Banon) + */ +public class NettyTransport extends AbstractComponent implements Transport { + + static { + InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory() { + @Override public InternalLogger newInstance(String name) { + return super.newInstance(name.replace("org.jboss.netty.", "netty.")); + } + }); + } + + private final Lifecycle lifecycle = new Lifecycle(); + + final int workerCount; + + final String port; + + final String bindHost; + + final String publishHost; + + final TimeValue connectTimeout; + + final int connectionsPerNode; + + final int connectRetries; + + final Boolean tcpNoDelay; + + final Boolean tcpKeepAlive; + + final Boolean reuseAddress; + + final SizeValue tcpSendBufferSize; + + final SizeValue tcpReceiveBufferSize; + + private final ThreadPool threadPool; + + private volatile OpenChannelsHandler serverOpenChannels; + + private volatile ClientBootstrap clientBootstrap; + + private volatile ServerBootstrap serverBootstrap; + + // node id to actual channel + final ConcurrentMap clientChannels = newConcurrentMap(); + + + private volatile Channel serverChannel; + + private volatile TransportServiceAdapter transportServiceAdapter; + + private volatile BoundTransportAddress boundAddress; + + public NettyTransport(ThreadPool threadPool) { + this(EMPTY_SETTINGS, threadPool); + } + + @Inject public NettyTransport(Settings settings, ThreadPool threadPool) { + super(settings); + this.threadPool = threadPool; + + this.workerCount = componentSettings.getAsInt("workerCount", 
Runtime.getRuntime().availableProcessors()); + this.port = componentSettings.get("port", "9300-9400"); + this.bindHost = componentSettings.get("bindHost"); + this.connectionsPerNode = componentSettings.getAsInt("connectionsPerNode", 5); + this.publishHost = componentSettings.get("publishHost"); + this.connectTimeout = componentSettings.getAsTime("connectTimeout", timeValueSeconds(1)); + this.connectRetries = componentSettings.getAsInt("connectRetries", 2); + this.tcpNoDelay = componentSettings.getAsBoolean("tcpNoDelay", true); + this.tcpKeepAlive = componentSettings.getAsBoolean("tcpKeepAlive", null); + this.reuseAddress = componentSettings.getAsBoolean("reuseAddress", true); + this.tcpSendBufferSize = componentSettings.getAsSize("tcpSendBufferSize", null); + this.tcpReceiveBufferSize = componentSettings.getAsSize("tcpReceiveBufferSize", null); + } + + @Override public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + public Settings settings() { + return this.settings; + } + + @Override public void transportServiceAdapter(TransportServiceAdapter service) { + this.transportServiceAdapter = service; + } + + TransportServiceAdapter transportServiceAdapter() { + return transportServiceAdapter; + } + + ThreadPool threadPool() { + return threadPool; + } + + @Override public Transport start() throws TransportException { + if (!lifecycle.moveToStarted()) { + return this; + } + + clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, "transportClientBoss")), + Executors.newCachedThreadPool(daemonThreadFactory(settings, "transportClientIoWorker")), + workerCount)); + ChannelPipelineFactory clientPipelineFactory = new ChannelPipelineFactory() { + @Override public ChannelPipeline getPipeline() throws Exception { + ChannelPipeline pipeline = Channels.pipeline(); + pipeline.addLast("decoder", new SizeHeaderFrameDecoder()); + pipeline.addLast("dispatcher", new 
MessageChannelHandler(NettyTransport.this, logger)); + return pipeline; + } + }; + clientBootstrap.setPipelineFactory(clientPipelineFactory); + clientBootstrap.setOption("connectTimeoutMillis", connectTimeout.millis()); + if (tcpNoDelay != null) { + clientBootstrap.setOption("tcpNoDelay", tcpNoDelay); + } + if (tcpKeepAlive != null) { + clientBootstrap.setOption("keepAlive", tcpKeepAlive); + } + if (tcpSendBufferSize != null) { + clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.bytes()); + } + if (tcpReceiveBufferSize != null) { + clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.bytes()); + } + if (reuseAddress != null) { + clientBootstrap.setOption("reuseAddress", reuseAddress); + } + + if (!settings.getAsBoolean("network.server", true)) { + return null; + } + + serverOpenChannels = new OpenChannelsHandler(); + serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, "transportServerBoss")), + Executors.newCachedThreadPool(daemonThreadFactory(settings, "transportServerIoWorker")), + workerCount)); + ChannelPipelineFactory serverPipelineFactory = new ChannelPipelineFactory() { + @Override public ChannelPipeline getPipeline() throws Exception { + ChannelPipeline pipeline = Channels.pipeline(); + pipeline.addLast("openChannels", serverOpenChannels); + pipeline.addLast("decoder", new SizeHeaderFrameDecoder()); + pipeline.addLast("dispatcher", new MessageChannelHandler(NettyTransport.this, logger)); + return pipeline; + } + }; + serverBootstrap.setPipelineFactory(serverPipelineFactory); + if (tcpNoDelay != null) { + serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay); + } + if (tcpKeepAlive != null) { + serverBootstrap.setOption("child.keepAlive", tcpKeepAlive); + } + if (tcpSendBufferSize != null) { + serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes()); + } + if (tcpReceiveBufferSize != null) { + 
serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes()); + } + if (reuseAddress != null) { + serverBootstrap.setOption("reuseAddress", reuseAddress); + serverBootstrap.setOption("child.reuseAddress", reuseAddress); + } + + // Bind and start to accept incoming connections. + InetAddress hostAddressX; + try { + hostAddressX = resultBindHostAddress(bindHost, settings); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host [" + bindHost + "]", e); + } + final InetAddress hostAddress = hostAddressX; + + PortsRange portsRange = new PortsRange(port); + final AtomicReference lastException = new AtomicReference(); + boolean success = portsRange.iterate(new PortsRange.PortCallback() { + @Override public boolean onPortNumber(int portNumber) { + try { + serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)); + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + } + }); + if (!success) { + throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); + } + + logger.debug("Bound to address [{}]", serverChannel.getLocalAddress()); + + InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress(); + InetSocketAddress publishAddress; + try { + InetAddress publishAddressX = resultPublishHostAddress(publishHost, settings); + if (publishAddressX == null) { + // if its 0.0.0.0, we can't publish that.., default to the local ip address + if (boundAddress.getAddress().isAnyLocalAddress()) { + publishAddress = new InetSocketAddress(resultPublishHostAddress(publishHost, settings, LOCAL_IP), boundAddress.getPort()); + } else { + publishAddress = boundAddress; + } + } else { + publishAddress = new InetSocketAddress(publishAddressX, boundAddress.getPort()); + } + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + this.boundAddress = new BoundTransportAddress(new 
InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)); + return this; + } + + @Override public Transport stop() throws ElasticSearchException { + if (!lifecycle.moveToStopped()) { + return this; + } + + if (serverChannel != null) { + try { + serverChannel.close().awaitUninterruptibly(); + } finally { + serverChannel = null; + } + } + + if (serverOpenChannels != null) { + serverOpenChannels.close(); + serverOpenChannels = null; + } + + if (serverBootstrap != null) { + serverBootstrap.releaseExternalResources(); + serverBootstrap = null; + } + + for (Iterator it = clientChannels.values().iterator(); it.hasNext();) { + NodeConnections nodeConnections = it.next(); + it.remove(); + nodeConnections.close(); + } + + if (clientBootstrap != null) { + // HACK, make sure we try and close open client channels also after + // we releaseExternalResources, they seem to hang when there are open client channels + ScheduledFuture scheduledFuture = threadPool.schedule(new Runnable() { + @Override public void run() { + try { + for (Iterator it = clientChannels.values().iterator(); it.hasNext();) { + NodeConnections nodeConnections = it.next(); + it.remove(); + nodeConnections.close(); + } + } catch (Exception e) { + // ignore + } + } + }, 500, TimeUnit.MILLISECONDS); + clientBootstrap.releaseExternalResources(); + scheduledFuture.cancel(false); + clientBootstrap = null; + } + return this; + } + + @Override public void close() { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.moveToClosed()) { + return; + } + } + + @Override public BoundTransportAddress boundAddress() { + return this.boundAddress; + } + + void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { + if (!lifecycle.started()) { + // ignore + } + if (isCloseConnectionException(e.getCause()) || isConnectException(e.getCause())) { + if (logger.isTraceEnabled()) { + logger.trace("(Ignoring) Exception caught on netty layer [" + ctx.getChannel() + "]", 
e.getCause()); + } + } else { + logger.warn("Exception caught on netty layer [" + ctx.getChannel() + "]", e.getCause()); + } + } + + TransportAddress wrapAddress(SocketAddress socketAddress) { + return new InetSocketTransportAddress((InetSocketAddress) socketAddress); + } + + private static final byte[] LENGTH_PLACEHOLDER = new byte[4]; + + @Override public void sendRequest(Node node, long requestId, String action, + Streamable streamable, final TransportResponseHandler handler) throws IOException, TransportException { + + Channel targetChannel = nodeChannel(node); + + ByteArrayDataOutputStream stream = ByteArrayDataOutputStream.Cached.cached(); + stream.write(LENGTH_PLACEHOLDER); // fake size + + stream.writeLong(requestId); + byte status = 0; + status = setRequest(status); + stream.writeByte(status); // 0 for request, 1 for response. + + stream.writeUTF(action); + streamable.writeTo(stream); + + ChannelBuffer buffer = ChannelBuffers.wrappedBuffer(stream.copiedByteArray()); + + int size = buffer.writerIndex() - 4; + if (size == 0) { + handler.handleException(new RemoteTransportException("", new FailedCommunicationException("Trying to send a stream with 0 size"))); + } + buffer.setInt(0, size); // update real size. + ChannelFuture channelFuture = targetChannel.write(buffer); + // TODO do we need this listener? +// channelFuture.addListener(new ChannelFutureListener() { +// @Override public void operationComplete(ChannelFuture future) throws Exception { +// if (!future.isSuccess()) { +// // maybe add back the retry? 
+// handler.handleException(new RemoteTransportException("", new FailedCommunicationException("Error sending request", future.getCause()))); +// } +// } +// }); + } + + @Override public void nodesAdded(Iterable nodes) { + if (!lifecycle.started()) { + throw new ElasticSearchIllegalStateException("Can't add nodes to a stopped transport"); + } + for (Node node : nodes) { + try { + nodeChannel(node); + } catch (Exception e) { + logger.warn("Failed to connect to discovered node [" + node + "]", e); + } + } + } + + @Override public void nodesRemoved(Iterable nodes) { + for (Node node : nodes) { + NodeConnections nodeConnections = clientChannels.remove(node.id()); + if (nodeConnections != null) { + nodeConnections.close(); + } + } + } + + private Channel nodeChannel(Node node) throws ConnectTransportException { + if (node == null) { + throw new ConnectTransportException(node, "Can't connect to a null node"); + } + NodeConnections nodeConnections = clientChannels.get(node.id()); + if (nodeConnections != null) { + return nodeConnections.channel(); + } + synchronized (this) { + // recheck here, within the sync block (we cache connections, so we don't care about this single sync block) + nodeConnections = clientChannels.get(node.id()); + if (nodeConnections != null) { + return nodeConnections.channel(); + } + // build connection(s) to the node + ArrayList channels = new ArrayList(); + Throwable lastConnectException = null; + for (int connectionIndex = 0; connectionIndex < connectionsPerNode; connectionIndex++) { + for (int i = 1; i <= connectRetries; i++) { + if (!lifecycle.started()) { + for (Channel channel1 : channels) { + channel1.close().awaitUninterruptibly(); + } + throw new ConnectTransportException(node, "Can't connect when the transport is stopped"); + } + InetSocketAddress address = ((InetSocketTransportAddress) node.address()).address(); + ChannelFuture channelFuture = clientBootstrap.connect(address); + channelFuture.awaitUninterruptibly((long) 
(connectTimeout.millis() * 1.25)); + if (!channelFuture.isSuccess()) { + // we failed to connect, check if we need to bail or retry + if (i == connectRetries && connectionIndex == 0) { + lastConnectException = channelFuture.getCause(); + if (connectionIndex == 0) { + throw new ConnectTransportException(node, "connectTimeout[" + connectTimeout + "], connectRetries[" + connectRetries + "]", lastConnectException); + } else { + // break out of the retry loop, try another connection + break; + } + } else { + logger.trace("Retry #[" + i + "], connect to [" + node + "]"); + try { + channelFuture.getChannel().close(); + } catch (Exception e) { + // ignore + } + continue; + } + } + // we got a connection, add it to our connections + Channel channel = channelFuture.getChannel(); + if (!lifecycle.started()) { + channel.close(); + for (Channel channel1 : channels) { + channel1.close().awaitUninterruptibly(); + } + throw new ConnectTransportException(node, "Can't connect when the transport is stopped"); + } + channel.getCloseFuture().addListener(new ChannelCloseListener(node.id())); + channels.add(channel); + break; + } + } + if (channels.isEmpty()) { + if (lastConnectException != null) { + throw new ConnectTransportException(node, "connectTimeout[" + connectTimeout + "], connectRetries[" + connectRetries + "]", lastConnectException); + } + throw new ConnectTransportException(node, "connectTimeout[" + connectTimeout + "], connectRetries[" + connectRetries + "], reason unknown"); + } + if (logger.isDebugEnabled()) { + logger.debug("Connected to node [{}], numberOfConnections [{}]", node, channels.size()); + } + clientChannels.put(node.id(), new NodeConnections(channels.toArray(new Channel[channels.size()]))); + } + + return clientChannels.get(node.id()).channel(); + } + + private static class NodeConnections { + + private final AtomicInteger counter = new AtomicInteger(); + + private volatile Channel[] channels; + + private volatile boolean closed = false; + + private 
NodeConnections(Channel[] channels) { + this.channels = channels; + } + + private Channel channel() { + return channels[Math.abs(counter.incrementAndGet()) % channels.length]; + } + + private void channelClosed(Channel closedChannel) { + List updated = Lists.newArrayList(); + for (Channel channel : channels) { + if (!channel.getId().equals(closedChannel.getId())) { + updated.add(channel); + } + } + this.channels = updated.toArray(new Channel[updated.size()]); + } + + private int numberOfChannels() { + return channels.length; + } + + private synchronized void close() { + if (closed) { + return; + } + closed = true; + Channel[] channelsToClose = channels; + channels = new Channel[0]; + for (Channel channel : channelsToClose) { + if (channel.isOpen()) { + channel.close().awaitUninterruptibly(); + } + } + } + } + + private class ChannelCloseListener implements ChannelFutureListener { + + private final String nodeId; + + private ChannelCloseListener(String nodeId) { + this.nodeId = nodeId; + } + + @Override public void operationComplete(ChannelFuture future) throws Exception { + final NodeConnections nodeConnections = clientChannels.get(nodeId); + if (nodeConnections != null) { + nodeConnections.channelClosed(future.getChannel()); + if (nodeConnections.numberOfChannels() == 0) { + // all the channels in the node connections are closed, remove it from + // our client channels + clientChannels.remove(nodeId); + } + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java new file mode 100644 index 00000000000..bb473a06149 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.netty; + +import org.elasticsearch.transport.NotSerializableTransportException; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.util.io.ByteArrayDataOutputStream; +import org.elasticsearch.util.io.Streamable; +import org.elasticsearch.util.io.ThrowableObjectOutputStream; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBufferOutputStream; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.channel.Channel; + +import java.io.IOException; +import java.io.NotSerializableException; + +import static org.elasticsearch.transport.Transport.Helper.*; + +/** + * @author kimchy (Shay Banon) + */ +public class NettyTransportChannel implements TransportChannel { + + private static final byte[] LENGTH_PLACEHOLDER = new byte[4]; + + private final NettyTransport transport; + + private final String action; + + private final Channel channel; + + private final long requestId; + + public NettyTransportChannel(NettyTransport transport, String action, Channel channel, long requestId) { + this.transport = transport; + this.action = action; + this.channel = channel; + this.requestId = requestId; + } + + 
@Override public String action() { + return this.action; + } + + @Override public void sendResponse(Streamable message) throws IOException { + ByteArrayDataOutputStream stream = ByteArrayDataOutputStream.Cached.cached(); + stream.write(LENGTH_PLACEHOLDER); // fake size + stream.writeLong(requestId); + byte status = 0; + status = setResponse(status); + stream.writeByte(status); // 0 for request, 1 for response. + message.writeTo(stream); + ChannelBuffer buffer = ChannelBuffers.wrappedBuffer(stream.copiedByteArray()); + buffer.setInt(0, buffer.writerIndex() - 4); // update real size. + channel.write(buffer); + } + + @Override public void sendResponse(Throwable error) throws IOException { + ChannelBuffer buffer = ChannelBuffers.dynamicBuffer(); + ChannelBufferOutputStream os = new ChannelBufferOutputStream(buffer); + + os.write(LENGTH_PLACEHOLDER); + os.writeLong(requestId); + + byte status = 0; + status = setResponse(status); + status = setError(status); + os.writeByte(status); + + // mark the buffer, so we can reset it when the exception is not serializable + os.flush(); + buffer.markWriterIndex(); + try { + RemoteTransportException tx = new RemoteTransportException(transport.settings().get("name"), transport.wrapAddress(channel.getLocalAddress()), action, error); + ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(os); + too.writeObject(tx); + too.close(); + } catch (NotSerializableException e) { + buffer.resetWriterIndex(); + RemoteTransportException tx = new RemoteTransportException(transport.settings().get("name"), transport.wrapAddress(channel.getLocalAddress()), action, new NotSerializableTransportException(error)); + ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(os); + too.writeObject(tx); + too.close(); + } + + buffer.setInt(0, buffer.writerIndex() - 4); // update real size. 
+ channel.write(buffer); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportManagement.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportManagement.java new file mode 100644 index 00000000000..8f11b2aa2b1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportManagement.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.netty; + +import com.google.inject.Inject; +import org.elasticsearch.jmx.MBean; +import org.elasticsearch.jmx.ManagedAttribute; + +/** + * @author kimchy (Shay Banon) + */ +@MBean(objectName = "service=transport,transportType=netty", description = "Netty Transport") +public class NettyTransportManagement { + + private NettyTransport transport; + + @Inject public NettyTransportManagement(NettyTransport transport) { + this.transport = transport; + } + + @ManagedAttribute(description = "Number of connections this node has to other nodes") + public long getNumberOfOutboundConnections() { + return transport.clientChannels.size(); + } + + @ManagedAttribute(description = "Number if IO worker threads") + public int getWorkerCount() { + return transport.workerCount; + } + + @ManagedAttribute(description = "Port(s) netty was configured to bind on") + public String getPort() { + return transport.port; + } + + @ManagedAttribute(description = "Host to bind to") + public String getBindHost() { + return transport.bindHost; + } + + @ManagedAttribute(description = "Host to publish") + public String getPublishHost() { + return transport.publishHost; + } + + @ManagedAttribute(description = "Connect timeout") + public String getConnectTimeout() { + return transport.connectTimeout.toString(); + } + + @ManagedAttribute(description = "Connect retries") + public int getConnectRetries() { + return transport.connectRetries; + } + + @ManagedAttribute(description = "TcpNoDelay") + public Boolean getTcpNoDelay() { + return transport.tcpNoDelay; + } + + @ManagedAttribute(description = "TcpKeepAlive") + public Boolean getTcpKeepAlive() { + return transport.tcpKeepAlive; + } + + @ManagedAttribute(description = "ReuseAddress") + public Boolean getReuseAddress() { + return transport.reuseAddress; + } + + @ManagedAttribute(description = "TcpSendBufferSize") + public String getTcpSendBufferSize() { + if (transport.tcpSendBufferSize == null) { + return null; 
+ } + return transport.tcpSendBufferSize.toString(); + } + + @ManagedAttribute(description = "TcpReceiveBufferSize") + public String getTcpReceiveBufferSize() { + if (transport.tcpReceiveBufferSize == null) { + return null; + } + return transport.tcpReceiveBufferSize.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java new file mode 100644 index 00000000000..f8546ff9c5f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.netty; + +import com.google.inject.AbstractModule; +import org.elasticsearch.transport.Transport; + +/** + * @author kimchy (Shay Banon) + */ +public class NettyTransportModule extends AbstractModule { + + @Override protected void configure() { + bind(NettyTransport.class).asEagerSingleton(); + bind(Transport.class).to(NettyTransport.class).asEagerSingleton(); + bind(NettyTransportManagement.class).asEagerSingleton(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/OpenChannelsHandler.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/OpenChannelsHandler.java new file mode 100644 index 00000000000..c152a3df52e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/OpenChannelsHandler.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.netty; + +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashSet; +import org.jboss.netty.channel.*; + +/** + * @author kimchy (Shay Banon) + */ +@ChannelPipelineCoverage(ChannelPipelineCoverage.ALL) +public class OpenChannelsHandler implements ChannelUpstreamHandler { + + private NonBlockingHashSet openChannels = new NonBlockingHashSet(); + + private final ChannelFutureListener remover = new ChannelFutureListener() { + public void operationComplete(ChannelFuture future) throws Exception { + openChannels.remove(future.getChannel()); + } + }; + + @Override public void handleUpstream(ChannelHandlerContext ctx, ChannelEvent e) throws Exception { + if (e instanceof ChannelStateEvent) { + ChannelStateEvent evt = (ChannelStateEvent) e; + if (evt.getState() == ChannelState.OPEN) { + boolean added = openChannels.add(ctx.getChannel()); + if (added) { + ctx.getChannel().getCloseFuture().addListener(remover); + } + } + } + ctx.sendUpstream(e); + } + + public void close() { + for (Channel channel : openChannels) { + channel.close().awaitUninterruptibly(); + } + openChannels.clear(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java new file mode 100644 index 00000000000..94317cec6a3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.netty; + +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBufferInputStream; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelPipelineCoverage; +import org.jboss.netty.handler.codec.frame.FrameDecoder; + +import java.io.StreamCorruptedException; + +/** + * @author kimchy (Shay Banon) + */ +@ChannelPipelineCoverage("one") +public class SizeHeaderFrameDecoder extends FrameDecoder { + + protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception { + + if (buffer.readableBytes() < 4) { + return null; + } + + int dataLen = buffer.getInt(buffer.readerIndex()); + if (dataLen <= 0) { + throw new StreamCorruptedException("invalid data length: " + dataLen); + } + + if (buffer.readableBytes() < dataLen + 4) { + return null; + } + + buffer.skipBytes(4); + + return new ChannelBufferInputStream(buffer, dataLen); + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Classes.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Classes.java new file mode 100644 index 00000000000..a185cc8ca19 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Classes.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +/** + * @author kimchy (Shay Banon) + */ +public class Classes { + + /** + * The package separator character '.' + */ + private static final char PACKAGE_SEPARATOR = '.'; + + /** + * Return the default ClassLoader to use: typically the thread context + * ClassLoader, if available; the ClassLoader that loaded the ClassUtils + * class will be used as fallback. + *

+ *

Call this method if you intend to use the thread context ClassLoader + * in a scenario where you absolutely need a non-null ClassLoader reference: + * for example, for class path resource loading (but not necessarily for + * Class.forName, which accepts a null ClassLoader + * reference as well). + * + * @return the default ClassLoader (never null) + * @see java.lang.Thread#getContextClassLoader() + */ + public static ClassLoader getDefaultClassLoader() { + ClassLoader cl = null; + try { + cl = Thread.currentThread().getContextClassLoader(); + } + catch (Throwable ex) { + // Cannot access thread context ClassLoader - falling back to system class loader... + } + if (cl == null) { + // No thread context class loader -> use class loader of this class. + cl = Classes.class.getClassLoader(); + } + return cl; + } + + /** + * Determine the name of the package of the given class: + * e.g. "java.lang" for the java.lang.String class. + * + * @param clazz the class + * @return the package name, or the empty String if the class + * is defined in the default package + */ + public static String getPackageName(Class clazz) { + String className = clazz.getName(); + int lastDotIndex = className.lastIndexOf(PACKAGE_SEPARATOR); + return (lastDotIndex != -1 ? 
className.substring(0, lastDotIndex) : ""); + } + + public static String getPackageNameNoDomain(Class clazz) { + String fullPackage = getPackageName(clazz); + if (fullPackage.startsWith("org.") || fullPackage.startsWith("com.") || fullPackage.startsWith("net.")) { + return fullPackage.substring(4); + } + return fullPackage; + } + + private Classes() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/IdentityHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/IdentityHashSet.java new file mode 100644 index 00000000000..86b604ae68d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/IdentityHashSet.java @@ -0,0 +1,193 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util; + +import java.util.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IdentityHashSet + extends AbstractSet + implements Set, Cloneable, java.io.Serializable { + + static final long serialVersionUID = -5024744406713321677L; + + private transient IdentityHashMap map; + + // Dummy value to associate with an Object in the backing Map + private static final Object PRESENT = new Object(); + + public IdentityHashSet() { + map = new IdentityHashMap(); + } + + public IdentityHashSet(Collection c) { + map = new IdentityHashMap(Math.max((int) (c.size() / .75f) + 1, 16)); + addAll(c); + } + + public IdentityHashSet(int expectedSize) { + map = new IdentityHashMap(expectedSize); + } + + /** + * Returns an iterator over the elements in this set. The elements + * are returned in no particular order. + * + * @return an Iterator over the elements in this set + * @see ConcurrentModificationException + */ + public Iterator iterator() { + return map.keySet().iterator(); + } + + /** + * Returns the number of elements in this set (its cardinality). + * + * @return the number of elements in this set (its cardinality) + */ + public int size() { + return map.size(); + } + + /** + * Returns true if this set contains no elements. + * + * @return true if this set contains no elements + */ + public boolean isEmpty() { + return map.isEmpty(); + } + + /** + * Returns true if this set contains the specified element. + * More formally, returns true if and only if this set + * contains an element e such that + * (o==null ? e==null : o.equals(e)). + * + * @param o element whose presence in this set is to be tested + * @return true if this set contains the specified element + */ + public boolean contains(Object o) { + return map.containsKey(o); + } + + /** + * Adds the specified element to this set if it is not already present. + * More formally, adds the specified element e to this set if + * this set contains no element e2 such that + * (e==null ? 
e2==null : e.equals(e2)). + * If this set already contains the element, the call leaves the set + * unchanged and returns false. + * + * @param e element to be added to this set + * @return true if this set did not already contain the specified + * element + */ + public boolean add(E e) { + return map.put(e, PRESENT) == null; + } + + /** + * Removes the specified element from this set if it is present. + * More formally, removes an element e such that + * (o==null ? e==null : o.equals(e)), + * if this set contains such an element. Returns true if + * this set contained the element (or equivalently, if this set + * changed as a result of the call). (This set will not contain the + * element once the call returns.) + * + * @param o object to be removed from this set, if present + * @return true if the set contained the specified element + */ + public boolean remove(Object o) { + return map.remove(o) == PRESENT; + } + + /** + * Removes all of the elements from this set. + * The set will be empty after this call returns. + */ + public void clear() { + map.clear(); + } + + /** + * Returns a shallow copy of this HashSet instance: the elements + * themselves are not cloned. + * + * @return a shallow copy of this set + */ + public Object clone() { + try { + IdentityHashSet newSet = (IdentityHashSet) super.clone(); + newSet.map = (IdentityHashMap) map.clone(); + return newSet; + } catch (CloneNotSupportedException e) { + throw new InternalError(); + } + } + + /** + * Index the state of this HashSet instance to a stream (that is, + * serialize it). + * + * @serialData The capacity of the backing HashMap instance + * (int), and its load factor (float) are emitted, followed by + * the size of the set (the number of elements it contains) + * (int), followed by all of its elements (each an Object) in + * no particular order. 
+ */ + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + // Write out any hidden serialization magic + s.defaultWriteObject(); + + // Write out size + s.writeInt(map.size()); + + // Write out all elements in the proper order. + for (Iterator i = map.keySet().iterator(); i.hasNext();) + s.writeObject(i.next()); + } + + /** + * Reconstitute the HashSet instance from a stream (that is, + * deserialize it). + */ + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + // Read in any hidden serialization magic + s.defaultReadObject(); + + // Read in size + int size = s.readInt(); + + map = new IdentityHashMap(size); + + // Read in all elements in the proper order. + for (int i = 0; i < size; i++) { + E e = (E) s.readObject(); + map.put(e, PRESENT); + } + } +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/MapBackedSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/MapBackedSet.java new file mode 100644 index 00000000000..cfa1d01a538 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/MapBackedSet.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import java.io.Serializable; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +public class MapBackedSet extends AbstractSet implements Serializable { + + private static final long serialVersionUID = -6761513279741915432L; + + private final Map map; + + /** + * Creates a new instance which wraps the specified {@code map}. + */ + public MapBackedSet(Map map) { + this.map = map; + } + + @Override + public int size() { + return map.size(); + } + + @Override + public boolean contains(Object o) { + return map.containsKey(o); + } + + @Override + public boolean add(E o) { + return map.put(o, Boolean.TRUE) == null; + } + + @Override + public boolean remove(Object o) { + return map.remove(o) != null; + } + + @Override + public void clear() { + map.clear(); + } + + @Override + public Iterator iterator() { + return map.keySet().iterator(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/MapBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/MapBuilder.java new file mode 100644 index 00000000000..41a28e030f2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/MapBuilder.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import com.google.common.collect.ImmutableMap; + +import java.util.Map; + +import static com.google.common.collect.Maps.*; + +/** + * @author kimchy (Shay Banon) + */ +public class MapBuilder { + + public static MapBuilder newMapBuilder() { + return new MapBuilder(); + } + + public static MapBuilder newMapBuilder(Map map) { + return new MapBuilder().putAll(map); + } + + private Map map = newHashMap(); + + public MapBuilder() { + this.map = newHashMap(); + } + + public MapBuilder putAll(Map map) { + this.map.putAll(map); + return this; + } + + public MapBuilder put(K key, V value) { + this.map.put(key, value); + return this; + } + + public MapBuilder remove(K key) { + this.map.remove(key); + return this; + } + + public V get(K key) { + return map.get(key); + } + + public boolean containsKey(K key) { + return map.containsKey(key); + } + + public Map map() { + return this.map; + } + + public ImmutableMap immutableMap() { + return ImmutableMap.copyOf(map); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Names.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Names.java new file mode 100644 index 00000000000..1f541d6556b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Names.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import org.elasticsearch.util.concurrent.ThreadLocalRandom; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.util.Random; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class Names { + + public static String randomNodeName(URL nodeNames) { + BufferedReader reader = null; + try { + reader = new BufferedReader(new InputStreamReader(nodeNames.openStream())); + int numberOfNames = 0; + while (reader.readLine() != null) { + numberOfNames++; + } + reader.close(); + reader = new BufferedReader(new InputStreamReader(nodeNames.openStream())); + int number = ((ThreadLocalRandom.current().nextInt(numberOfNames)) % numberOfNames); + for (int i = 0; i < number; i++) { + reader.readLine(); + } + return reader.readLine(); + } catch (IOException e) { + return null; + } finally { + try { + if (reader != null) { + reader.close(); + } + } catch (IOException e) { + // ignore this exception + } + } + } + + public static String randomNodeName(InputStream nodeNames) { + if (nodeNames == null) { + return null; + } + try { + BufferedReader reader = new BufferedReader(new InputStreamReader(nodeNames)); + int numberOfNames = Integer.parseInt(reader.readLine()); + int number = ((new Random().nextInt(numberOfNames)) % numberOfNames) - 2; // 
remove 2 for last line and first line + for (int i = 0; i < number; i++) { + reader.readLine(); + } + return reader.readLine(); + } catch (Exception e) { + return null; + } finally { + try { + nodeNames.close(); + } catch (IOException e) { + // ignore + } + } + } + + private Names() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Nullable.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Nullable.java new file mode 100644 index 00000000000..00ec8446a20 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Nullable.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import java.lang.annotation.*; + +/** + * The presence of this annotation on a method parameter indicates that + * {@code null} is an acceptable value for that parameter. It should not be + * used for parameters of primitive types. 
+ * + * @author kimchy (Shay Banon) + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.PARAMETER, ElementType.FIELD}) +public @interface Nullable { +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Numbers.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Numbers.java new file mode 100644 index 00000000000..04e8cf67eb0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Numbers.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +/** + * A set of utlities around numbers. + * + * @author kimchy (Shay Banon) + */ +public final class Numbers { + + private Numbers() { + + } + + /** + * Converts a byte array to an int. + * + * @param arr The byte array to convert to an int + * @return The int converted + */ + public static int bytesToInt(byte[] arr) { + return (arr[0] << 24) | ((arr[1] & 0xff) << 16) | ((arr[2] & 0xff) << 8) | (arr[3] & 0xff); + } + + /** + * Converts a byte array to a long. 
+ * + * @param arr The byte array to convert to a long + * @return The long converter + */ + public static long bytesToLong(byte[] arr) { + int high = (arr[0] << 24) | ((arr[1] & 0xff) << 16) | ((arr[2] & 0xff) << 8) | (arr[3] & 0xff); + int low = (arr[4] << 24) | ((arr[5] & 0xff) << 16) | ((arr[6] & 0xff) << 8) | (arr[7] & 0xff); + return (((long) high) << 32) | (low & 0x0ffffffffL); + } + + /** + * Converts a byte array to float. + * + * @param arr The byte array to convert to a float + * @return The float converted + */ + public static float bytesToFloat(byte[] arr) { + return Float.intBitsToFloat(bytesToInt(arr)); + } + + /** + * Converts a byte array to double. + * + * @param arr The byte array to convert to a double + * @return The double converted + */ + public static double bytesToDouble(byte[] arr) { + return Double.longBitsToDouble(bytesToLong(arr)); + } + + /** + * Converts an int to a byte array. + * + * @param val The int to convert to a byte array + * @return The byte array converted + */ + public static byte[] intToBytes(int val) { + byte[] arr = new byte[4]; + arr[0] = (byte) (val >>> 24); + arr[1] = (byte) (val >>> 16); + arr[2] = (byte) (val >>> 8); + arr[3] = (byte) (val); + return arr; + } + + /** + * Converts a long to a byte array. + * + * @param val The long to convert to a byte array + * @return The byte array converted + */ + public static byte[] longToBytes(long val) { + byte[] arr = new byte[8]; + arr[0] = (byte) (val >>> 56); + arr[1] = (byte) (val >>> 48); + arr[2] = (byte) (val >>> 40); + arr[3] = (byte) (val >>> 32); + arr[4] = (byte) (val >>> 24); + arr[5] = (byte) (val >>> 16); + arr[6] = (byte) (val >>> 8); + arr[7] = (byte) (val); + return arr; + } + + /** + * Converts a float to a byte array. 
+ * + * @param val The float to convert to a byte array + * @return The byte array converted + */ + public static byte[] floatToBytes(float val) { + return intToBytes(Float.floatToRawIntBits(val)); + } + + /** + * Converts a double to a byte array. + * + * @param val The double to convert to a byte array + * @return The byte array converted + */ + public static byte[] doubleToBytes(double val) { + return longToBytes(Double.doubleToRawLongBits(val)); + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/OsUtils.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/OsUtils.java new file mode 100644 index 00000000000..7f14f7d68b0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/OsUtils.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +/** + * @author kimchy (Shay Banon) + */ +public class OsUtils { + + /** + * The value of System.getProperty("os.name"). + */ + public static final String OS_NAME = System.getProperty("os.name"); + /** + * True iff running on Linux. + */ + public static final boolean LINUX = OS_NAME.startsWith("Linux"); + /** + * True iff running on Windows. 
+ */ + public static final boolean WINDOWS = OS_NAME.startsWith("Windows"); + /** + * True iff running on SunOS. + */ + public static final boolean SUN_OS = OS_NAME.startsWith("SunOS"); + + + private OsUtils() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Preconditions.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Preconditions.java new file mode 100644 index 00000000000..3f85aa7079e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Preconditions.java @@ -0,0 +1,490 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.ElasticSearchNullPointerException; + +import java.util.Collection; +import java.util.NoSuchElementException; + +/** + * Simple static methods to be called at the start of your own methods to verify + * correct arguments and state. This allows constructs such as + *

+ *     if (count <= 0) {
+ *       throw new ElasticSearchIllegalArgumentException("must be positive: " + count);
+ *     }
+ *

+ * to be replaced with the more compact + *

+ *     checkArgument(count > 0, "must be positive: %s", count);
+ *

+ * Note that the sense of the expression is inverted; with {@code Preconditions} + * you declare what you expect to be true, just as you do with an + * + * {@code assert} or a JUnit {@code assertTrue()} call. + *

+ *

Take care not to confuse precondition checking with other similar types + * of checks! Precondition exceptions -- including those provided here, but also + * {@link IndexOutOfBoundsException}, {@link NoSuchElementException}, {@link + * UnsupportedOperationException} and others -- are used to signal that the + * calling method has made an error. This tells the caller that it should + * not have invoked the method when it did, with the arguments it did, or + * perhaps ever. Postcondition or other invariant failures should not + * throw these types of exceptions. + *

+ *

Note: The methods of the {@code Preconditions} class are highly + * unusual in one way: they are supposed to throw exceptions, and promise + * in their specifications to do so even when given perfectly valid input. That + * is, {@code null} is a valid parameter to the method {@link + * #checkNotNull(Object)} -- and technically this parameter could be even marked + * as Nullable -- yet the method will still throw an exception anyway, + * because that's what its contract says to do. + * + * @author kimchy (Shay Banon) + */ +public final class Preconditions { + private Preconditions() { + } + + /** + * Ensures the truth of an expression involving one or more parameters to the + * calling method. + * + * @param expression a boolean expression + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code expression} is false + */ + public static void checkArgument(boolean expression) { + if (!expression) { + throw new ElasticSearchIllegalArgumentException(); + } + } + + /** + * Ensures the truth of an expression involving one or more parameters to the + * calling method. + * + * @param expression a boolean expression + * @param errorMessage the exception message to use if the check fails; will + * be converted to a string using {@link String#valueOf(Object)} + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code expression} is false + */ + public static void checkArgument(boolean expression, Object errorMessage) { + if (!expression) { + throw new ElasticSearchIllegalArgumentException(String.valueOf(errorMessage)); + } + } + + /** + * Ensures the truth of an expression involving one or more parameters to the + * calling method. + * + * @param expression a boolean expression + * @param errorMessageTemplate a template for the exception message should the + * check fail. The message is formed by replacing each {@code %s} + * placeholder in the template with an argument. 
These are matched by + * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. + * Unmatched arguments will be appended to the formatted message in square + * braces. Unmatched placeholders will be left as-is. + * @param errorMessageArgs the arguments to be substituted into the message + * template. Arguments are converted to strings using + * {@link String#valueOf(Object)}. + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code expression} is false + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if the check fails and either {@code + * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let + * this happen) + */ + public static void checkArgument(boolean expression, + String errorMessageTemplate, Object... errorMessageArgs) { + if (!expression) { + throw new ElasticSearchIllegalArgumentException( + format(errorMessageTemplate, errorMessageArgs)); + } + } + + /** + * Ensures the truth of an expression involving the state of the calling + * instance, but not involving any parameters to the calling method. + * + * @param expression a boolean expression + * @throws org.elasticsearch.ElasticSearchIllegalStateException + * if {@code expression} is false + */ + public static void checkState(boolean expression) { + if (!expression) { + throw new ElasticSearchIllegalStateException(); + } + } + + /** + * Ensures the truth of an expression involving the state of the calling + * instance, but not involving any parameters to the calling method. 
+ * + * @param expression a boolean expression + * @param errorMessage the exception message to use if the check fails; will + * be converted to a string using {@link String#valueOf(Object)} + * @throws org.elasticsearch.ElasticSearchIllegalStateException + * if {@code expression} is false + */ + public static void checkState(boolean expression, Object errorMessage) { + if (!expression) { + throw new ElasticSearchIllegalStateException(String.valueOf(errorMessage)); + } + } + + /** + * Ensures the truth of an expression involving the state of the calling + * instance, but not involving any parameters to the calling method. + * + * @param expression a boolean expression + * @param errorMessageTemplate a template for the exception message should the + * check fail. The message is formed by replacing each {@code %s} + * placeholder in the template with an argument. These are matched by + * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. + * Unmatched arguments will be appended to the formatted message in square + * braces. Unmatched placeholders will be left as-is. + * @param errorMessageArgs the arguments to be substituted into the message + * template. Arguments are converted to strings using + * {@link String#valueOf(Object)}. + * @throws org.elasticsearch.ElasticSearchIllegalStateException + * if {@code expression} is false + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if the check fails and either {@code + * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let + * this happen) + */ + public static void checkState(boolean expression, + String errorMessageTemplate, Object... errorMessageArgs) { + if (!expression) { + throw new ElasticSearchIllegalStateException( + format(errorMessageTemplate, errorMessageArgs)); + } + } + + /** + * Ensures that an object reference passed as a parameter to the calling + * method is not null. 
+ * + * @param reference an object reference + * @return the non-null reference that was validated + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if {@code reference} is null + */ + public static T checkNotNull(T reference) { + if (reference == null) { + throw new ElasticSearchNullPointerException(); + } + return reference; + } + + /** + * Ensures that an object reference passed as a parameter to the calling + * method is not null. + * + * @param reference an object reference + * @param errorMessage the exception message to use if the check fails; will + * be converted to a string using {@link String#valueOf(Object)} + * @return the non-null reference that was validated + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if {@code reference} is null + */ + public static T checkNotNull(T reference, Object errorMessage) { + if (reference == null) { + throw new ElasticSearchNullPointerException(String.valueOf(errorMessage)); + } + return reference; + } + + /** + * Ensures that an object reference passed as a parameter to the calling + * method is not null. + * + * @param reference an object reference + * @param errorMessageTemplate a template for the exception message should the + * check fail. The message is formed by replacing each {@code %s} + * placeholder in the template with an argument. These are matched by + * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. + * Unmatched arguments will be appended to the formatted message in square + * braces. Unmatched placeholders will be left as-is. + * @param errorMessageArgs the arguments to be substituted into the message + * template. Arguments are converted to strings using + * {@link String#valueOf(Object)}. + * @return the non-null reference that was validated + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if {@code reference} is null + */ + public static T checkNotNull(T reference, String errorMessageTemplate, + Object... 
errorMessageArgs) { + if (reference == null) { + // If either of these parameters is null, the right thing happens anyway + throw new ElasticSearchNullPointerException( + format(errorMessageTemplate, errorMessageArgs)); + } + return reference; + } + + /** + * Ensures that an {@code Iterable} object passed as a parameter to the + * calling method is not null and contains no null elements. + * + * @param iterable the iterable to check the contents of + * @return the non-null {@code iterable} reference just validated + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if {@code iterable} is null or contains at + * least one null element + */ + public static > T checkContentsNotNull(T iterable) { + if (containsOrIsNull(iterable)) { + throw new ElasticSearchNullPointerException(); + } + return iterable; + } + + /** + * Ensures that an {@code Iterable} object passed as a parameter to the + * calling method is not null and contains no null elements. + * + * @param iterable the iterable to check the contents of + * @param errorMessage the exception message to use if the check fails; will + * be converted to a string using {@link String#valueOf(Object)} + * @return the non-null {@code iterable} reference just validated + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if {@code iterable} is null or contains at + * least one null element + */ + public static > T checkContentsNotNull( + T iterable, Object errorMessage) { + if (containsOrIsNull(iterable)) { + throw new ElasticSearchNullPointerException(String.valueOf(errorMessage)); + } + return iterable; + } + + /** + * Ensures that an {@code Iterable} object passed as a parameter to the + * calling method is not null and contains no null elements. + * + * @param iterable the iterable to check the contents of + * @param errorMessageTemplate a template for the exception message should the + * check fail. 
The message is formed by replacing each {@code %s} + * placeholder in the template with an argument. These are matched by + * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. + * Unmatched arguments will be appended to the formatted message in square + * braces. Unmatched placeholders will be left as-is. + * @param errorMessageArgs the arguments to be substituted into the message + * template. Arguments are converted to strings using + * {@link String#valueOf(Object)}. + * @return the non-null {@code iterable} reference just validated + * @throws org.elasticsearch.ElasticSearchNullPointerException + * if {@code iterable} is null or contains at + * least one null element + */ + public static > T checkContentsNotNull(T iterable, + String errorMessageTemplate, Object... errorMessageArgs) { + if (containsOrIsNull(iterable)) { + throw new ElasticSearchNullPointerException( + format(errorMessageTemplate, errorMessageArgs)); + } + return iterable; + } + + private static boolean containsOrIsNull(Iterable iterable) { + if (iterable == null) { + return true; + } + + if (iterable instanceof Collection) { + Collection collection = (Collection) iterable; + try { + return collection.contains(null); + } catch (ElasticSearchNullPointerException e) { + // A NPE implies that the collection doesn't contain null. + return false; + } + } else { + for (Object element : iterable) { + if (element == null) { + return true; + } + } + return false; + } + } + + /** + * Ensures that {@code index} specifies a valid element in an array, + * list or string of size {@code size}. An element index may range from zero, + * inclusive, to {@code size}, exclusive. 
+ * + * @param index a user-supplied index identifying an element of an array, list + * or string + * @param size the size of that array, list or string + * @throws IndexOutOfBoundsException if {@code index} is negative or is not + * less than {@code size} + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code size} is negative + */ + public static void checkElementIndex(int index, int size) { + checkElementIndex(index, size, "index"); + } + + /** + * Ensures that {@code index} specifies a valid element in an array, + * list or string of size {@code size}. An element index may range from zero, + * inclusive, to {@code size}, exclusive. + * + * @param index a user-supplied index identifying an element of an array, list + * or string + * @param size the size of that array, list or string + * @param desc the text to use to describe this index in an error message + * @throws IndexOutOfBoundsException if {@code index} is negative or is not + * less than {@code size} + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code size} is negative + */ + public static void checkElementIndex(int index, int size, String desc) { + checkArgument(size >= 0, "negative size: %s", size); + if (index < 0) { + throw new IndexOutOfBoundsException( + format("%s (%s) must not be negative", desc, index)); + } + if (index >= size) { + throw new IndexOutOfBoundsException( + format("%s (%s) must be less than size (%s)", desc, index, size)); + } + } + + /** + * Ensures that {@code index} specifies a valid position in an array, + * list or string of size {@code size}. A position index may range from zero + * to {@code size}, inclusive. 
+ * + * @param index a user-supplied index identifying a position in an array, list + * or string + * @param size the size of that array, list or string + * @throws IndexOutOfBoundsException if {@code index} is negative or is + * greater than {@code size} + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code size} is negative + */ + public static void checkPositionIndex(int index, int size) { + checkPositionIndex(index, size, "index"); + } + + /** + * Ensures that {@code index} specifies a valid position in an array, + * list or string of size {@code size}. A position index may range from zero + * to {@code size}, inclusive. + * + * @param index a user-supplied index identifying a position in an array, list + * or string + * @param size the size of that array, list or string + * @param desc the text to use to describe this index in an error message + * @throws IndexOutOfBoundsException if {@code index} is negative or is + * greater than {@code size} + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code size} is negative + */ + public static void checkPositionIndex(int index, int size, String desc) { + checkArgument(size >= 0, "negative size: %s", size); + if (index < 0) { + throw new IndexOutOfBoundsException(format( + "%s (%s) must not be negative", desc, index)); + } + if (index > size) { + throw new IndexOutOfBoundsException(format( + "%s (%s) must not be greater than size (%s)", desc, index, size)); + } + } + + /** + * Ensures that {@code start} and {@code end} specify a valid positions + * in an array, list or string of size {@code size}, and are in order. A + * position index may range from zero to {@code size}, inclusive. 
+ * + * @param start a user-supplied index identifying a starting position in an + * array, list or string + * @param end a user-supplied index identifying a ending position in an array, + * list or string + * @param size the size of that array, list or string + * @throws IndexOutOfBoundsException if either index is negative or is + * greater than {@code size}, or if {@code end} is less than {@code start} + * @throws org.elasticsearch.ElasticSearchIllegalArgumentException + * if {@code size} is negative + */ + public static void checkPositionIndexes(int start, int end, int size) { + checkPositionIndex(start, size, "start index"); + checkPositionIndex(end, size, "end index"); + if (end < start) { + throw new IndexOutOfBoundsException(format( + "end index (%s) must not be less than start index (%s)", end, start)); + } + } + + /** + * Substitutes each {@code %s} in {@code template} with an argument. These + * are matched by position - the first {@code %s} gets {@code args[0]}, etc. + * If there are more arguments than placeholders, the unmatched arguments will + * be appended to the end of the formatted message in square braces. + * + * @param template a non-null string containing 0 or more {@code %s} + * placeholders. + * @param args the arguments to be substituted into the message + * template. Arguments are converted to strings using + * {@link String#valueOf(Object)}. Arguments can be null. + */ + // VisibleForTesting + static String format(String template, Object... 
args) { + // start substituting the arguments into the '%s' placeholders + StringBuilder builder = new StringBuilder( + template.length() + 16 * args.length); + int templateStart = 0; + int i = 0; + while (i < args.length) { + int placeholderStart = template.indexOf("%s", templateStart); + if (placeholderStart == -1) { + break; + } + builder.append(template.substring(templateStart, placeholderStart)); + builder.append(args[i++]); + templateStart = placeholderStart + 2; + } + builder.append(template.substring(templateStart)); + + // if we run out of placeholders, append the extra args in square braces + if (i < args.length) { + builder.append(" ["); + builder.append(args[i++]); + while (i < args.length) { + builder.append(", "); + builder.append(args[i++]); + } + builder.append("]"); + } + + return builder.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/PropertyPlaceholder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/PropertyPlaceholder.java new file mode 100644 index 00000000000..4b6b8eca85a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/PropertyPlaceholder.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; + +/** + * Utility class for working with Strings that have placeholder values in them. A placeholder takes the form + * ${name}. Using PropertyPlaceholder these placeholders can be substituted for + * user-supplied values. + *

+ *

Values for substitution can be supplied using a {@link Properties} instance or using a + * {@link PlaceholderResolver}. + * + * @author kimchy (Shay Banon) + */ +public class PropertyPlaceholder { + + private final String placeholderPrefix; + + private final String placeholderSuffix; + + private final boolean ignoreUnresolvablePlaceholders; + + /** + * Creates a new PropertyPlaceholderHelper that uses the supplied prefix and suffix. Unresolvable + * placeholders are ignored. + * + * @param placeholderPrefix the prefix that denotes the start of a placeholder. + * @param placeholderSuffix the suffix that denotes the end of a placeholder. + */ + public PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix) { + this(placeholderPrefix, placeholderSuffix, true); + } + + /** + * Creates a new PropertyPlaceholderHelper that uses the supplied prefix and suffix. + * + * @param placeholderPrefix the prefix that denotes the start of a placeholder. + * @param placeholderSuffix the suffix that denotes the end of a placeholder. + * @param ignoreUnresolvablePlaceholders indicates whether unresolvable placeholders should be ignored + * (true) or cause an exception (false). + */ + public PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix, + boolean ignoreUnresolvablePlaceholders) { + Preconditions.checkNotNull(placeholderPrefix, "Argument 'placeholderPrefix' must not be null."); + Preconditions.checkNotNull(placeholderSuffix, "Argument 'placeholderSuffix' must not be null."); + this.placeholderPrefix = placeholderPrefix; + this.placeholderSuffix = placeholderSuffix; + this.ignoreUnresolvablePlaceholders = ignoreUnresolvablePlaceholders; + } + + /** + * Replaces all placeholders of format ${name} with the corresponding property from the supplied {@link + * Properties}. + * + * @param value the value containing the placeholders to be replaced. + * @param properties the Properties to use for replacement. 
+ * @return the supplied value with placeholders replaced inline. + */ + public String replacePlaceholders(String value, final Properties properties) { + Preconditions.checkNotNull(properties, "Argument 'properties' must not be null."); + return replacePlaceholders(value, new PlaceholderResolver() { + + public String resolvePlaceholder(String placeholderName) { + return properties.getProperty(placeholderName); + } + }); + } + + /** + * Replaces all placeholders of format ${name} with the value returned from the supplied {@link + * PlaceholderResolver}. + * + * @param value the value containing the placeholders to be replaced. + * @param placeholderResolver the PlaceholderResolver to use for replacement. + * @return the supplied value with placeholders replaced inline. + */ + public String replacePlaceholders(String value, PlaceholderResolver placeholderResolver) { + Preconditions.checkNotNull(value, "Argument 'value' must not be null."); + return parseStringValue(value, placeholderResolver, new HashSet()); + } + + protected String parseStringValue(String strVal, PlaceholderResolver placeholderResolver, + Set visitedPlaceholders) { + StringBuilder buf = new StringBuilder(strVal); + + int startIndex = strVal.indexOf(this.placeholderPrefix); + while (startIndex != -1) { + int endIndex = findPlaceholderEndIndex(buf, startIndex); + if (endIndex != -1) { + String placeholder = buf.substring(startIndex + this.placeholderPrefix.length(), endIndex); + if (!visitedPlaceholders.add(placeholder)) { + throw new IllegalArgumentException( + "Circular placeholder reference '" + placeholder + "' in property definitions"); + } + // Recursive invocation, parsing placeholders contained in the placeholder key. + placeholder = parseStringValue(placeholder, placeholderResolver, visitedPlaceholders); + + // Now obtain the value for the fully resolved key... 
+ int defaultValueIdx = placeholder.indexOf(':'); + String defaultValue = null; + if (defaultValueIdx != -1) { + defaultValue = placeholder.substring(defaultValueIdx + 1); + placeholder = placeholder.substring(0, defaultValueIdx); + } + String propVal = placeholderResolver.resolvePlaceholder(placeholder); + if (propVal == null) { + propVal = defaultValue; + } + if (propVal != null) { + // Recursive invocation, parsing placeholders contained in the + // previously resolved placeholder value. + propVal = parseStringValue(propVal, placeholderResolver, visitedPlaceholders); + buf.replace(startIndex, endIndex + this.placeholderSuffix.length(), propVal); + startIndex = buf.indexOf(this.placeholderPrefix, startIndex + propVal.length()); + } else if (this.ignoreUnresolvablePlaceholders) { + // Proceed with unprocessed value. + startIndex = buf.indexOf(this.placeholderPrefix, endIndex + this.placeholderSuffix.length()); + } else { + throw new IllegalArgumentException("Could not resolve placeholder '" + placeholder + "'"); + } + + visitedPlaceholders.remove(placeholder); + } else { + startIndex = -1; + } + } + + return buf.toString(); + } + + private int findPlaceholderEndIndex(CharSequence buf, int startIndex) { + int index = startIndex + this.placeholderPrefix.length(); + int withinNestedPlaceholder = 0; + while (index < buf.length()) { + if (Strings.substringMatch(buf, index, this.placeholderSuffix)) { + if (withinNestedPlaceholder > 0) { + withinNestedPlaceholder--; + index = index + this.placeholderPrefix.length() - 1; + } else { + return index; + } + } else if (Strings.substringMatch(buf, index, this.placeholderPrefix)) { + withinNestedPlaceholder++; + index = index + this.placeholderPrefix.length(); + } else { + index++; + } + } + return -1; + } + + /** + * Strategy interface used to resolve replacement values for placeholders contained in Strings. 
+ * + * @see PropertyPlaceholder + */ + public static interface PlaceholderResolver { + + /** + * Resolves the supplied placeholder name into the replacement value. + * + * @param placeholderName the name of the placeholder to resolve. + * @return the replacement value or null if no replacement is to be made. + */ + String resolvePlaceholder(String placeholderName); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Required.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Required.java new file mode 100644 index 00000000000..b4a75fa7e3a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Required.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import java.lang.annotation.*; + +/** + * The presence of this annotation on a method parameter indicates that + * {@code null} is an acceptable value for that parameter. It should not be + * used for parameters of primitive types. 
+ * + * @author kimchy (Shay Banon) + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD, ElementType.PARAMETER, ElementType.FIELD}) +public @interface Required { +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/SafeArray.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/SafeArray.java new file mode 100644 index 00000000000..dbf3d7a50f8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/SafeArray.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +/** + * An array that is safe in terms of size. + * + * @author kimchy (Shay Banon) + */ +public interface SafeArray { + + T get(int index); + + int size(); + + void add(T value); + + void add(int index, T value); + + void clear(); + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a Procedure value + * @return true if the procedure did not terminate prematurely. + */ + boolean forEach(Procedure procedure); + + static interface Procedure { + /** + * Executes this procedure. 
A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value + * @return true if additional invocations of the procedure are + * allowed. + */ + boolean execute(T value); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/SizeUnit.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/SizeUnit.java new file mode 100644 index 00000000000..580ef130838 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/SizeUnit.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +/** + * A SizeUnit represents size at a given unit of + * granularity and provides utility methods to convert across units. + * A SizeUnit does not maintain size information, but only + * helps organize and use size representations that may be maintained + * separately across various contexts. 
+ * + * @author kimchy (Shay Banon) + */ +public enum SizeUnit { + BYTES { + @Override public long toBytes(long size) { + return size; + }@Override public long toKB(long size) { + return size / (C1 / C0); + }@Override public long toMB(long size) { + return size / (C2 / C0); + }@Override public long toGB(long size) { + return size / (C3 / C0); + }}, + KB { + @Override public long toBytes(long size) { + return x(size, C1 / C0, MAX / (C1 / C0)); + }@Override public long toKB(long size) { + return size; + }@Override public long toMB(long size) { + return size / (C2 / C1); + }@Override public long toGB(long size) { + return size / (C3 / C1); + }}, + MB { + @Override public long toBytes(long size) { + return x(size, C2 / C0, MAX / (C2 / C0)); + }@Override public long toKB(long size) { + return x(size, C2 / C1, MAX / (C2 / C1)); + }@Override public long toMB(long size) { + return size; + }@Override public long toGB(long size) { + return size / (C3 / C2); + }}, + GB { + @Override public long toBytes(long size) { + return x(size, C3 / C0, MAX / (C3 / C0)); + }@Override public long toKB(long size) { + return x(size, C3 / C1, MAX / (C3 / C1)); + }@Override public long toMB(long size) { + return x(size, C3 / C2, MAX / (C3 / C2)); + }@Override public long toGB(long size) { + return size; + }}; + + static final long C0 = 1L; + static final long C1 = C0 * 1024L; + static final long C2 = C1 * 1024L; + static final long C3 = C2 * 1024L; + + static final long MAX = Long.MAX_VALUE; + + /** + * Scale d by m, checking for overflow. + * This has a short name to make above code more readable. 
+ */ + static long x(long d, long m, long over) { + if (d > over) return Long.MAX_VALUE; + if (d < -over) return Long.MIN_VALUE; + return d * m; + } + + + public long toBytes(long size) { + throw new AbstractMethodError(); + } + + public long toKB(long size) { + throw new AbstractMethodError(); + } + + public long toMB(long size) { + throw new AbstractMethodError(); + } + + public long toGB(long size) { + throw new AbstractMethodError(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/SizeValue.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/SizeValue.java new file mode 100644 index 00000000000..08a7e7f52cb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/SizeValue.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util; + +import org.elasticsearch.ElasticSearchParseException; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; + +/** + * @author kimchy (Shay Banon) + */ +public class SizeValue implements Serializable, Streamable { + + public static final SizeValue UNKNOWN = new SizeValue(-1); + + private long size; + + private SizeUnit sizeUnit; + + private SizeValue() { + + } + + public SizeValue(long bytes) { + this(bytes, SizeUnit.BYTES); + } + + public SizeValue(long size, SizeUnit sizeUnit) { + this.size = size; + this.sizeUnit = sizeUnit; + } + + public long bytes() { + return sizeUnit.toBytes(size); + } + + public long kb() { + return sizeUnit.toKB(size); + } + + public long mb() { + return sizeUnit.toMB(size); + } + + public long gb() { + return sizeUnit.toGB(size); + } + + public double kbFrac() { + return ((double) bytes()) / SizeUnit.C1; + } + + public double mbFrac() { + return ((double) bytes()) / SizeUnit.C2; + } + + public double gbFrac() { + return ((double) bytes()) / SizeUnit.C3; + } + + @Override public String toString() { + long bytes = bytes(); + double value = bytes; + String suffix = "b"; + if (bytes >= SizeUnit.C3) { + value = gbFrac(); + suffix = "gb"; + } else if (bytes >= SizeUnit.C2) { + value = mbFrac(); + suffix = "mb"; + } else if (bytes >= SizeUnit.C1) { + value = kbFrac(); + suffix = "kb"; + } + return Strings.format1Decimals(value, suffix); + } + + public static SizeValue parse(String sValue, SizeValue defaultValue) throws ElasticSearchParseException { + if (sValue == null) { + return defaultValue; + } + long bytes; + try { + if (sValue.endsWith("b")) { + bytes = Long.parseLong(sValue.substring(0, sValue.length() - 1)); + } else if (sValue.endsWith("k") || sValue.endsWith("K")) { + bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 1024); + } else if 
(sValue.endsWith("kb")) { + bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * 1024); + } else if (sValue.endsWith("m") || sValue.endsWith("M")) { + bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 1024 * 1024); + } else if (sValue.endsWith("mb")) { + bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * 1024 * 1024); + } else if (sValue.endsWith("g") || sValue.endsWith("G")) { + bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 1024 * 1024 * 1024); + } else if (sValue.endsWith("gb")) { + bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * 1024 * 1024 * 1024); + } else { + bytes = Long.parseLong(sValue); + } + } catch (NumberFormatException e) { + throw new ElasticSearchParseException("Failed to parse [" + sValue + "]", e); + } + return new SizeValue(bytes, SizeUnit.BYTES); + } + + public static SizeValue readSizeValue(DataInput dataInput) throws IOException, ClassNotFoundException { + SizeValue sizeValue = new SizeValue(); + sizeValue.readFrom(dataInput); + return sizeValue; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + size = in.readLong(); + sizeUnit = SizeUnit.BYTES; + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(bytes()); + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SizeValue sizeValue = (SizeValue) o; + + if (size != sizeValue.size) return false; + if (sizeUnit != sizeValue.sizeUnit) return false; + + return true; + } + + @Override public int hashCode() { + int result = (int) (size ^ (size >>> 32)); + result = 31 * result + (sizeUnit != null ? 
sizeUnit.hashCode() : 0); + return result; + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/StopWatch.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/StopWatch.java new file mode 100644 index 00000000000..a2a2ab5eb00 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/StopWatch.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import java.text.NumberFormat; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Simple stop watch, allowing for timing of a number of tasks, + * exposing total running time and running time for each named task. + *

+ * <p>Conceals use of <code>System.currentTimeMillis()</code>, improving the
+ * readability of application code and reducing the likelihood of calculation errors.
+ *
+ * <p>Note that this object is not designed to be thread-safe and does not
+ * use synchronization.
+ *
+ * <p>
This class is normally used to verify performance during proof-of-concepts + * and in development, rather than as part of production applications. + * + * @author kimchy (Shay Banon) + */ +public class StopWatch { + + /** + * Identifier of this stop watch. + * Handy when we have output from multiple stop watches + * and need to distinguish between them in log or console output. + */ + private final String id; + + private boolean keepTaskList = true; + + private final List taskList = new LinkedList(); + + /** + * Start time of the current task + */ + private long startTimeMillis; + + /** + * Is the stop watch currently running? + */ + private boolean running; + + /** + * Name of the current task + */ + private String currentTaskName; + + private TaskInfo lastTaskInfo; + + private int taskCount; + + /** + * Total running time + */ + private long totalTimeMillis; + + /** + * Construct a new stop watch. Does not start any task. + */ + public StopWatch() { + this.id = ""; + } + + /** + * Construct a new stop watch with the given id. + * Does not start any task. + * + * @param id identifier for this stop watch. + * Handy when we have output from multiple stop watches + * and need to distinguish between them. + */ + public StopWatch(String id) { + this.id = id; + } + + /** + * Determine whether the TaskInfo array is built over time. Set this to + * "false" when using a StopWatch for millions of intervals, or the task + * info structure will consume excessive memory. Default is "true". + */ + public StopWatch keepTaskList(boolean keepTaskList) { + this.keepTaskList = keepTaskList; + return this; + } + + /** + * Start an unnamed task. The results are undefined if {@link #stop()} + * or timing methods are called without invoking this method. + * + * @see #stop() + */ + public StopWatch start() throws IllegalStateException { + return start(""); + } + + /** + * Start a named task. 
The results are undefined if {@link #stop()} + * or timing methods are called without invoking this method. + * + * @param taskName the name of the task to start + * @see #stop() + */ + public StopWatch start(String taskName) throws IllegalStateException { + if (this.running) { + throw new IllegalStateException("Can't start StopWatch: it's already running"); + } + this.startTimeMillis = System.currentTimeMillis(); + this.running = true; + this.currentTaskName = taskName; + return this; + } + + /** + * Stop the current task. The results are undefined if timing + * methods are called without invoking at least one pair + * {@link #start()} / {@link #stop()} methods. + * + * @see #start() + */ + public StopWatch stop() throws IllegalStateException { + if (!this.running) { + throw new IllegalStateException("Can't stop StopWatch: it's not running"); + } + long lastTime = System.currentTimeMillis() - this.startTimeMillis; + this.totalTimeMillis += lastTime; + this.lastTaskInfo = new TaskInfo(this.currentTaskName, lastTime); + if (this.keepTaskList) { + this.taskList.add(lastTaskInfo); + } + ++this.taskCount; + this.running = false; + this.currentTaskName = null; + return this; + } + + /** + * Return whether the stop watch is currently running. + */ + public boolean isRunning() { + return this.running; + } + + /** + * Return the time taken by the last task. + */ + public TimeValue lastTaskTime() throws IllegalStateException { + if (this.lastTaskInfo == null) { + throw new IllegalStateException("No tests run: can't get last interval"); + } + return this.lastTaskInfo.getTime(); + } + + /** + * Return the name of the last task. + */ + public String lastTaskName() throws IllegalStateException { + if (this.lastTaskInfo == null) { + throw new IllegalStateException("No tests run: can't get last interval"); + } + return this.lastTaskInfo.getTaskName(); + } + + /** + * Return the total time for all tasks. 
+ */ + public TimeValue totalTime() { + return new TimeValue(totalTimeMillis, TimeUnit.MILLISECONDS); + } + + /** + * Return the number of tasks timed. + */ + public int taskCount() { + return taskCount; + } + + /** + * Return an array of the data for tasks performed. + */ + public TaskInfo[] taskInfo() { + if (!this.keepTaskList) { + throw new UnsupportedOperationException("Task info is not being kept!"); + } + return this.taskList.toArray(new TaskInfo[this.taskList.size()]); + } + + /** + * Return a short description of the total running time. + */ + public String shortSummary() { + return "StopWatch '" + this.id + "': running time = " + totalTime(); + } + + /** + * Return a string with a table describing all tasks performed. + * For custom reporting, call getTaskInfo() and use the task info directly. + */ + public String prettyPrint() { + StringBuilder sb = new StringBuilder(shortSummary()); + sb.append('\n'); + if (!this.keepTaskList) { + sb.append("No task info kept"); + } else { + sb.append("-----------------------------------------\n"); + sb.append("ms % Task name\n"); + sb.append("-----------------------------------------\n"); + NumberFormat nf = NumberFormat.getNumberInstance(); + nf.setMinimumIntegerDigits(5); + nf.setGroupingUsed(false); + NumberFormat pf = NumberFormat.getPercentInstance(); + pf.setMinimumIntegerDigits(3); + pf.setGroupingUsed(false); + for (TaskInfo task : taskInfo()) { + sb.append(nf.format(task.getTime().millis())).append(" "); + sb.append(pf.format(task.getTime().secondsFrac() / totalTime().secondsFrac())).append(" "); + sb.append(task.getTaskName()).append("\n"); + } + } + return sb.toString(); + } + + /** + * Return an informative string describing all tasks performed + * For custom reporting, call getTaskInfo() and use the task info directly. 
+ */ + @Override public String toString() { + StringBuilder sb = new StringBuilder(shortSummary()); + if (this.keepTaskList) { + for (TaskInfo task : taskInfo()) { + sb.append("; [").append(task.getTaskName()).append("] took ").append(task.getTime()); + long percent = Math.round((100.0f * task.getTime().millis()) / totalTime().millis()); + sb.append(" = ").append(percent).append("%"); + } + } else { + sb.append("; no task info kept"); + } + return sb.toString(); + } + + /** + * Inner class to hold data about one task executed within the stop watch. + */ + public static class TaskInfo { + + private final String taskName; + + private final TimeValue timeValue; + + private TaskInfo(String taskName, long timeMillis) { + this.taskName = taskName; + this.timeValue = new TimeValue(timeMillis, TimeUnit.MILLISECONDS); + } + + /** + * Return the name of this task. + */ + public String getTaskName() { + return taskName; + } + + /** + * Return the time this task took. + */ + public TimeValue getTime() { + return timeValue; + } + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Strings.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Strings.java new file mode 100644 index 00000000000..7b4e5d4d18d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Strings.java @@ -0,0 +1,1192 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import com.google.common.collect.ImmutableSet; + +import java.util.*; + +/** + * @author kimchy (Shay Banon) + */ +public class Strings { + + public static final String[] EMPTY_ARRAY = new String[0]; + + private static final String FOLDER_SEPARATOR = "/"; + + private static final String WINDOWS_FOLDER_SEPARATOR = "\\"; + + private static final String TOP_PATH = ".."; + + private static final String CURRENT_PATH = "."; + + private static final char EXTENSION_SEPARATOR = '.'; + + + //--------------------------------------------------------------------- + // General convenience methods for working with Strings + //--------------------------------------------------------------------- + + /** + * Check that the given CharSequence is neither null nor of length 0. + * Note: Will return true for a CharSequence that purely consists of whitespace. + *

+     * Strings.hasLength(null) = false
+     * Strings.hasLength("") = false
+     * Strings.hasLength(" ") = true
+     * Strings.hasLength("Hello") = true
+     * 
+ * + * @param str the CharSequence to check (may be null) + * @return true if the CharSequence is not null and has length + * @see #hasText(String) + */ + public static boolean hasLength(CharSequence str) { + return (str != null && str.length() > 0); + } + + /** + * Check that the given String is neither null nor of length 0. + * Note: Will return true for a String that purely consists of whitespace. + * + * @param str the String to check (may be null) + * @return true if the String is not null and has length + * @see #hasLength(CharSequence) + */ + public static boolean hasLength(String str) { + return hasLength((CharSequence) str); + } + + /** + * Check whether the given CharSequence has actual text. + * More specifically, returns true if the string not null, + * its length is greater than 0, and it contains at least one non-whitespace character. + *

+     * Strings.hasText(null) = false
+     * Strings.hasText("") = false
+     * Strings.hasText(" ") = false
+     * Strings.hasText("12345") = true
+     * Strings.hasText(" 12345 ") = true
+     * 
+ * + * @param str the CharSequence to check (may be null) + * @return true if the CharSequence is not null, + * its length is greater than 0, and it does not contain whitespace only + * @see java.lang.Character#isWhitespace + */ + public static boolean hasText(CharSequence str) { + if (!hasLength(str)) { + return false; + } + int strLen = str.length(); + for (int i = 0; i < strLen; i++) { + if (!Character.isWhitespace(str.charAt(i))) { + return true; + } + } + return false; + } + + /** + * Check whether the given String has actual text. + * More specifically, returns true if the string not null, + * its length is greater than 0, and it contains at least one non-whitespace character. + * + * @param str the String to check (may be null) + * @return true if the String is not null, its length is + * greater than 0, and it does not contain whitespace only + * @see #hasText(CharSequence) + */ + public static boolean hasText(String str) { + return hasText((CharSequence) str); + } + + /** + * Check whether the given CharSequence contains any whitespace characters. + * + * @param str the CharSequence to check (may be null) + * @return true if the CharSequence is not empty and + * contains at least 1 whitespace character + * @see java.lang.Character#isWhitespace + */ + public static boolean containsWhitespace(CharSequence str) { + if (!hasLength(str)) { + return false; + } + int strLen = str.length(); + for (int i = 0; i < strLen; i++) { + if (Character.isWhitespace(str.charAt(i))) { + return true; + } + } + return false; + } + + /** + * Check whether the given String contains any whitespace characters. + * + * @param str the String to check (may be null) + * @return true if the String is not empty and + * contains at least 1 whitespace character + * @see #containsWhitespace(CharSequence) + */ + public static boolean containsWhitespace(String str) { + return containsWhitespace((CharSequence) str); + } + + /** + * Trim leading and trailing whitespace from the given String. 
+ * + * @param str the String to check + * @return the trimmed String + * @see java.lang.Character#isWhitespace + */ + public static String trimWhitespace(String str) { + if (!hasLength(str)) { + return str; + } + StringBuilder sb = new StringBuilder(str); + while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) { + sb.deleteCharAt(0); + } + while (sb.length() > 0 && Character.isWhitespace(sb.charAt(sb.length() - 1))) { + sb.deleteCharAt(sb.length() - 1); + } + return sb.toString(); + } + + /** + * Trim all whitespace from the given String: + * leading, trailing, and inbetween characters. + * + * @param str the String to check + * @return the trimmed String + * @see java.lang.Character#isWhitespace + */ + public static String trimAllWhitespace(String str) { + if (!hasLength(str)) { + return str; + } + StringBuilder sb = new StringBuilder(str); + int index = 0; + while (sb.length() > index) { + if (Character.isWhitespace(sb.charAt(index))) { + sb.deleteCharAt(index); + } else { + index++; + } + } + return sb.toString(); + } + + /** + * Trim leading whitespace from the given String. + * + * @param str the String to check + * @return the trimmed String + * @see java.lang.Character#isWhitespace + */ + public static String trimLeadingWhitespace(String str) { + if (!hasLength(str)) { + return str; + } + StringBuilder sb = new StringBuilder(str); + while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) { + sb.deleteCharAt(0); + } + return sb.toString(); + } + + /** + * Trim trailing whitespace from the given String. 
+ * + * @param str the String to check + * @return the trimmed String + * @see java.lang.Character#isWhitespace + */ + public static String trimTrailingWhitespace(String str) { + if (!hasLength(str)) { + return str; + } + StringBuilder sb = new StringBuilder(str); + while (sb.length() > 0 && Character.isWhitespace(sb.charAt(sb.length() - 1))) { + sb.deleteCharAt(sb.length() - 1); + } + return sb.toString(); + } + + /** + * Trim all occurences of the supplied leading character from the given String. + * + * @param str the String to check + * @param leadingCharacter the leading character to be trimmed + * @return the trimmed String + */ + public static String trimLeadingCharacter(String str, char leadingCharacter) { + if (!hasLength(str)) { + return str; + } + StringBuilder sb = new StringBuilder(str); + while (sb.length() > 0 && sb.charAt(0) == leadingCharacter) { + sb.deleteCharAt(0); + } + return sb.toString(); + } + + /** + * Trim all occurences of the supplied trailing character from the given String. + * + * @param str the String to check + * @param trailingCharacter the trailing character to be trimmed + * @return the trimmed String + */ + public static String trimTrailingCharacter(String str, char trailingCharacter) { + if (!hasLength(str)) { + return str; + } + StringBuilder sb = new StringBuilder(str); + while (sb.length() > 0 && sb.charAt(sb.length() - 1) == trailingCharacter) { + sb.deleteCharAt(sb.length() - 1); + } + return sb.toString(); + } + + + /** + * Test if the given String starts with the specified prefix, + * ignoring upper/lower case. 
+ * + * @param str the String to check + * @param prefix the prefix to look for + * @see java.lang.String#startsWith + */ + public static boolean startsWithIgnoreCase(String str, String prefix) { + if (str == null || prefix == null) { + return false; + } + if (str.startsWith(prefix)) { + return true; + } + if (str.length() < prefix.length()) { + return false; + } + String lcStr = str.substring(0, prefix.length()).toLowerCase(); + String lcPrefix = prefix.toLowerCase(); + return lcStr.equals(lcPrefix); + } + + /** + * Test if the given String ends with the specified suffix, + * ignoring upper/lower case. + * + * @param str the String to check + * @param suffix the suffix to look for + * @see java.lang.String#endsWith + */ + public static boolean endsWithIgnoreCase(String str, String suffix) { + if (str == null || suffix == null) { + return false; + } + if (str.endsWith(suffix)) { + return true; + } + if (str.length() < suffix.length()) { + return false; + } + + String lcStr = str.substring(str.length() - suffix.length()).toLowerCase(); + String lcSuffix = suffix.toLowerCase(); + return lcStr.equals(lcSuffix); + } + + /** + * Test whether the given string matches the given substring + * at the given index. + * + * @param str the original string (or StringBuilder) + * @param index the index in the original string to start matching against + * @param substring the substring to match at the given index + */ + public static boolean substringMatch(CharSequence str, int index, CharSequence substring) { + for (int j = 0; j < substring.length(); j++) { + int i = index + j; + if (i >= str.length() || str.charAt(i) != substring.charAt(j)) { + return false; + } + } + return true; + } + + /** + * Count the occurrences of the substring in string s. + * + * @param str string to search in. Return 0 if this is null. + * @param sub string to search for. Return 0 if this is null. 
+ */ + public static int countOccurrencesOf(String str, String sub) { + if (str == null || sub == null || str.length() == 0 || sub.length() == 0) { + return 0; + } + int count = 0; + int pos = 0; + int idx; + while ((idx = str.indexOf(sub, pos)) != -1) { + ++count; + pos = idx + sub.length(); + } + return count; + } + + /** + * Replace all occurences of a substring within a string with + * another string. + * + * @param inString String to examine + * @param oldPattern String to replace + * @param newPattern String to insert + * @return a String with the replacements + */ + public static String replace(String inString, String oldPattern, String newPattern) { + if (!hasLength(inString) || !hasLength(oldPattern) || newPattern == null) { + return inString; + } + StringBuilder sb = new StringBuilder(); + int pos = 0; // our position in the old string + int index = inString.indexOf(oldPattern); + // the index of an occurrence we've found, or -1 + int patLen = oldPattern.length(); + while (index >= 0) { + sb.append(inString.substring(pos, index)); + sb.append(newPattern); + pos = index + patLen; + index = inString.indexOf(oldPattern, pos); + } + sb.append(inString.substring(pos)); + // remember to append any characters to the right of a match + return sb.toString(); + } + + /** + * Delete all occurrences of the given substring. + * + * @param inString the original String + * @param pattern the pattern to delete all occurrences of + * @return the resulting String + */ + public static String delete(String inString, String pattern) { + return replace(inString, pattern, ""); + } + + /** + * Delete any character in a given String. + * + * @param inString the original String + * @param charsToDelete a set of characters to delete. + * E.g. "az\n" will delete 'a's, 'z's and new lines. 
+ * @return the resulting String + */ + public static String deleteAny(String inString, String charsToDelete) { + if (!hasLength(inString) || !hasLength(charsToDelete)) { + return inString; + } + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < inString.length(); i++) { + char c = inString.charAt(i); + if (charsToDelete.indexOf(c) == -1) { + sb.append(c); + } + } + return sb.toString(); + } + + + //--------------------------------------------------------------------- + // Convenience methods for working with formatted Strings + //--------------------------------------------------------------------- + + /** + * Quote the given String with single quotes. + * + * @param str the input String (e.g. "myString") + * @return the quoted String (e.g. "'myString'"), + * or null if the input was null + */ + public static String quote(String str) { + return (str != null ? "'" + str + "'" : null); + } + + /** + * Turn the given Object into a String with single quotes + * if it is a String; keeping the Object as-is else. + * + * @param obj the input Object (e.g. "myString") + * @return the quoted String (e.g. "'myString'"), + * or the input object as-is if not a String + */ + public static Object quoteIfString(Object obj) { + return (obj instanceof String ? quote((String) obj) : obj); + } + + /** + * Unqualify a string qualified by a '.' dot character. For example, + * "this.name.is.qualified", returns "qualified". + * + * @param qualifiedName the qualified name + */ + public static String unqualify(String qualifiedName) { + return unqualify(qualifiedName, '.'); + } + + /** + * Unqualify a string qualified by a separator character. For example, + * "this:name:is:qualified" returns "qualified" if using a ':' separator. 
+ * + * @param qualifiedName the qualified name + * @param separator the separator + */ + public static String unqualify(String qualifiedName, char separator) { + return qualifiedName.substring(qualifiedName.lastIndexOf(separator) + 1); + } + + /** + * Capitalize a String, changing the first letter to + * upper case as per {@link Character#toUpperCase(char)}. + * No other letters are changed. + * + * @param str the String to capitalize, may be null + * @return the capitalized String, null if null + */ + public static String capitalize(String str) { + return changeFirstCharacterCase(str, true); + } + + /** + * Uncapitalize a String, changing the first letter to + * lower case as per {@link Character#toLowerCase(char)}. + * No other letters are changed. + * + * @param str the String to uncapitalize, may be null + * @return the uncapitalized String, null if null + */ + public static String uncapitalize(String str) { + return changeFirstCharacterCase(str, false); + } + + private static String changeFirstCharacterCase(String str, boolean capitalize) { + if (str == null || str.length() == 0) { + return str; + } + StringBuilder sb = new StringBuilder(str.length()); + if (capitalize) { + sb.append(Character.toUpperCase(str.charAt(0))); + } else { + sb.append(Character.toLowerCase(str.charAt(0))); + } + sb.append(str.substring(1)); + return sb.toString(); + } + + public static final ImmutableSet INVALID_FILENAME_CHARS = ImmutableSet.of('\\', '/', '*', '?', '"', '<', '>', '|', ' ', ','); + + public static boolean validFileName(String fileName) { + for (int i = 0; i < fileName.length(); i++) { + char c = fileName.charAt(i); + if (INVALID_FILENAME_CHARS.contains(c)) { + return false; + } + } + return true; + } + + /** + * Extract the filename from the given path, + * e.g. "mypath/myfile.txt" -> "myfile.txt". 
+ * + * @param path the file path (may be null) + * @return the extracted filename, or null if none + */ + public static String getFilename(String path) { + if (path == null) { + return null; + } + int separatorIndex = path.lastIndexOf(FOLDER_SEPARATOR); + return (separatorIndex != -1 ? path.substring(separatorIndex + 1) : path); + } + + /** + * Extract the filename extension from the given path, + * e.g. "mypath/myfile.txt" -> "txt". + * + * @param path the file path (may be null) + * @return the extracted filename extension, or null if none + */ + public static String getFilenameExtension(String path) { + if (path == null) { + return null; + } + int sepIndex = path.lastIndexOf(EXTENSION_SEPARATOR); + return (sepIndex != -1 ? path.substring(sepIndex + 1) : null); + } + + /** + * Strip the filename extension from the given path, + * e.g. "mypath/myfile.txt" -> "mypath/myfile". + * + * @param path the file path (may be null) + * @return the path with stripped filename extension, + * or null if none + */ + public static String stripFilenameExtension(String path) { + if (path == null) { + return null; + } + int sepIndex = path.lastIndexOf(EXTENSION_SEPARATOR); + return (sepIndex != -1 ? path.substring(0, sepIndex) : path); + } + + /** + * Apply the given relative path to the given path, + * assuming standard Java folder separation (i.e. 
"/" separators); + * + * @param path the path to start from (usually a full file path) + * @param relativePath the relative path to apply + * (relative to the full file path above) + * @return the full file path that results from applying the relative path + */ + public static String applyRelativePath(String path, String relativePath) { + int separatorIndex = path.lastIndexOf(FOLDER_SEPARATOR); + if (separatorIndex != -1) { + String newPath = path.substring(0, separatorIndex); + if (!relativePath.startsWith(FOLDER_SEPARATOR)) { + newPath += FOLDER_SEPARATOR; + } + return newPath + relativePath; + } else { + return relativePath; + } + } + + /** + * Normalize the path by suppressing sequences like "path/.." and + * inner simple dots. + *

The result is convenient for path comparison. For other uses, + * notice that Windows separators ("\") are replaced by simple slashes. + * + * @param path the original path + * @return the normalized path + */ + public static String cleanPath(String path) { + if (path == null) { + return null; + } + String pathToUse = replace(path, WINDOWS_FOLDER_SEPARATOR, FOLDER_SEPARATOR); + + // Strip prefix from path to analyze, to not treat it as part of the + // first path element. This is necessary to correctly parse paths like + // "file:core/../core/io/Resource.class", where the ".." should just + // strip the first "core" directory while keeping the "file:" prefix. + int prefixIndex = pathToUse.indexOf(":"); + String prefix = ""; + if (prefixIndex != -1) { + prefix = pathToUse.substring(0, prefixIndex + 1); + pathToUse = pathToUse.substring(prefixIndex + 1); + } + if (pathToUse.startsWith(FOLDER_SEPARATOR)) { + prefix = prefix + FOLDER_SEPARATOR; + pathToUse = pathToUse.substring(1); + } + + String[] pathArray = delimitedListToStringArray(pathToUse, FOLDER_SEPARATOR); + List pathElements = new LinkedList(); + int tops = 0; + + for (int i = pathArray.length - 1; i >= 0; i--) { + String element = pathArray[i]; + if (CURRENT_PATH.equals(element)) { + // Points to current directory - drop it. + } else if (TOP_PATH.equals(element)) { + // Registering top path found. + tops++; + } else { + if (tops > 0) { + // Merging path element with element corresponding to top path. + tops--; + } else { + // Normal path element found. + pathElements.add(0, element); + } + } + } + + // Remaining top paths need to be retained. + for (int i = 0; i < tops; i++) { + pathElements.add(0, TOP_PATH); + } + + return prefix + collectionToDelimitedString(pathElements, FOLDER_SEPARATOR); + } + + /** + * Compare two paths after normalization of them. 
+ * + * @param path1 first path for comparison + * @param path2 second path for comparison + * @return whether the two paths are equivalent after normalization + */ + public static boolean pathEquals(String path1, String path2) { + return cleanPath(path1).equals(cleanPath(path2)); + } + + /** + * Parse the given localeString into a {@link Locale}. + *

This is the inverse operation of {@link Locale#toString Locale's toString}. + * + * @param localeString the locale string, following Locale's + * toString() format ("en", "en_UK", etc); + * also accepts spaces as separators, as an alternative to underscores + * @return a corresponding Locale instance + */ + public static Locale parseLocaleString(String localeString) { + String[] parts = tokenizeToStringArray(localeString, "_ ", false, false); + String language = (parts.length > 0 ? parts[0] : ""); + String country = (parts.length > 1 ? parts[1] : ""); + String variant = ""; + if (parts.length >= 2) { + // There is definitely a variant, and it is everything after the country + // code sans the separator between the country code and the variant. + int endIndexOfCountryCode = localeString.indexOf(country) + country.length(); + // Strip off any leading '_' and whitespace, what's left is the variant. + variant = trimLeadingWhitespace(localeString.substring(endIndexOfCountryCode)); + if (variant.startsWith("_")) { + variant = trimLeadingCharacter(variant, '_'); + } + } + return (language.length() > 0 ? new Locale(language, country, variant) : null); + } + + /** + * Determine the RFC 3066 compliant language tag, + * as used for the HTTP "Accept-Language" header. + * + * @param locale the Locale to transform to a language tag + * @return the RFC 3066 compliant language tag as String + */ + public static String toLanguageTag(Locale locale) { + return locale.getLanguage() + (hasText(locale.getCountry()) ? "-" + locale.getCountry() : ""); + } + + + //--------------------------------------------------------------------- + // Convenience methods for working with String arrays + //--------------------------------------------------------------------- + + /** + * Append the given String to the given String array, returning a new array + * consisting of the input array contents plus the given String. 
+ * + * @param array the array to append to (can be null) + * @param str the String to append + * @return the new array (never null) + */ + public static String[] addStringToArray(String[] array, String str) { + if (isEmpty(array)) { + return new String[]{str}; + } + String[] newArr = new String[array.length + 1]; + System.arraycopy(array, 0, newArr, 0, array.length); + newArr[array.length] = str; + return newArr; + } + + /** + * Concatenate the given String arrays into one, + * with overlapping array elements included twice. + *

The order of elements in the original arrays is preserved. + * + * @param array1 the first array (can be null) + * @param array2 the second array (can be null) + * @return the new array (null if both given arrays were null) + */ + public static String[] concatenateStringArrays(String[] array1, String[] array2) { + if (isEmpty(array1)) { + return array2; + } + if (isEmpty(array2)) { + return array1; + } + String[] newArr = new String[array1.length + array2.length]; + System.arraycopy(array1, 0, newArr, 0, array1.length); + System.arraycopy(array2, 0, newArr, array1.length, array2.length); + return newArr; + } + + /** + * Merge the given String arrays into one, with overlapping + * array elements only included once. + *

The order of elements in the original arrays is preserved + * (with the exception of overlapping elements, which are only + * included on their first occurence). + * + * @param array1 the first array (can be null) + * @param array2 the second array (can be null) + * @return the new array (null if both given arrays were null) + */ + public static String[] mergeStringArrays(String[] array1, String[] array2) { + if (isEmpty(array1)) { + return array2; + } + if (isEmpty(array2)) { + return array1; + } + List result = new ArrayList(); + result.addAll(Arrays.asList(array1)); + for (String str : array2) { + if (!result.contains(str)) { + result.add(str); + } + } + return toStringArray(result); + } + + /** + * Turn given source String array into sorted array. + * + * @param array the source array + * @return the sorted array (never null) + */ + public static String[] sortStringArray(String[] array) { + if (isEmpty(array)) { + return new String[0]; + } + Arrays.sort(array); + return array; + } + + /** + * Copy the given Collection into a String array. + * The Collection must contain String elements only. + * + * @param collection the Collection to copy + * @return the String array (null if the passed-in + * Collection was null) + */ + public static String[] toStringArray(Collection collection) { + if (collection == null) { + return null; + } + return collection.toArray(new String[collection.size()]); + } + + /** + * Copy the given Enumeration into a String array. + * The Enumeration must contain String elements only. + * + * @param enumeration the Enumeration to copy + * @return the String array (null if the passed-in + * Enumeration was null) + */ + public static String[] toStringArray(Enumeration enumeration) { + if (enumeration == null) { + return null; + } + List list = Collections.list(enumeration); + return list.toArray(new String[list.size()]); + } + + /** + * Trim the elements of the given String array, + * calling String.trim() on each of them. 
+ * + * @param array the original String array + * @return the resulting array (of the same size) with trimmed elements + */ + public static String[] trimArrayElements(String[] array) { + if (isEmpty(array)) { + return new String[0]; + } + String[] result = new String[array.length]; + for (int i = 0; i < array.length; i++) { + String element = array[i]; + result[i] = (element != null ? element.trim() : null); + } + return result; + } + + /** + * Remove duplicate Strings from the given array. + * Also sorts the array, as it uses a TreeSet. + * + * @param array the String array + * @return an array without duplicates, in natural sort order + */ + public static String[] removeDuplicateStrings(String[] array) { + if (isEmpty(array)) { + return array; + } + Set set = new TreeSet(); + set.addAll(Arrays.asList(array)); + return toStringArray(set); + } + + /** + * Split a String at the first occurrence of the delimiter. + * Does not include the delimiter in the result. + * + * @param toSplit the string to split + * @param delimiter to split the string up with + * @return a two element array with index 0 being before the delimiter, and + * index 1 being after the delimiter (neither element includes the delimiter); + * or null if the delimiter wasn't found in the given input String + */ + public static String[] split(String toSplit, String delimiter) { + if (!hasLength(toSplit) || !hasLength(delimiter)) { + return null; + } + int offset = toSplit.indexOf(delimiter); + if (offset < 0) { + return null; + } + String beforeDelimiter = toSplit.substring(0, offset); + String afterDelimiter = toSplit.substring(offset + delimiter.length()); + return new String[]{beforeDelimiter, afterDelimiter}; + } + + /** + * Take an array Strings and split each element based on the given delimiter. + * A Properties instance is then generated, with the left of the + * delimiter providing the key, and the right of the delimiter providing the value. + *

Will trim both the key and value before adding them to the + * Properties instance. + * + * @param array the array to process + * @param delimiter to split each element using (typically the equals symbol) + * @return a Properties instance representing the array contents, + * or null if the array to process was null or empty + */ + public static Properties splitArrayElementsIntoProperties(String[] array, String delimiter) { + return splitArrayElementsIntoProperties(array, delimiter, null); + } + + /** + * Take an array Strings and split each element based on the given delimiter. + * A Properties instance is then generated, with the left of the + * delimiter providing the key, and the right of the delimiter providing the value. + *

Will trim both the key and value before adding them to the + * Properties instance. + * + * @param array the array to process + * @param delimiter to split each element using (typically the equals symbol) + * @param charsToDelete one or more characters to remove from each element + * prior to attempting the split operation (typically the quotation mark + * symbol), or null if no removal should occur + * @return a Properties instance representing the array contents, + * or null if the array to process was null or empty + */ + public static Properties splitArrayElementsIntoProperties( + String[] array, String delimiter, String charsToDelete) { + + if (isEmpty(array)) { + return null; + } + Properties result = new Properties(); + for (String element : array) { + if (charsToDelete != null) { + element = deleteAny(element, charsToDelete); + } + String[] splittedElement = split(element, delimiter); + if (splittedElement == null) { + continue; + } + result.setProperty(splittedElement[0].trim(), splittedElement[1].trim()); + } + return result; + } + + /** + * Tokenize the given String into a String array via a StringTokenizer. + * Trims tokens and omits empty tokens. + *

The given delimiters string is supposed to consist of any number of + * delimiter characters. Each of those characters can be used to separate + * tokens. A delimiter is always a single character; for multi-character + * delimiters, consider using delimitedListToStringArray + * + * @param str the String to tokenize + * @param delimiters the delimiter characters, assembled as String + * (each of those characters is individually considered as delimiter). + * @return an array of the tokens + * @see java.util.StringTokenizer + * @see java.lang.String#trim() + * @see #delimitedListToStringArray + */ + public static String[] tokenizeToStringArray(String str, String delimiters) { + return tokenizeToStringArray(str, delimiters, true, true); + } + + /** + * Tokenize the given String into a String array via a StringTokenizer. + *

The given delimiters string is supposed to consist of any number of + * delimiter characters. Each of those characters can be used to separate + * tokens. A delimiter is always a single character; for multi-character + * delimiters, consider using delimitedListToStringArray + * + * @param str the String to tokenize + * @param delimiters the delimiter characters, assembled as String + * (each of those characters is individually considered as delimiter) + * @param trimTokens trim the tokens via String's trim + * @param ignoreEmptyTokens omit empty tokens from the result array + * (only applies to tokens that are empty after trimming; StringTokenizer + * will not consider subsequent delimiters as token in the first place). + * @return an array of the tokens (null if the input String + * was null) + * @see java.util.StringTokenizer + * @see java.lang.String#trim() + * @see #delimitedListToStringArray + */ + public static String[] tokenizeToStringArray( + String str, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) { + + if (str == null) { + return null; + } + StringTokenizer st = new StringTokenizer(str, delimiters); + List tokens = new ArrayList(); + while (st.hasMoreTokens()) { + String token = st.nextToken(); + if (trimTokens) { + token = token.trim(); + } + if (!ignoreEmptyTokens || token.length() > 0) { + tokens.add(token); + } + } + return toStringArray(tokens); + } + + /** + * Take a String which is a delimited list and convert it to a String array. + *

A single delimiter can consists of more than one character: It will still + * be considered as single delimiter string, rather than as bunch of potential + * delimiter characters - in contrast to tokenizeToStringArray. + * + * @param str the input String + * @param delimiter the delimiter between elements (this is a single delimiter, + * rather than a bunch individual delimiter characters) + * @return an array of the tokens in the list + * @see #tokenizeToStringArray + */ + public static String[] delimitedListToStringArray(String str, String delimiter) { + return delimitedListToStringArray(str, delimiter, null); + } + + /** + * Take a String which is a delimited list and convert it to a String array. + *

A single delimiter can consists of more than one character: It will still + * be considered as single delimiter string, rather than as bunch of potential + * delimiter characters - in contrast to tokenizeToStringArray. + * + * @param str the input String + * @param delimiter the delimiter between elements (this is a single delimiter, + * rather than a bunch individual delimiter characters) + * @param charsToDelete a set of characters to delete. Useful for deleting unwanted + * line breaks: e.g. "\r\n\f" will delete all new lines and line feeds in a String. + * @return an array of the tokens in the list + * @see #tokenizeToStringArray + */ + public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) { + if (str == null) { + return new String[0]; + } + if (delimiter == null) { + return new String[]{str}; + } + List result = new ArrayList(); + if ("".equals(delimiter)) { + for (int i = 0; i < str.length(); i++) { + result.add(deleteAny(str.substring(i, i + 1), charsToDelete)); + } + } else { + int pos = 0; + int delPos; + while ((delPos = str.indexOf(delimiter, pos)) != -1) { + result.add(deleteAny(str.substring(pos, delPos), charsToDelete)); + pos = delPos + delimiter.length(); + } + if (str.length() > 0 && pos <= str.length()) { + // Add rest of String, but not in case of empty input. + result.add(deleteAny(str.substring(pos), charsToDelete)); + } + } + return toStringArray(result); + } + + /** + * Convert a CSV list into an array of Strings. + * + * @param str the input String + * @return an array of Strings, or the empty array in case of empty input + */ + public static String[] commaDelimitedListToStringArray(String str) { + return delimitedListToStringArray(str, ","); + } + + /** + * Convenience method to convert a CSV string list to a set. + * Note that this will suppress duplicates. 
+ * + * @param str the input String + * @return a Set of String entries in the list + */ + public static Set commaDelimitedListToSet(String str) { + Set set = new TreeSet(); + String[] tokens = commaDelimitedListToStringArray(str); + set.addAll(Arrays.asList(tokens)); + return set; + } + + /** + * Convenience method to return a Collection as a delimited (e.g. CSV) + * String. E.g. useful for toString() implementations. + * + * @param coll the Collection to display + * @param delim the delimiter to use (probably a ",") + * @param prefix the String to start each element with + * @param suffix the String to end each element with + * @return the delimited String + */ + public static String collectionToDelimitedString(Collection coll, String delim, String prefix, String suffix) { + if (isEmpty(coll)) { + return ""; + } + StringBuilder sb = new StringBuilder(); + Iterator it = coll.iterator(); + while (it.hasNext()) { + sb.append(prefix).append(it.next()).append(suffix); + if (it.hasNext()) { + sb.append(delim); + } + } + return sb.toString(); + } + + /** + * Convenience method to return a Collection as a delimited (e.g. CSV) + * String. E.g. useful for toString() implementations. + * + * @param coll the Collection to display + * @param delim the delimiter to use (probably a ",") + * @return the delimited String + */ + public static String collectionToDelimitedString(Collection coll, String delim) { + return collectionToDelimitedString(coll, delim, "", ""); + } + + /** + * Convenience method to return a Collection as a CSV String. + * E.g. useful for toString() implementations. + * + * @param coll the Collection to display + * @return the delimited String + */ + public static String collectionToCommaDelimitedString(Collection coll) { + return collectionToDelimitedString(coll, ","); + } + + /** + * Convenience method to return a String array as a delimited (e.g. CSV) + * String. E.g. useful for toString() implementations. 
+ * + * @param arr the array to display + * @param delim the delimiter to use (probably a ",") + * @return the delimited String + */ + public static String arrayToDelimitedString(Object[] arr, String delim) { + if (isEmpty(arr)) { + return ""; + } + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < arr.length; i++) { + if (i > 0) { + sb.append(delim); + } + sb.append(arr[i]); + } + return sb.toString(); + } + + /** + * Convenience method to return a String array as a CSV String. + * E.g. useful for toString() implementations. + * + * @param arr the array to display + * @return the delimited String + */ + public static String arrayToCommaDelimitedString(Object[] arr) { + return arrayToDelimitedString(arr, ","); + } + + /** + * Format the double value with a single decimal points, trimming trailing '.0'. + */ + public static String format1Decimals(double value, String suffix) { + String p = String.valueOf(value); + int ix = p.indexOf('.') + 1; + int ex = p.indexOf('E'); + char fraction = p.charAt(ix); + if (fraction == '0') { + if (ex != -1) { + return p.substring(0, ix - 1) + p.substring(ex) + suffix; + } else { + return p.substring(0, ix - 1) + suffix; + } + } else { + if (ex != -1) { + return p.substring(0, ix) + fraction + p.substring(ex) + suffix; + } else { + return p.substring(0, ix) + fraction + suffix; + } + } + } + + /** + * Determine whether the given array is empty: + * i.e. null or of zero length. + * + * @param array the array to check + */ + private static boolean isEmpty(Object[] array) { + return (array == null || array.length == 0); + } + + /** + * Return true if the supplied Collection is null + * or empty. Otherwise, return false. 
+ * + * @param collection the Collection to check + * @return whether the given Collection is empty + */ + private static boolean isEmpty(Collection collection) { + return (collection == null || collection.isEmpty()); + } + + private Strings() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/TimeValue.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/TimeValue.java new file mode 100644 index 00000000000..d7b251438b7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/TimeValue.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util; + +import org.elasticsearch.ElasticSearchParseException; +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; +import java.util.concurrent.TimeUnit; + +/** + * @author kimchy (Shay Banon) + */ +public class TimeValue implements Serializable, Streamable { + + public static final TimeValue UNKNOWN = new TimeValue(-1); + + public static TimeValue timeValueMillis(long millis) { + return new TimeValue(millis, TimeUnit.MILLISECONDS); + } + + public static TimeValue timeValueSeconds(long seconds) { + return new TimeValue(seconds, TimeUnit.SECONDS); + } + + public static TimeValue timeValueMinutes(long minutes) { + return new TimeValue(minutes, TimeUnit.MINUTES); + } + + public static TimeValue timeValueHours(long hours) { + return new TimeValue(hours, TimeUnit.HOURS); + } + + private long duration; + + private TimeUnit timeUnit; + + private TimeValue() { + + } + + public TimeValue(long millis) { + this(millis, TimeUnit.MILLISECONDS); + } + + public TimeValue(long duration, TimeUnit timeUnit) { + this.duration = duration; + this.timeUnit = timeUnit; + } + + public long nanos() { + return timeUnit.toNanos(duration); + } + + public long micros() { + return timeUnit.toMicros(duration); + } + + public long millis() { + return timeUnit.toMillis(duration); + } + + public long seconds() { + return timeUnit.toSeconds(duration); + } + + public long minutes() { + return timeUnit.toMinutes(duration); + } + + public long hours() { + return timeUnit.toHours(duration); + } + + public long days() { + return timeUnit.toDays(duration); + } + + public double microsFrac() { + return ((double) nanos()) / C1; + } + + public double millisFrac() { + return ((double) nanos()) / C2; + } + + public double secondsFrac() { + return ((double) nanos()) / C3; + } + + public double minutesFrac() { + return ((double) nanos()) / C4; + } + + public double 
hoursFrac() { + return ((double) nanos()) / C5; + } + + public double daysFrac() { + return ((double) nanos()) / C6; + } + + @Override public String toString() { + long nanos = nanos(); + if (nanos == 0) { + return "0s"; + } + double value = nanos; + String suffix = "nanos"; + if (nanos >= C6) { + value = daysFrac(); + suffix = "d"; + } else if (nanos >= C5) { + value = hoursFrac(); + suffix = "h"; + } else if (nanos >= C4) { + value = minutesFrac(); + suffix = "m"; + } else if (nanos >= C3) { + value = secondsFrac(); + suffix = "s"; + } else if (nanos >= C2) { + value = millisFrac(); + suffix = "ms"; + } else if (nanos >= C1) { + value = microsFrac(); + suffix = "micros"; + } + return Strings.format1Decimals(value, suffix); + } + + public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue) { + if (sValue == null) { + return defaultValue; + } + try { + long millis; + if (sValue.endsWith("S")) { + millis = Long.parseLong(sValue.substring(0, sValue.length() - 1)); + } else if (sValue.endsWith("ms")) { + millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - "ms".length()))); + } else if (sValue.endsWith("s")) { + millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 1000); + } else if (sValue.endsWith("m")) { + millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 60 * 1000); + } else if (sValue.endsWith("H")) { + millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 60 * 60 * 1000); + } else { + millis = Long.parseLong(sValue); + } + return new TimeValue(millis, TimeUnit.MILLISECONDS); + } catch (NumberFormatException e) { + throw new ElasticSearchParseException("Failed to parse [" + sValue + "]", e); + } + } + + static final long C0 = 1L; + static final long C1 = C0 * 1000L; + static final long C2 = C1 * 1000L; + static final long C3 = C2 * 1000L; + static final long C4 = C3 * 60L; + static final long C5 = C4 * 60L; + static final long C6 = C5 * 24L; 
+ + public static TimeValue readTimeValue(DataInput in) throws IOException, ClassNotFoundException { + TimeValue timeValue = new TimeValue(); + timeValue.readFrom(in); + return timeValue; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + duration = in.readLong(); + timeUnit = TimeUnit.NANOSECONDS; + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(nanos()); + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TimeValue timeValue = (TimeValue) o; + + if (duration != timeValue.duration) return false; + if (timeUnit != timeValue.timeUnit) return false; + + return true; + } + + @Override public int hashCode() { + int result = (int) (duration ^ (duration >>> 32)); + result = 31 * result + (timeUnit != null ? timeUnit.hashCode() : 0); + return result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/Tuple.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Tuple.java new file mode 100644 index 00000000000..23cdaff89f8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/Tuple.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/**
 * An immutable pair of two values.
 * <p>Note: the type parameters {@code <V1, V2>} are restored here; the class
 * body references them, so the declaration must carry them to compile.
 *
 * @author kimchy (Shay Banon)
 */
public class Tuple<V1, V2> {

    private final V1 v1;
    private final V2 v2;

    /**
     * Create a tuple of the two given values (either may be null).
     */
    public Tuple(V1 v1, V2 v2) {
        this.v1 = v1;
        this.v2 = v2;
    }

    /** @return the first value */
    public V1 v1() {
        return v1;
    }

    /** @return the second value */
    public V2 v2() {
        return v2;
    }
}
/**
 * A UUID taken from java UUID that simply holds less data.
 *
 * @author kimchy (Shay Banon)
 */
public class UUID implements Comparable<UUID> {

    /*
     * The most significant 64 bits of this UUID.
     *
     * @serial
     */
    private final long mostSigBits;

    /*
     * The least significant 64 bits of this UUID.
     *
     * @serial
     */
    private final long leastSigBits;

    /*
     * The random number generator used by this class to create random
     * based UUIDs. Lazily initialized; the unsynchronized null check in
     * randomUUID() is a benign race - at worst more than one SecureRandom
     * is created and one wins.
     */
    private static volatile SecureRandom numberGenerator = null;

    // Constructors and Factories

    /*
     * Private constructor which uses a byte array to construct the new UUID.
     * Bytes are folded big-endian: data[0..7] -> mostSigBits, data[8..15] -> leastSigBits.
     */
    private UUID(byte[] data) {
        long msb = 0;
        long lsb = 0;
        assert data.length == 16;
        for (int i = 0; i < 8; i++)
            msb = (msb << 8) | (data[i] & 0xff);
        for (int i = 8; i < 16; i++)
            lsb = (lsb << 8) | (data[i] & 0xff);
        this.mostSigBits = msb;
        this.leastSigBits = lsb;
    }

    /**
     * Constructs a new UUID using the specified data.
     * <tt>mostSigBits</tt> is used for the most significant 64 bits
     * of the UUID and <tt>leastSigBits</tt> becomes the
     * least significant 64 bits of the UUID.
     *
     * @param mostSigBits  the most significant 64 bits
     * @param leastSigBits the least significant 64 bits
     */
    public UUID(long mostSigBits, long leastSigBits) {
        this.mostSigBits = mostSigBits;
        this.leastSigBits = leastSigBits;
    }

    /**
     * Static factory to retrieve a type 4 (pseudo randomly generated) UUID.
     *
     * The UUID is generated using a cryptographically strong
     * pseudo random number generator.
     *
     * @return a randomly generated UUID.
     */
    public static UUID randomUUID() {
        SecureRandom ng = numberGenerator;
        if (ng == null) {
            numberGenerator = ng = new SecureRandom();
        }

        byte[] randomBytes = new byte[16];
        ng.nextBytes(randomBytes);
        randomBytes[6] &= 0x0f; /* clear version */
        randomBytes[6] |= 0x40; /* set to version 4 */
        randomBytes[8] &= 0x3f; /* clear variant */
        randomBytes[8] |= 0x80; /* set to IETF variant */
        return new UUID(randomBytes);
    }

    /**
     * Static factory to retrieve a type 3 (name based) UUID based on
     * the specified byte array.
     *
     * @param name a byte array to be used to construct a UUID.
     * @return a UUID generated from the specified array.
     */
    public static UUID nameUUIDFromBytes(byte[] name) {
        MessageDigest md;
        try {
            md = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException nsae) {
            // MD5 support is mandated for every JDK, so this cannot happen
            throw new InternalError("MD5 not supported");
        }
        byte[] md5Bytes = md.digest(name);
        md5Bytes[6] &= 0x0f; /* clear version */
        md5Bytes[6] |= 0x30; /* set to version 3 */
        md5Bytes[8] &= 0x3f; /* clear variant */
        md5Bytes[8] |= 0x80; /* set to IETF variant */
        return new UUID(md5Bytes);
    }

    /**
     * Creates a UUID from the string standard representation as
     * described in the {@link #toString} method.
     *
     * @param name a string that specifies a UUID.
     * @return a UUID with the specified value.
     * @throws IllegalArgumentException if name does not conform to the
     *                                  string representation as described in {@link #toString}.
     */
    public static UUID fromString(String name) {
        String[] components = name.split("-");
        if (components.length != 5)
            throw new IllegalArgumentException("Invalid UUID string: " + name);
        for (int i = 0; i < 5; i++)
            components[i] = "0x" + components[i];

        long mostSigBits = Long.decode(components[0]).longValue();
        mostSigBits <<= 16;
        mostSigBits |= Long.decode(components[1]).longValue();
        mostSigBits <<= 16;
        mostSigBits |= Long.decode(components[2]).longValue();

        long leastSigBits = Long.decode(components[3]).longValue();
        leastSigBits <<= 48;
        leastSigBits |= Long.decode(components[4]).longValue();

        return new UUID(mostSigBits, leastSigBits);
    }

    // Field Accessor Methods

    /**
     * Returns the least significant 64 bits of this UUID's 128 bit value.
     *
     * @return the least significant 64 bits of this UUID's 128 bit value.
     */
    public long getLeastSignificantBits() {
        return leastSigBits;
    }

    /**
     * Returns the most significant 64 bits of this UUID's 128 bit value.
     *
     * @return the most significant 64 bits of this UUID's 128 bit value.
     */
    public long getMostSignificantBits() {
        return mostSigBits;
    }

    // Object Inherited Methods

    /**
     * Returns a <tt>String</tt> object representing this <tt>UUID</tt>, in the
     * canonical 8-4-4-4-12 lower-case hex form:
     * <pre>
     * time_low "-" time_mid "-" time_high_and_version "-" variant_and_sequence "-" node
     * </pre>
     * e.g. <tt>01234567-89ab-cdef-0123-456789abcdef</tt>.
     *
     * @return a string representation of this <tt>UUID</tt>.
     */
    @Override public String toString() {
        return (digits(mostSigBits >> 32, 8) + "-" +
                digits(mostSigBits >> 16, 4) + "-" +
                digits(mostSigBits, 4) + "-" +
                digits(leastSigBits >> 48, 4) + "-" +
                digits(leastSigBits, 12));
    }

    /**
     * Returns val represented by the specified number of hex digits.
     * The "hi | ..." trick keeps leading zeros when the hex string is produced.
     */
    private static String digits(long val, int digits) {
        long hi = 1L << (digits * 4);
        return Long.toHexString(hi | (val & (hi - 1))).substring(1);
    }

    /**
     * Returns a hash code for this <code>UUID</code>.
     *
     * @return a hash code value for this <tt>UUID</tt>.
     */
    @Override public int hashCode() {
        return (int) ((mostSigBits >> 32) ^
                mostSigBits ^
                (leastSigBits >> 32) ^
                leastSigBits);
    }

    /**
     * Compares this object to the specified object. The result is
     * <tt>true</tt> if and only if the argument is not
     * <tt>null</tt>, is a <tt>UUID</tt> object, and contains the same
     * value, bit for bit, as this <tt>UUID</tt>.
     *
     * @param obj the object to compare with.
     * @return <code>true</code> if the objects are the same;
     *         <code>false</code> otherwise.
     */
    @Override public boolean equals(Object obj) {
        if (!(obj instanceof UUID))
            return false;
        UUID id = (UUID) obj;
        return (mostSigBits == id.mostSigBits &&
                leastSigBits == id.leastSigBits);
    }

    // Comparison Operations

    /**
     * Compares this UUID with the specified UUID.
     *
     * The first of two UUIDs follows the second if the most significant
     * field in which the UUIDs differ is greater for the first UUID.
     * Note: the comparison is signed, matching <tt>java.util.UUID</tt>.
     *
     * @param val <tt>UUID</tt> to which this <tt>UUID</tt> is to be compared.
     * @return -1, 0 or 1 as this <tt>UUID</tt> is less than, equal
     *         to, or greater than <tt>val</tt>.
     */
    public int compareTo(UUID val) {
        // The ordering is intentionally set up so that the UUIDs
        // can simply be numerically compared as two numbers
        return (this.mostSigBits < val.mostSigBits ? -1 :
                (this.mostSigBits > val.mostSigBits ? 1 :
                        (this.leastSigBits < val.leastSigBits ? -1 :
                                (this.leastSigBits > val.leastSigBits ? 1 :
                                        0))));
    }
}
/**
 * A base class for components, holding the component's {@link Settings} and a
 * {@link Logger} obtained through {@link Loggers} for the component class.
 *
 * @author kimchy (Shay Banon)
 */
public class AbstractComponent {

    /** Logger created through {@link Loggers} for the component class and settings. */
    protected final Logger logger;

    /** The full settings this component was constructed with. */
    protected final Settings settings;

    /** The settings extracted for this specific component (keyed by its class). */
    protected final Settings componentSettings;

    /**
     * Constructs a component deriving the logger name and the component settings
     * from {@code getClass()} (i.e. the concrete subclass).
     *
     * @param settings the full settings
     */
    public AbstractComponent(Settings settings) {
        this.logger = Loggers.getLogger(getClass(), settings);
        this.settings = settings;
        this.componentSettings = settings.getComponentSettings(getClass());
    }

    /**
     * Constructs a component deriving the logger name and the component settings
     * from an explicit class, so subclassing does not change them.
     *
     * @param settings       the full settings
     * @param componentClass the class used for the logger and the component settings
     */
    public AbstractComponent(Settings settings, Class componentClass) {
        this.logger = Loggers.getLogger(componentClass, settings);
        this.settings = settings;
        this.componentSettings = settings.getComponentSettings(componentClass);
    }

    /**
     * Returns the node name from the settings ("name" key), or an empty string when not set.
     */
    public String nodeName() {
        return settings.get("name", "");
    }
}
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.component; + +import org.elasticsearch.ElasticSearchIllegalStateException; +import org.elasticsearch.util.concurrent.ThreadSafe; + +/** + * Lifecycle state. Allows the following transitions: + *

    + *
  • INITIALIZED -> STARTED, STOPPED, CLOSED
  • + *
  • STARTED -> STOPPED
  • + *
  • STOPPED -> STARTED, CLOSED
  • + *
  • CLOSED ->
  • + *
+ * + *

Also allows to stay in the same state. For example, when calling stop on a component, the + * following logic can be applied: + * + *

+ * public void stop() {
+ *  if (!lifeccycleState.moveToStopped()) {
+ *      return;
+ *  }
+ * // continue with stop logic
+ * }
+ * 
+ * + *

Note, closed is only allowed to be called when stopped, so make sure to stop the component first. + * Here is how the logic can be applied: + * + *

+ * public void close() {
+ *  if (lifecycleState.started()) {
+ *      stop();
+ *  }
+ *  if (!lifecycleState.moveToClosed()) {
+ *      return;
+ *  }
+ *  // perofrm close logic here
+ * }
+ * 
+ * + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class Lifecycle { + + public static enum State { + INITIALIZED, + STOPPED, + STARTED, + CLOSED + } + + private volatile State state = State.INITIALIZED; + + public State state() { + return this.state; + } + + /** + * Returns true if the state is initialized. + */ + public boolean initialized() { + return state == State.INITIALIZED; + } + + /** + * Returns true if the state is started. + */ + public boolean started() { + return state == State.STARTED; + } + + /** + * Returns true if the state is stopped. + */ + public boolean stopped() { + return state == State.STOPPED; + } + + /** + * Returns true if the state is closed. + */ + public boolean closed() { + return state == State.CLOSED; + } + + public boolean moveToStarted() throws ElasticSearchIllegalStateException { + State localState = this.state; + if (localState == State.INITIALIZED || localState == State.STOPPED) { + state = State.STARTED; + return true; + } + if (localState == State.STARTED) { + return false; + } + if (localState == State.CLOSED) { + throw new ElasticSearchIllegalStateException("Can't move to started state when closed"); + } + throw new ElasticSearchIllegalStateException("Can't move to started with unknown state"); + } + + public boolean moveToStopped() throws ElasticSearchIllegalStateException { + State localState = state; + if (localState == State.STARTED) { + state = State.STOPPED; + return true; + } + if (localState == State.INITIALIZED || localState == State.STOPPED) { + return false; + } + if (localState == State.CLOSED) { + throw new ElasticSearchIllegalStateException("Can't move to started state when closed"); + } + throw new ElasticSearchIllegalStateException("Can't move to started with unknown state"); + } + + public boolean moveToClosed() throws ElasticSearchIllegalStateException { + State localState = state; + if (localState == State.CLOSED) { + return false; + } + if (localState == State.STARTED) { + throw new 
ElasticSearchIllegalStateException("Can't move to closed before moving to stopped mode"); + } + state = State.CLOSED; + return true; + } + + @Override public String toString() { + return state.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/component/LifecycleComponent.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/component/LifecycleComponent.java new file mode 100644 index 00000000000..ce911af00e2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/component/LifecycleComponent.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.component; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public interface LifecycleComponent { + + Lifecycle.State lifecycleState(); + + T start() throws ElasticSearchException; + + T stop() throws ElasticSearchException; + + void close() throws ElasticSearchException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/ConcurrentMaps.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/ConcurrentMaps.java new file mode 100644 index 00000000000..cfebf27f159 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/ConcurrentMaps.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent; + +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashMap; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class ConcurrentMaps { + + private final static boolean useNonBlockingMap = Boolean.parseBoolean(System.getProperty("elasticsearch.useNonBlockingMap", "true")); + + public static ConcurrentMap newConcurrentMap() { + if (useNonBlockingMap) { + return new NonBlockingHashMap(); + } + return new ConcurrentHashMap(); + } + + + private ConcurrentMaps() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/ConcurrentSafeArray.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/ConcurrentSafeArray.java new file mode 100644 index 00000000000..7f598bcc87b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/ConcurrentSafeArray.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent; + +import org.elasticsearch.util.SafeArray; + +import java.util.ArrayList; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * A concurrent version of {@link SafeArray}. + * + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class ConcurrentSafeArray implements SafeArray { + + private final ArrayList list = new ArrayList(); + + private final ReadWriteLock rwl = new ReentrantReadWriteLock(); + + @Override public T get(int index) { + rwl.readLock().lock(); + try { + return list.get(index); + } finally { + rwl.readLock().unlock(); + } + } + + @Override public int size() { + rwl.readLock().lock(); + try { + return list.size(); + } finally { + rwl.readLock().unlock(); + } + } + + @Override public void add(T value) { + rwl.writeLock().lock(); + try { + list.add(value); + } finally { + rwl.writeLock().unlock(); + } + } + + @Override public void add(int index, T value) { + rwl.writeLock().lock(); + try { + list.add(index, value); + } finally { + rwl.writeLock().unlock(); + } + } + + @Override public void clear() { + rwl.writeLock().lock(); + try { + list.clear(); + } finally { + rwl.writeLock().unlock(); + } + } + + @Override public boolean forEach(Procedure procedure) { + rwl.readLock().lock(); + try { + for (int i = 0; i < list.size(); i++) { + if (!procedure.execute(list.get(i))) { + return false; + } + } + return true; + } finally { + rwl.readLock().unlock(); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/CopyOnWriteMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/CopyOnWriteMap.java new file mode 100644 index 00000000000..80ec4f09eae --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/CopyOnWriteMap.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.concurrent; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; + +/** + * Provides the semantics of a thread safe copy on write map. + * + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class CopyOnWriteMap implements ConcurrentMap { + + private volatile Map map = new HashMap(); + + public void clear() { + map = new HashMap(); + } + + public boolean containsKey(Object key) { + return map.containsKey(key); + } + + public boolean containsValue(Object value) { + return map.containsValue(value); + } + + public Set> entrySet() { + return map.entrySet(); + } + + public V get(Object key) { + return map.get(key); + } + + public boolean isEmpty() { + return map.isEmpty(); + } + + public Set keySet() { + return map.keySet(); + } + + public V put(K key, V value) { + synchronized (this) { + Map copyMap = copyMap(); + V put = copyMap.put(key, value); + map = copyMap; + return put; + } + } + + public synchronized void putAll(Map t) { + Map copyMap = copyMap(); + copyMap.putAll(t); + map = copyMap; + } + + public synchronized V remove(Object key) { + Map copyMap = copyMap(); + V remove = copyMap.remove(key); + map = copyMap; + return 
remove; + } + + public int size() { + return map.size(); + } + + public Collection values() { + return map.values(); + } + + private Map copyMap() { + return new HashMap(map); + } + + public synchronized V putIfAbsent(K key, V value) { + V v = map.get(key); + if (v == null) { + Map copyMap = copyMap(); + copyMap.put(key, value); + map = copyMap; + } + return v; + } + + public synchronized boolean remove(Object key, Object value) { + V v = map.get(key); + if (v != null && v.equals(value)) { + Map copyMap = copyMap(); + copyMap.remove(key); + map = copyMap; + return true; + } + return false; + } + + public synchronized V replace(K key, V value) { + V v = map.get(key); + if (v != null) { + Map copyMap = copyMap(); + copyMap.put(key, value); + map = copyMap; + } + return v; + } + + public synchronized boolean replace(K key, V oldValue, V newValue) { + V v = map.get(key); + if (v != null && v.equals(oldValue)) { + Map copyMap = copyMap(); + copyMap.put(key, newValue); + map = copyMap; + return true; + } + return false; + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/DynamicExecutors.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/DynamicExecutors.java new file mode 100644 index 00000000000..b33b1133222 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/DynamicExecutors.java @@ -0,0 +1,208 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.concurrent; + +import org.elasticsearch.util.settings.Settings; + +import java.util.concurrent.*; + +/** + * Factory and utility methods for handling {@link DynamicThreadPoolExecutor}. + * + * @author kimchy (Shay Banon) + */ +public class DynamicExecutors { + /** + * Creates a thread pool that creates new threads as needed, but will reuse + * previously constructed threads when they are available. Calls to + * execute will reuse previously constructed threads if + * available. If no existing thread is available, a new thread will be + * created and added to the pool. No more than max threads will + * be created. Threads that have not been used for a keepAlive + * timeout are terminated and removed from the cache. Thus, a pool that + * remains idle for long enough will not consume any resources other than + * the min specified. + * + * @param min the number of threads to keep in the pool, even if they are + * idle. + * @param max the maximum number of threads to allow in the pool. + * @param keepAliveTime when the number of threads is greater than the min, + * this is the maximum time that excess idle threads will wait + * for new tasks before terminating (in milliseconds). 
+ * @return the newly created thread pool + */ + public static ExecutorService newScalingThreadPool(int min, int max, long keepAliveTime) { + return newScalingThreadPool(min, max, keepAliveTime, Executors.defaultThreadFactory()); + } + + /** + * Creates a thread pool, same as in + * {@link #newScalingThreadPool(int, int, long)}, using the provided + * ThreadFactory to create new threads when needed. + * + * @param min the number of threads to keep in the pool, even if they are + * idle. + * @param max the maximum number of threads to allow in the pool. + * @param keepAliveTime when the number of threads is greater than the min, + * this is the maximum time that excess idle threads will wait + * for new tasks before terminating (in milliseconds). + * @param threadFactory the factory to use when creating new threads. + * @return the newly created thread pool + */ + public static ExecutorService newScalingThreadPool(int min, int max, long keepAliveTime, ThreadFactory threadFactory) { + DynamicThreadPoolExecutor.DynamicQueue queue = new DynamicThreadPoolExecutor.DynamicQueue(); + ThreadPoolExecutor executor = new DynamicThreadPoolExecutor(min, max, keepAliveTime, TimeUnit.MILLISECONDS, queue, threadFactory); + executor.setRejectedExecutionHandler(new DynamicThreadPoolExecutor.ForceQueuePolicy()); + queue.setThreadPoolExecutor(executor); + return executor; + } + + /** + * Creates a thread pool similar to that constructed by + * {@link #newScalingThreadPool(int, int, long)}, but blocks the call to + * execute if the queue has reached it's capacity, and all + * max threads are busy handling requests. + *

+ * If the wait time of this queue has elapsed, a + * {@link RejectedExecutionException} will be thrown. + * + * @param min the number of threads to keep in the pool, even if they are + * idle. + * @param max the maximum number of threads to allow in the pool. + * @param keepAliveTime when the number of threads is greater than the min, + * this is the maximum time that excess idle threads will wait + * for new tasks before terminating (in milliseconds). + * @param capacity the fixed capacity of the underlying queue (resembles + * backlog). + * @param waitTime the wait time (in milliseconds) for space to become + * available in the queue. + * @return the newly created thread pool + */ + public static ExecutorService newBlockingThreadPool(int min, int max, long keepAliveTime, int capacity, long waitTime) { + return newBlockingThreadPool(min, max, keepAliveTime, capacity, waitTime, Executors.defaultThreadFactory()); + } + + /** + * Creates a thread pool, same as in + * {@link #newBlockingThreadPool(int, int, long, int, long)}, using the + * provided ThreadFactory to create new threads when needed. + * + * @param min the number of threads to keep in the pool, even if they are + * idle. + * @param max the maximum number of threads to allow in the pool. + * @param keepAliveTime when the number of threads is greater than the min, + * this is the maximum time that excess idle threads will wait + * for new tasks before terminating (in milliseconds). + * @param capacity the fixed capacity of the underlying queue (resembles + * backlog). + * @param waitTime the wait time (in milliseconds) for space to become + * available in the queue. + * @param threadFactory the factory to use when creating new threads. 
+ * @return the newly created thread pool + */ + public static ExecutorService newBlockingThreadPool(int min, int max, + long keepAliveTime, int capacity, long waitTime, + ThreadFactory threadFactory) { + DynamicThreadPoolExecutor.DynamicQueue queue = new DynamicThreadPoolExecutor.DynamicQueue(capacity); + ThreadPoolExecutor executor = new DynamicThreadPoolExecutor(min, max, keepAliveTime, TimeUnit.MILLISECONDS, queue, threadFactory); + executor.setRejectedExecutionHandler(new DynamicThreadPoolExecutor.TimedBlockingPolicy(waitTime)); + queue.setThreadPoolExecutor(executor); + return executor; + } + + public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) { + String name = settings.get("name"); + if (name == null) { + name = "elasticsearch"; + } else { + name = "elasticsearch[" + name + "]"; + } + return daemonThreadFactory(name + namePrefix); + } + + /** + * A priority based thread factory, for all Thread priority constants: + * Thread.MIN_PRIORITY, Thread.NORM_PRIORITY, Thread.MAX_PRIORITY; + *

+ * This factory is used instead of Executers.DefaultThreadFactory to allow + * manipulation of priority and thread owner name. + * + * @param namePrefix a name prefix for this thread + * @return a thread factory based on given priority. + */ + public static ThreadFactory daemonThreadFactory(String namePrefix) { + final ThreadFactory f = Executors.defaultThreadFactory(); + final String o = namePrefix + "-"; + + return new ThreadFactory() { + public Thread newThread(Runnable r) { + Thread t = f.newThread(r); + + /* + * Thread name: owner-pool-N-thread-M, where N is the sequence + * number of this factory, and M is the sequence number of the + * thread created by this factory. + */ + t.setName(o + t.getName()); + + /* override default definition t.setDaemon(false); */ + t.setDaemon(true); + + return t; + } + }; + } + + /** + * A priority based thread factory, for all Thread priority constants: + * Thread.MIN_PRIORITY, Thread.NORM_PRIORITY, Thread.MAX_PRIORITY; + *

+ * This factory is used instead of Executers.DefaultThreadFactory to allow + * manipulation of priority and thread owner name. + * + * @param priority The priority to be assigned to each thread; + * can be either Thread.MIN_PRIORITY, Thread.NORM_PRIORITY + * or Thread.MAX_PRIORITY. + * @param namePrefix a name prefix for this thread + * @return a thread factory based on given priority. + */ + public static ThreadFactory priorityThreadFactory(int priority, String namePrefix) { + final ThreadFactory f = DynamicExecutors.daemonThreadFactory(namePrefix); + final int p = priority; + + return new ThreadFactory() { + public Thread newThread(Runnable r) { + Thread t = f.newThread(r); + + /* override default thread priority of Thread.NORM_PRIORITY */ + if (p != Thread.NORM_PRIORITY) + t.setPriority(p); + + return t; + } + }; + } + + /** + * Cannot instantiate. + */ + private DynamicExecutors() { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/DynamicThreadPoolExecutor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/DynamicThreadPoolExecutor.java new file mode 100644 index 00000000000..13501043cda --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/DynamicThreadPoolExecutor.java @@ -0,0 +1,165 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/**
 * An {@link ExecutorService} that executes each submitted task using one of
 * possibly several pooled threads, normally configured using
 * {@link DynamicExecutors} factory methods.
 *
 * @author moran
 * @since 6.5
 */
public class DynamicThreadPoolExecutor extends ThreadPoolExecutor {

    /**
     * Number of threads that are actively executing tasks. Maintained here so
     * {@link #getActiveCount()} does not need to take the pool's main lock.
     */
    private final AtomicInteger activeCount = new AtomicInteger();

    public DynamicThreadPoolExecutor(int corePoolSize, int maximumPoolSize,
                                     long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue,
                                     ThreadFactory threadFactory) {
        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
    }

    @Override public int getActiveCount() {
        return activeCount.get();
    }

    @Override protected void beforeExecute(Thread t, Runnable r) {
        activeCount.incrementAndGet();
    }

    @Override protected void afterExecute(Runnable r, Throwable t) {
        activeCount.decrementAndGet();
    }

    /**
     * Much like a {@link SynchronousQueue} which acts as a rendezvous channel. It
     * is well suited for handoff designs, in which a task is only queued if there
     * is an available thread to pick it up.
     * <p/>
     * This queue is correlated with a thread-pool, and allows insertions to the
     * queue only if there is a free thread that can poll this task. Otherwise, the
     * task is rejected and the decision is left up to one of the
     * {@link RejectedExecutionHandler} policies:
     * <ol>
     * <li>{@link ForceQueuePolicy} - forces the queue to accept the rejected task.</li>
     * <li>{@link TimedBlockingPolicy} - waits for a given time for the task to be
     * executed.</li>
     * </ol>
     *
     * @author kimchy (Shay Banon)
     */
    public static class DynamicQueue<E> extends LinkedBlockingQueue<E> {
        private static final long serialVersionUID = 1L;

        /**
         * The executor this Queue belongs to. Must be set via
         * {@link #setThreadPoolExecutor(ThreadPoolExecutor)} before the queue is used.
         */
        private transient ThreadPoolExecutor executor;

        /**
         * Creates a <tt>DynamicQueue</tt> with a capacity of
         * {@link Integer#MAX_VALUE}.
         */
        public DynamicQueue() {
            super();
        }

        /**
         * Creates a <tt>DynamicQueue</tt> with the given (fixed) capacity.
         *
         * @param capacity the capacity of this queue.
         */
        public DynamicQueue(int capacity) {
            super(capacity);
        }

        /**
         * Sets the executor this queue belongs to.
         */
        public void setThreadPoolExecutor(ThreadPoolExecutor executor) {
            this.executor = executor;
        }

        /**
         * Inserts the specified element at the tail of this queue if there is at
         * least one available thread to run the current task. If all pool threads
         * are actively busy, it rejects the offer.
         *
         * @param o the element to add.
         * @return <tt>true</tt> if it was possible to add the element to this
         *         queue, else <tt>false</tt>
         * @see ThreadPoolExecutor#execute(Runnable)
         */
        @Override
        public boolean offer(E o) {
            // Accept only when a pooled thread is free: active workers plus
            // already-queued tasks must be fewer than the current pool size.
            int allWorkingThreads = executor.getActiveCount() + super.size();
            return allWorkingThreads < executor.getPoolSize() && super.offer(o);
        }
    }

    /**
     * A handler for rejected tasks that adds the specified element to this queue,
     * waiting if necessary for space to become available.
     */
    public static class ForceQueuePolicy implements RejectedExecutionHandler {
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                // With an unbounded DynamicQueue this returns immediately, but a
                // bounded queue can legitimately block here and be interrupted.
                executor.getQueue().put(r);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt status before propagating, so the
                // calling thread's interruption is not silently swallowed.
                Thread.currentThread().interrupt();
                throw new RejectedExecutionException(e);
            }
        }
    }

    /**
     * A handler for rejected tasks that inserts the specified element into this
     * queue, waiting if necessary up to the specified wait time for space to become
     * available.
     */
    public static class TimedBlockingPolicy implements RejectedExecutionHandler {
        private final long waitTime;

        /**
         * @param waitTime wait time in milliseconds for space to become available.
         */
        public TimedBlockingPolicy(long waitTime) {
            this.waitTime = waitTime;
        }

        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean successful = executor.getQueue().offer(r, waitTime, TimeUnit.MILLISECONDS);
                if (!successful)
                    throw new RejectedExecutionException("Rejected execution after waiting "
                            + waitTime + " ms for task [" + r.getClass() + "] to be executed.");
            } catch (InterruptedException e) {
                // FIX: preserve the caller's interrupt status (see ForceQueuePolicy).
                Thread.currentThread().interrupt();
                throw new RejectedExecutionException(e);
            }
        }
    }
}
+ */ + +package org.elasticsearch.util.concurrent; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchInterruptedException; + +import java.util.concurrent.ExecutionException; + +/** + * @author kimchy (Shay Banon) + */ +public class Futures { + + public static ElasticSearchException convert(Exception e) { + if (e instanceof ExecutionException) { + if (e.getCause() instanceof ElasticSearchException) { + return (ElasticSearchException) e.getCause(); + } + return new UncategorizedExecutionException(e.getCause().getMessage(), e.getCause()); + } + if (e instanceof InterruptedException) { + return new ElasticSearchInterruptedException(e.getMessage(), e.getCause()); + } + if (e instanceof ElasticSearchException) { + return (ElasticSearchException) e; + } + return new UncategorizedExecutionException(e.getMessage(), e); + } + + private Futures() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/GuardedBy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/GuardedBy.java new file mode 100644 index 00000000000..dfa615a1aca --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/GuardedBy.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
/**
 * GuardedBy
 * <p/>
 * Documents that the annotated field or method may only be accessed while
 * holding a particular lock, which may be a built-in (synchronization) lock
 * or an explicit java.util.concurrent.Lock.
 * <p/>
 * The argument names the guarding lock:
 * <ul>
 * <li><tt>"this"</tt> - the instance the field is defined on;</li>
 * <li><tt>class-name.this</tt> - disambiguates 'this' for inner classes;</li>
 * <li><tt>"itself"</tt> - for reference fields only, the referenced object;</li>
 * <li><tt>field-name</tt> - the lock object held in the named (instance or
 * static) field;</li>
 * <li><tt>class-name.field-name</tt> - the lock object held in the named
 * static field;</li>
 * <li><tt>method-name()</tt> - the lock object returned by the named nil-ary
 * method;</li>
 * <li><tt>class-name.class</tt> - the Class object of the named class.</li>
 * </ul>
 *
 * @author kimchy (Shay Banon)
 */
@Target({ElementType.FIELD, ElementType.METHOD})
@Retention(RetentionPolicy.CLASS)
public @interface GuardedBy {
    String value();
}
/**
 * Immutable
 * <p/>
 * Documents that instances of the annotated class cannot be observed to change
 * state by callers. Consequently all public fields are final, all public final
 * reference fields point at other immutable objects, and no method publishes a
 * reference to internal state that is mutable by implementation (even if not
 * by design). Internal mutable state is still permitted for performance
 * purposes - for example lazily computed values - provided it is derived from
 * immutable state and callers cannot tell the difference.
 * <p/>
 * Immutable objects are inherently thread-safe; they may be passed between
 * threads or published without synchronization.
 *
 * @author kimchy (Shay Banon)
 */
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.CLASS)
public @interface Immutable {
}
/**
 * NotThreadSafe
 * <p/>
 * Documents that the annotated class is not thread-safe. The annotation exists
 * mainly to make the lack of thread-safety explicit for classes that might
 * otherwise be assumed safe - even though assuming thread-safety without good
 * reason is a bad idea in the first place.
 *
 * @author kimchy (Shay Banon)
 * @see ThreadSafe
 */
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.CLASS)
public @interface NotThreadSafe {
}
/**
 * A synchronization aid that allows a set of threads to all wait for each other
 * to reach a common barrier point. Barriers are useful in programs involving a
 * fixed sized party of threads that must occasionally wait for each other.
 * <tt>ThreadBarrier</tt> adds a <i>cause</i> to the
 * {@link BrokenBarrierException} thrown by a {@link #reset()} operation defined
 * by {@link CyclicBarrier}: breaking the barrier via {@link #reset(Throwable)}
 * records the given throwable, and every subsequent {@link #await()},
 * {@link #await(long, TimeUnit)} or {@link #inspect()} propagates it as the
 * cause of the exception it throws. This is useful in tests, where a worker
 * thread can break the barrier with the exception it caught so the
 * coordinating thread can fail with the real reason.
 *
 * @author kimchy (Shay Banon)
 */
public class ThreadBarrier extends CyclicBarrier {
    /**
     * The cause of a {@link BrokenBarrierException} and {@link TimeoutException}
     * thrown from an await() when {@link #reset(Throwable)} was invoked.
     * Guarded by <tt>this</tt>.
     */
    private Throwable cause;

    /**
     * {@inheritDoc}
     */
    public ThreadBarrier(int parties) {
        super(parties);
    }

    /**
     * {@inheritDoc}
     */
    public ThreadBarrier(int parties, Runnable barrierAction) {
        super(parties, barrierAction);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int await() throws InterruptedException, BrokenBarrierException {
        try {
            breakIfBroken();
            return super.await();
        } catch (BrokenBarrierException bbe) {
            initCause(bbe);
            throw bbe;
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override public int await(long timeout, TimeUnit unit) throws InterruptedException, BrokenBarrierException, TimeoutException {
        try {
            breakIfBroken();
            return super.await(timeout, unit);
        } catch (BrokenBarrierException bbe) {
            initCause(bbe);
            throw bbe;
        } catch (TimeoutException te) {
            initCause(te);
            throw te;
        }
    }

    /**
     * Resets the barrier to its initial state. If any parties are
     * currently waiting at the barrier, they will return with a
     * {@link BrokenBarrierException}. Note that resets <em>after</em>
     * a breakage has occurred for other reasons can be complicated to
     * carry out; threads need to re-synchronize in some other way,
     * and choose one to perform the reset. It may be preferable to
     * instead create a new barrier for subsequent use.
     *
     * @param cause The cause of the BrokenBarrierException
     */
    public synchronized void reset(Throwable cause) {
        if (!isBroken()) {
            super.reset();
        }

        // Only the first recorded cause wins; later resets keep it.
        if (this.cause == null) {
            this.cause = cause;
        }
    }

    /**
     * Queries if this barrier is in a broken state. Note that if
     * {@link #reset(Throwable)} is invoked the barrier will remain broken, while
     * {@link #reset()} will reset the barrier to its initial state and
     * {@link #isBroken()} will return false.
     *
     * @return {@code true} if one or more parties broke out of this barrier due
     *         to interruption or timeout since construction or the last reset,
     *         or a barrier action failed due to an exception; {@code false}
     *         otherwise.
     * @see #inspect()
     */
    @Override
    public synchronized boolean isBroken() {
        return this.cause != null || super.isBroken();
    }

    /**
     * Inspects if the barrier is broken. If for any reason, the barrier
     * was broken, a {@link BrokenBarrierException} will be thrown. Otherwise,
     * would return gracefully.
     *
     * @throws BrokenBarrierException With a nested broken cause.
     */
    public synchronized void inspect() throws BrokenBarrierException {
        try {
            breakIfBroken();
        } catch (BrokenBarrierException bbe) {
            initCause(bbe);
            throw bbe;
        }
    }

    /**
     * breaks this barrier if it has been reset or broken for any other reason.
     * <p/>
     * Note: This call is not atomic in respect to await/reset calls. A
     * breakIfBroken() may be context switched to invoke a reset() prior to
     * await(). This resets the barrier to its initial state - parties not
     * currently waiting at the barrier will not be accounted for! An await that
     * wasn't time limited, will block indefinitely.
     *
     * @throws BrokenBarrierException an empty BrokenBarrierException.
     */
    private synchronized void breakIfBroken()
            throws BrokenBarrierException {
        if (isBroken()) {
            throw new BrokenBarrierException();
        }
    }

    /**
     * Initializes the cause of this throwable to the value recorded by
     * {@link #reset(Throwable)}, if any.
     *
     * @param t throwable.
     */
    private synchronized void initCause(Throwable t) {
        // FIX: previously this called t.initCause(this.cause) unconditionally.
        // When no cause was recorded that consumed the throwable's one-shot
        // cause slot with null, preventing callers from ever attaching a real
        // cause to the exception they caught.
        if (this.cause != null) {
            t.initCause(this.cause);
        }
    }

    /**
     * A Barrier action to be used in conjunction with {@link ThreadBarrier} to
     * measure performance between barrier awaits. This runnable will execute
     * when the barrier is tripped: the first trip records the start time, the
     * second the end time. Make sure to {@link #reset()} the timer before the
     * next measurement.
     *
     * @see ThreadBarrier#ThreadBarrier(int, Runnable)
     */
    public static class BarrierTimer implements Runnable {
        volatile boolean started;
        volatile long startTime;
        volatile long endTime;

        public void run() {
            long t = System.nanoTime();
            if (!started) {
                started = true;
                startTime = t;
            } else {
                endTime = t;
            }
        }

        /**
         * resets (clears) this timer before next execution.
         */
        public void reset() {
            started = false;
            // FIX: clear the recorded timestamps as well, so reads between a
            // reset and the next pair of trips cannot mix values from
            // different measurement epochs (e.g. old endTime - new startTime).
            startTime = 0L;
            endTime = 0L;
        }

        /**
         * Returns the elapsed time between two successive barrier executions.
         *
         * @return elapsed time in nanoseconds.
         */
        public long getTimeInNanos() {
            return endTime - startTime;
        }

        /**
         * Returns the elapsed time between two successive barrier executions.
         *
         * @return elapsed time in seconds.
         */
        public double getTimeInSeconds() {
            long time = endTime - startTime;
            return (time) / 1000000000.0;
        }
    }
}
/**
 * A random number generator isolated to the current thread. Like the
 * global {@link java.util.Random} generator used by the {@link
 * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
 * with an internally generated seed that may not otherwise be
 * modified. When applicable, use of {@code ThreadLocalRandom} rather
 * than shared {@code Random} objects in concurrent programs will
 * typically encounter much less overhead and contention. Use of
 * {@code ThreadLocalRandom} is particularly appropriate when multiple
 * tasks use random numbers in parallel in thread pools.
 *
 * <p>Usages of this class should typically be of the form:
 * {@code ThreadLocalRandom.current().nextX(...)} (where
 * {@code X} is {@code Int}, {@code Long}, etc).
 * When all usages are of this form, it is never possible to
 * accidently share a {@code ThreadLocalRandom} across multiple threads.
 *
 * <p>This class also provides additional commonly used bounded random
 * generation methods.
 *
 * @author Doug Lea
 * @since 1.7
 */
public class ThreadLocalRandom extends Random {
    // Same linear-congruential constants as java.util.Random; redeclared here
    // because they are private in the superclass.
    private final static long MULTIPLIER = 0x5DEECE66DL;
    private final static long ADDEND = 0xBL;
    private final static long MASK = (1L << 48) - 1;

    /**
     * The random seed. We can't use super.seed.
     */
    private long rnd;

    /**
     * Permits the first and only setSeed call (made from the Random
     * constructor) to succeed, and rejects every later one - otherwise
     * seeding in one part of a program could silently affect other
     * usages by the same thread.
     */
    boolean initialized;

    // Padding to reduce memory contention among seed updates of TLRs that
    // happen to be allocated near each other.
    private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

    /**
     * The actual ThreadLocal holding one generator per thread.
     */
    private static final ThreadLocal<ThreadLocalRandom> localRandom =
            new ThreadLocal<ThreadLocalRandom>() {
                protected ThreadLocalRandom initialValue() {
                    return new ThreadLocalRandom();
                }
            };

    /**
     * Constructor called only by localRandom.initialValue. We rely on the
     * superclass no-arg constructor invoking setSeed exactly once to
     * initialize the generator.
     */
    ThreadLocalRandom() {
        super();
    }

    /**
     * Returns the current thread's {@code ThreadLocalRandom}.
     *
     * @return the current thread's {@code ThreadLocalRandom}
     */
    public static ThreadLocalRandom current() {
        return localRandom.get();
    }

    /**
     * Throws {@code UnsupportedOperationException}. Setting seeds in
     * this generator is not supported (except for the single internal
     * call made during construction).
     *
     * @throws UnsupportedOperationException always
     */
    public void setSeed(long seed) {
        if (initialized)
            throw new UnsupportedOperationException();
        initialized = true;
        rnd = (seed ^ MULTIPLIER) & MASK;
    }

    protected int next(int bits) {
        rnd = (rnd * MULTIPLIER + ADDEND) & MASK;
        return (int) (rnd >>> (48 - bits));
    }

    /**
     * Returns a pseudorandom, uniformly distributed value between the
     * given least value (inclusive) and bound (exclusive).
     *
     * @param least the least value returned
     * @param bound the upper bound (exclusive)
     * @return the next value
     * @throws IllegalArgumentException if least greater than or equal
     *                                  to bound
     */
    public int nextInt(int least, int bound) {
        if (least >= bound)
            throw new IllegalArgumentException();
        return nextInt(bound - least) + least;
    }

    /**
     * Returns a pseudorandom, uniformly distributed value
     * between 0 (inclusive) and the specified value (exclusive).
     *
     * @param n the bound on the random number to be returned. Must be
     *          positive.
     * @return the next value
     * @throws IllegalArgumentException if n is not positive
     */
    public long nextLong(long n) {
        if (n <= 0)
            throw new IllegalArgumentException("n must be positive");
        // Divide n by two until small enough for nextInt. On each
        // iteration (at most 31 of them but usually much less),
        // randomly choose both whether to include high bit in result
        // (offset) and whether to continue with the lower vs upper
        // half (which makes a difference only if odd).
        long offset = 0;
        while (n >= Integer.MAX_VALUE) {
            int bits = next(2);
            long half = n >>> 1;
            long nextn = ((bits & 2) == 0) ? half : n - half;
            if ((bits & 1) == 0)
                offset += n - nextn;
            n = nextn;
        }
        return offset + nextInt((int) n);
    }

    /**
     * Returns a pseudorandom, uniformly distributed value between the
     * given least value (inclusive) and bound (exclusive).
     *
     * @param least the least value returned
     * @param bound the upper bound (exclusive)
     * @return the next value
     * @throws IllegalArgumentException if least greater than or equal
     *                                  to bound
     */
    public long nextLong(long least, long bound) {
        if (least >= bound)
            throw new IllegalArgumentException();
        return nextLong(bound - least) + least;
    }

    /**
     * Returns a pseudorandom, uniformly distributed {@code double} value
     * between 0 (inclusive) and the specified value (exclusive).
     *
     * @param n the bound on the random number to be returned. Must be
     *          positive.
     * @return the next value
     * @throws IllegalArgumentException if n is not positive
     */
    public double nextDouble(double n) {
        if (n <= 0)
            throw new IllegalArgumentException("n must be positive");
        return nextDouble() * n;
    }

    /**
     * Returns a pseudorandom, uniformly distributed value between the
     * given least value (inclusive) and bound (exclusive).
     *
     * @param least the least value returned
     * @param bound the upper bound (exclusive)
     * @return the next value
     * @throws IllegalArgumentException if least greater than or equal
     *                                  to bound
     */
    public double nextDouble(double least, double bound) {
        if (least >= bound)
            throw new IllegalArgumentException();
        return nextDouble() * (bound - least) + least;
    }

    private static final long serialVersionUID = -5851777807851030925L;
}
/**
 * ThreadSafe
 * <p/>
 * Documents that the annotated class is thread-safe: no sequence of accesses
 * (reads and writes of public fields, calls to public methods) can put an
 * instance into an invalid state, regardless of how the runtime interleaves
 * those actions, and without any additional synchronization or coordination
 * required from the caller.
 *
 * @author kimchy (Shay Banon)
 */
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.CLASS)
public @interface ThreadSafe {
}
+ */ + +package org.elasticsearch.util.concurrent; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public class UncategorizedExecutionException extends ElasticSearchException { + + public UncategorizedExecutionException(String msg) { + super(msg); + } + + public UncategorizedExecutionException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/AbstractEntry.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/AbstractEntry.java new file mode 100644 index 00000000000..3a1e27c345d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/AbstractEntry.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import java.util.Map; + +/** + * A simple implementation of {@link java.util.Map.Entry}. 
+ * Does not implement {@link java.util.Map.Entry.setValue}, that is done by users of the class. + * + * @author Cliff Click + * @param the type of keys maintained by this map + * @param the type of mapped values + * @since 1.5 + */ + +abstract class AbstractEntry implements Map.Entry { + /** + * Strongly typed key + */ + protected final TypeK _key; + /** + * Strongly typed value + */ + protected TypeV _val; + + public AbstractEntry(final TypeK key, final TypeV val) { + _key = key; + _val = val; + } + + public AbstractEntry(final Map.Entry e) { + _key = e.getKey(); + _val = e.getValue(); + } + + /** + * Return "key=val" string + */ + public String toString() { + return _key + "=" + _val; + } + + /** + * Return key + */ + public TypeK getKey() { + return _key; + } + + /** + * Return val + */ + public TypeV getValue() { + return _val; + } + + /** + * Equal if the underlying key & value are equal + */ + public boolean equals(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + return eq(_key, e.getKey()) && eq(_val, e.getValue()); + } + + /** + * Compute "key.hashCode() ^ val.hashCode()" + */ + public int hashCode() { + return + ((_key == null) ? 0 : _key.hashCode()) ^ + ((_val == null) ? 0 : _val.hashCode()); + } + + private static boolean eq(final Object o1, final Object o2) { + return (o1 == null ? o2 == null : o1.equals(o2)); + } +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/ConcurrentAutoTable.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/ConcurrentAutoTable.java new file mode 100644 index 00000000000..f4900cd8905 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/ConcurrentAutoTable.java @@ -0,0 +1,350 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import sun.misc.Unsafe; + +import java.io.Serializable; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +/** + * An auto-resizing table of {@code longs}, supporting low-contention CAS + * operations. Updates are done with CAS's to no particular table element. + * The intent is to support highly scalable counters, r/w locks, and other + * structures where the updates are associative, loss-free (no-brainer), and + * otherwise happen at such a high volume that the cache contention for + * CAS'ing a single word is unacceptable. + *

    + *

    This API is overkill for simple counters (e.g. no need for the 'mask') + * and is untested as an API for making a scalable r/w lock and so is likely + * to change! + * + * @author Cliff Click + * @since 1.5 + */ + + +public class ConcurrentAutoTable implements Serializable { + + // --- public interface --- + + /** + * Add the given value to current counter value. Concurrent updates will + * not be lost, but addAndGet or getAndAdd are not implemented because the + * total counter value (i.e., {@link #get}) is not atomically updated. + * Updates are striped across an array of counters to avoid cache contention + * and has been tested with performance scaling linearly up to 768 CPUs. + */ + public void add(long x) { + add_if_mask(x, 0); + } + + /** + * {@link #add} with -1 + */ + public void decrement() { + add_if_mask(-1L, 0); + } + + /** + * {@link #add} with +1 + */ + public void increment() { + add_if_mask(1L, 0); + } + + /** + * Atomically set the sum of the striped counters to specified value. + * Rather more expensive than a simple store, in order to remain atomic. + */ + public void set(long x) { + CAT newcat = new CAT(null, 4, x); + // Spin until CAS works + while (!CAS_cat(_cat, newcat)) ; + } + + /** + * Current value of the counter. Since other threads are updating furiously + * the value is only approximate, but it includes all counts made by the + * current thread. Requires a pass over the internally striped counters. + */ + public long get() { + return _cat.sum(0); + } + + /** + * Same as {@link #get}, included for completeness. + */ + public int intValue() { + return (int) _cat.sum(0); + } + + /** + * Same as {@link #get}, included for completeness. + */ + public long longValue() { + return _cat.sum(0); + } + + /** + * A cheaper {@link #get}. Updated only once/millisecond, but as fast as a + * simple load instruction when not updating. 
+ */ + public long estimate_get() { + return _cat.estimate_sum(0); + } + + /** + * Return the counter's {@code long} value converted to a string. + */ + public String toString() { + return _cat.toString(0); + } + + /** + * A more verbose print than {@link #toString}, showing internal structure. + * Useful for debugging. + */ + public void print() { + _cat.print(); + } + + /** + * Return the internal counter striping factor. Useful for diagnosing + * performance problems. + */ + public int internal_size() { + return _cat._t.length; + } + + // Only add 'x' to some slot in table, hinted at by 'hash', if bits under + // the mask are all zero. The sum can overflow or 'x' can contain bits in + // the mask. Value is CAS'd so no counts are lost. The CAS is retried until + // it succeeds or bits are found under the mask. Returned value is the old + // value - which WILL have zero under the mask on success and WILL NOT have + // zero under the mask for failure. + + private long add_if_mask(long x, long mask) { + return _cat.add_if_mask(x, mask, hash(), this); + } + + // The underlying array of concurrently updated long counters + private volatile CAT _cat = new CAT(null, 4/*Start Small, Think Big!*/, 0L); + private static final AtomicReferenceFieldUpdater _catUpdater = + AtomicReferenceFieldUpdater.newUpdater(ConcurrentAutoTable.class, CAT.class, "_cat"); + + private boolean CAS_cat(CAT oldcat, CAT newcat) { + return _catUpdater.compareAndSet(this, oldcat, newcat); + } + + // Hash spreader + + private static final int hash() { + int h = System.identityHashCode(Thread.currentThread()); + // You would think that System.identityHashCode on the current thread + // would be a good hash fcn, but actually on SunOS 5.8 it is pretty lousy + // in the low bits. + h ^= (h >>> 20) ^ (h >>> 12); // Bit spreader, borrowed from Doug Lea + h ^= (h >>> 7) ^ (h >>> 4); + return h << 2; // Pad out cache lines. 
The goal is to avoid cache-line contention + } + + // --- CAT ----------------------------------------------------------------- + + private static class CAT implements Serializable { + + // Unsafe crud: get a function which will CAS arrays + private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); + private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); + private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); + + private static long rawIndex(long[] ary, int i) { + assert i >= 0 && i < ary.length; + return _Lbase + i * _Lscale; + } + + private final static boolean CAS(long[] A, int idx, long old, long nnn) { + return _unsafe.compareAndSwapLong(A, rawIndex(A, idx), old, nnn); + } + + volatile long _resizers; // count of threads attempting a resize + static private final AtomicLongFieldUpdater _resizerUpdater = + AtomicLongFieldUpdater.newUpdater(CAT.class, "_resizers"); + + private final CAT _next; + private volatile long _sum_cache; + private volatile long _fuzzy_sum_cache; + private volatile long _fuzzy_time; + private static final int MAX_SPIN = 2; + private long[] _t; // Power-of-2 array of longs + + CAT(CAT next, int sz, long init) { + _next = next; + _sum_cache = Long.MIN_VALUE; + _t = new long[sz]; + _t[0] = init; + } + + // Only add 'x' to some slot in table, hinted at by 'hash', if bits under + // the mask are all zero. The sum can overflow or 'x' can contain bits in + // the mask. Value is CAS'd so no counts are lost. The CAS is attempted + // ONCE. 
+ + public long add_if_mask(long x, long mask, int hash, ConcurrentAutoTable master) { + long[] t = _t; + int idx = hash & (t.length - 1); + // Peel loop; try once fast + long old = t[idx]; + boolean ok = CAS(t, idx, old & ~mask, old + x); + if (_sum_cache != Long.MIN_VALUE) + _sum_cache = Long.MIN_VALUE; // Blow out cache + if (ok) return old; // Got it + if ((old & mask) != 0) return old; // Failed for bit-set under mask + // Try harder + int cnt = 0; + while (true) { + old = t[idx]; + if ((old & mask) != 0) return old; // Failed for bit-set under mask + if (CAS(t, idx, old, old + x)) break; // Got it! + cnt++; + } + if (cnt < MAX_SPIN) return old; // Allowable spin loop count + if (t.length >= 1024 * 1024) return old; // too big already + + // Too much contention; double array size in an effort to reduce contention + long r = _resizers; + int newbytes = (t.length << 1) << 3/*word to bytes*/; + while (!_resizerUpdater.compareAndSet(this, r, r + newbytes)) + r = _resizers; + r += newbytes; + if (master._cat != this) return old; // Already doubled, don't bother + if ((r >> 17) != 0) { // Already too much allocation attempts? + // TODO - use a wait with timeout, so we'll wakeup as soon as the new + // table is ready, or after the timeout in any case. Annoyingly, this + // breaks the non-blocking property - so for now we just briefly sleep. + //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup + try { + Thread.sleep(r >> 17); + } catch (InterruptedException e) { + } + if (master._cat != this) return old; + } + + CAT newcat = new CAT(this, t.length * 2, 0); + // Take 1 stab at updating the CAT with the new larger size. If this + // fails, we assume some other thread already expanded the CAT - so we + // do not need to retry until it succeeds. + master.CAS_cat(this, newcat); + return old; + } + + + // Return the current sum of all things in the table, stripping off mask + // before the add. 
Writers can be updating the table furiously, so the + // sum is only locally accurate. + + public long sum(long mask) { + long sum = _sum_cache; + if (sum != Long.MIN_VALUE) return sum; + sum = _next == null ? 0 : _next.sum(mask); // Recursively get cached sum + long[] t = _t; + for (int i = 0; i < t.length; i++) + sum += t[i] & (~mask); + _sum_cache = sum; // Cache includes recursive counts + return sum; + } + + // Fast fuzzy version. Used a cached value until it gets old, then re-up + // the cache. + + public long estimate_sum(long mask) { + // For short tables, just do the work + if (_t.length <= 64) return sum(mask); + // For bigger tables, periodically freshen a cached value + long millis = System.currentTimeMillis(); + if (_fuzzy_time != millis) { // Time marches on? + _fuzzy_sum_cache = sum(mask); // Get sum the hard way + _fuzzy_time = millis; // Indicate freshness of cached value + } + return _fuzzy_sum_cache; // Return cached sum + } + + // Update all table slots with CAS. + + public void all_or(long mask) { + long[] t = _t; + for (int i = 0; i < t.length; i++) { + boolean done = false; + while (!done) { + long old = t[i]; + done = CAS(t, i, old, old | mask); + } + } + if (_next != null) _next.all_or(mask); + if (_sum_cache != Long.MIN_VALUE) + _sum_cache = Long.MIN_VALUE; // Blow out cache + } + + public void all_and(long mask) { + long[] t = _t; + for (int i = 0; i < t.length; i++) { + boolean done = false; + while (!done) { + long old = t[i]; + done = CAS(t, i, old, old & mask); + } + } + if (_next != null) _next.all_and(mask); + if (_sum_cache != Long.MIN_VALUE) + _sum_cache = Long.MIN_VALUE; // Blow out cache + } + + // Set/stomp all table slots. No CAS. 
+ + public void all_set(long val) { + long[] t = _t; + for (int i = 0; i < t.length; i++) + t[i] = val; + if (_next != null) _next.all_set(val); + if (_sum_cache != Long.MIN_VALUE) + _sum_cache = Long.MIN_VALUE; // Blow out cache + } + + String toString(long mask) { + return Long.toString(sum(mask)); + } + + public void print() { + long[] t = _t; + System.out.print("[sum=" + _sum_cache + "," + t[0]); + for (int i = 1; i < t.length; i++) + System.out.print("," + t[i]); + System.out.print("]"); + if (_next != null) _next.print(); + } + } +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/Counter.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/Counter.java new file mode 100644 index 00000000000..6a5f2265a14 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/Counter.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +/** + * A simple high-performance counter. Merely renames the extended {@link + * org.cliffc.high_scale_lib.ConcurrentAutoTable} class to be more obvious. + * {@link org.cliffc.high_scale_lib.ConcurrentAutoTable} already has a decent + * counting API. + * + * @author Cliff Click + * @since 1.5 + */ + +public class Counter extends ConcurrentAutoTable { + + // Add the given value to current counter value. Concurrent updates will + // not be lost, but addAndGet or getAndAdd are not implemented because but + // the total counter value is not atomically updated. + //public void add( long x ); + //public void decrement(); + //public void increment(); + + // Current value of the counter. Since other threads are updating furiously + // the value is only approximate, but it includes all counts made by the + // current thread. Requires a pass over all the striped counters. + //public long get(); + //public int intValue(); + //public long longValue(); + + // A cheaper 'get'. Updated only once/millisecond, but fast as a simple + // load instruction when not updating. + //public long estimate_get( ); + +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashMap.java new file mode 100644 index 00000000000..2c0aaf28e8d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashMap.java @@ -0,0 +1,1567 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import sun.misc.Unsafe; + +import java.io.IOException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +/** + * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap} + * with better scaling properties and generally lower costs to mutate the Map. + * It provides identical correctness properties as ConcurrentHashMap. All + * operations are non-blocking and multi-thread safe, including all update + * operations. {@link NonBlockingHashMap} scales substatially better than + * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a + * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU + * Azul box, even with 100% updates or 100% reads or any fraction in-between. + * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, + * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. + *

    + * This class obeys the same functional specification as {@link + * java.util.Hashtable}, and includes versions of methods corresponding to + * each method of Hashtable. However, even though all operations are + * thread-safe, operations do not entail locking and there is + * not any support for locking the entire table in a way that + * prevents all access. This class is fully interoperable with + * Hashtable in programs that rely on its thread safety but not on + * its synchronization details. + *

    + *

    Operations (including put) generally do not block, so may + * overlap with other update operations (including other puts and + * removes). Retrievals reflect the results of the most recently + * completed update operations holding upon their onset. For + * aggregate operations such as putAll, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, Iterators + * and Enumerations return elements reflecting the state of the hash table at + * some point at or since the creation of the iterator/enumeration. They do + * not throw {@link ConcurrentModificationException}. However, + * iterators are designed to be used by only one thread at a time. + *

    + *

    Very full tables, or tables with high reprobe rates may trigger an + * internal resize operation to move into a larger table. Resizing is not + * terribly expensive, but it is not free either; during resize operations + * table throughput may drop somewhat. All threads that visit the table + * during a resize will 'help' the resizing but will still be allowed to + * complete their operation before the resize is finished (i.e., a simple + * 'get' operation on a million-entry table undergoing resizing will not need + * to block until the entire million entries are copied). + *

    + *

    This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + *

    + *

    Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a key or value. + * + * @author Cliff Click + * @author Prashant Deva - moved hash() function out of get_impl() so it is + * not calculated multiple times. + * @version 1.1.2 + * @param the type of keys maintained by this map + * @param the type of mapped values + * @since 1.5 + */ + +public class NonBlockingHashMap + extends AbstractMap + implements ConcurrentMap, Cloneable, Serializable { + + private static final long serialVersionUID = 1234123412341234123L; + + private static final int REPROBE_LIMIT = 10; // Too many reprobes then force a table-resize + + // --- Bits to allow Unsafe access to arrays + private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); + private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); + private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); + + private static long rawIndex(final Object[] ary, final int idx) { + assert idx >= 0 && idx < ary.length; + return _Obase + idx * _Oscale; + } + + // --- Setup to use Unsafe + private static final long _kvs_offset; + + static { // + Field f = null; + try { + f = NonBlockingHashMap.class.getDeclaredField("_kvs"); + } + catch (java.lang.NoSuchFieldException e) { + throw new RuntimeException(e); + } + _kvs_offset = _unsafe.objectFieldOffset(f); + } + + private final boolean CAS_kvs(final Object[] oldkvs, final Object[] newkvs) { + return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs); + } + + // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class + + private static final class Prime { + final Object _V; + + Prime(Object V) { + _V = V; + } + + static Object unbox(Object V) { + return V instanceof Prime ? 
((Prime) V)._V : V; + } + } + + // --- hash ---------------------------------------------------------------- + // Helper function to spread lousy hashCodes + + private static final int hash(final Object key) { + int h = key.hashCode(); // The real hashCode call + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + } + + // --- The Hash Table -------------------- + // Slot 0 is always used for a 'CHM' entry below to hold the interesting + // bits of the hash table. Slot 1 holds full hashes as an array of ints. + // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table + // can be atomically replaced by CASing the _kvs field. + // + // Why is CHM buried inside the _kvs Object array, instead of the other way + // around? The CHM info is used during resize events and updates, but not + // during standard 'get' operations. I assume 'get' is much more frequent + // than 'put'. 'get' can skip the extra indirection of skipping through the + // CHM to reach the _kvs array. + private transient Object[] _kvs; + + private static final CHM chm(Object[] kvs) { + return (CHM) kvs[0]; + } + + private static final int[] hashes(Object[] kvs) { + return (int[]) kvs[1]; + } + + // Number of K,V pairs in the table + + private static final int len(Object[] kvs) { + return (kvs.length - 2) >> 1; + } + + // Time since last resize + private transient long _last_resize_milli; + + // --- Minimum table size ---------------- + // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a + // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul. 
+ private static final int MIN_SIZE_LOG = 3; // + private static final int MIN_SIZE = (1 << MIN_SIZE_LOG); // Must be power of 2 + + // --- Sentinels ------------------------- + // No-Match-Old - putIfMatch does updates only if it matches the old value, + // and NO_MATCH_OLD basically counts as a wildcard match. + private static final Object NO_MATCH_OLD = new Object(); // Sentinel + // Match-Any-not-null - putIfMatch does updates only if it find a real old + // value. + private static final Object MATCH_ANY = new Object(); // Sentinel + // This K/V pair has been deleted (but the Key slot is forever claimed). + // The same Key can be reinserted with a new value later. + private static final Object TOMBSTONE = new Object(); + // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a + // table resize started. The K/V pair has been marked so that no new + // updates can happen to the old table (and since the K/V pair was deleted + // nothing was copied to the new table). + private static final Prime TOMBPRIME = new Prime(TOMBSTONE); + + // --- key,val ------------------------------------------------------------- + // Access K,V for a given idx + // + // Note that these are static, so that the caller is forced to read the _kvs + // field only once, and share that read across all key/val calls - lest the + // _kvs field move out from under us and back-to-back key & val calls refer + // to different _kvs arrays. 
+ + private static final Object key(Object[] kvs, int idx) { + return kvs[(idx << 1) + 2]; + } + + private static final Object val(Object[] kvs, int idx) { + return kvs[(idx << 1) + 3]; + } + + private static final boolean CAS_key(Object[] kvs, int idx, Object old, Object key) { + return _unsafe.compareAndSwapObject(kvs, rawIndex(kvs, (idx << 1) + 2), old, key); + } + + private static final boolean CAS_val(Object[] kvs, int idx, Object old, Object val) { + return _unsafe.compareAndSwapObject(kvs, rawIndex(kvs, (idx << 1) + 3), old, val); + } + + + // --- dump ---------------------------------------------------------------- + + /** + * Verbose printout of table internals, useful for debugging. + */ + public final void print() { + System.out.println("========="); + print2(_kvs); + System.out.println("========="); + } + + // print the entire state of the table + + private final void print(Object[] kvs) { + for (int i = 0; i < len(kvs); i++) { + Object K = key(kvs, i); + if (K != null) { + String KS = (K == TOMBSTONE) ? "XXX" : K.toString(); + Object V = val(kvs, i); + Object U = Prime.unbox(V); + String p = (V == U) ? "" : "prime_"; + String US = (U == TOMBSTONE) ? "tombstone" : U.toString(); + System.out.println("" + i + " (" + KS + "," + p + US + ")"); + } + } + Object[] newkvs = chm(kvs)._newkvs; // New table, if any + if (newkvs != null) { + System.out.println("----"); + print(newkvs); + } + } + + // print only the live values, broken down by the table they are in + + private final void print2(Object[] kvs) { + for (int i = 0; i < len(kvs); i++) { + Object key = key(kvs, i); + Object val = val(kvs, i); + Object U = Prime.unbox(val); + if (key != null && key != TOMBSTONE && // key is sane + val != null && U != TOMBSTONE) { // val is sane + String p = (val == U) ? 
"" : "prime_"; + System.out.println("" + i + " (" + key + "," + p + val + ")"); + } + } + Object[] newkvs = chm(kvs)._newkvs; // New table, if any + if (newkvs != null) { + System.out.println("----"); + print2(newkvs); + } + } + + // Count of reprobes + private transient Counter _reprobes = new Counter(); + + /** + * Get and clear the current count of reprobes. Reprobes happen on key + * collisions, and a high reprobe rate may indicate a poor hash function or + * weaknesses in the table resizing function. + * + * @return the count of reprobes since the last call to {@link #reprobes} + * or since the table was created. + */ + public long reprobes() { + long r = _reprobes.get(); + _reprobes = new Counter(); + return r; + } + + + // --- reprobe_limit ----------------------------------------------------- + // Heuristic to decide if we have reprobed toooo many times. Running over + // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it + // can trigger a table resize. Several places must have exact agreement on + // what the reprobe_limit is, so we share it here. + + private static final int reprobe_limit(int len) { + return REPROBE_LIMIT + (len >> 2); + } + + // --- NonBlockingHashMap -------------------------------------------------- + // Constructors + + /** + * Create a new NonBlockingHashMap with default minimum size (currently set + * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). + */ + public NonBlockingHashMap() { + this(MIN_SIZE); + } + + /** + * Create a new NonBlockingHashMap with initial room for the given number of + * elements, thus avoiding internal resizing operations to reach an + * appropriate size. Large numbers here when used with a small count of + * elements will sacrifice space for a small amount of time gained. The + * initial size will be rounded up internally to the next larger power of 2. 
+ */ + public NonBlockingHashMap(final int initial_sz) { + initialize(initial_sz); + } + + private final void initialize(int initial_sz) { + if (initial_sz < 0) throw new IllegalArgumentException(); + int i; // Convert to next largest power-of-2 + if (initial_sz > 1024 * 1024) initial_sz = 1024 * 1024; + for (i = MIN_SIZE_LOG; (1 << i) < (initial_sz << 2); i++) ; + // Double size for K,V pairs, add 1 for CHM and 1 for hashes + _kvs = new Object[((1 << i) << 1) + 2]; + _kvs[0] = new CHM(new Counter()); // CHM in slot 0 + _kvs[1] = new int[1 << i]; // Matching hash entries + _last_resize_milli = System.currentTimeMillis(); + } + + // Version for subclassed readObject calls, to be called after the defaultReadObject + + protected final void initialize() { + initialize(MIN_SIZE); + } + + // --- wrappers ------------------------------------------------------------ + + /** + * Returns the number of key-value mappings in this map. + * + * @return the number of key-value mappings in this map + */ + @Override + public int size() { + return chm(_kvs).size(); + } + + /** + * Returns size() == 0. + * + * @return size() == 0 + */ + @Override + public boolean isEmpty() { + return size() == 0; + } + + /** + * Tests if the key in the table using the equals method. + * + * @return true if the key is in the table using the equals method + * @throws NullPointerException if the specified key is null + */ + @Override + public boolean containsKey(Object key) { + return get(key) != null; + } + + /** + * Legacy method testing if some key maps into the specified value in this + * table. This method is identical in functionality to {@link + * #containsValue}, and exists solely to ensure full compatibility with + * class {@link java.util.Hashtable}, which supported this method prior to + * introduction of the Java Collections framework. 
+ * + * @param val a value to search for + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object val) { + return containsValue(val); + } + + /** + * Maps the specified key to the specified value in the table. Neither key + * nor value can be null. + *

    The value can be retrieved by calling {@link #get} with a key that is + * equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param val value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + @Override + public TypeV put(TypeK key, TypeV val) { + return putIfMatch(key, val, NO_MATCH_OLD); + } + + /** + * Atomically, do a {@link #put} if-and-only-if the key is not mapped. + * Useful to ensure that only a single mapping for the key exists, even if + * many threads are trying to create the mapping in parallel. + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public TypeV putIfAbsent(TypeK key, TypeV val) { + return putIfMatch(key, val, TOMBSTONE); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + @Override + public TypeV remove(Object key) { + return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD); + } + + /** + * Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped + * to a value which is equals to the given value. + * + * @throws NullPointerException if the specified key or value is null + */ + public boolean remove(Object key, Object val) { + return putIfMatch(key, TOMBSTONE, val) == val; + } + + /** + * Atomically do a put(key,val) if-and-only-if the key is + * mapped to some value already. 
+ * + * @throws NullPointerException if the specified key or value is null + */ + public TypeV replace(TypeK key, TypeV val) { + return putIfMatch(key, val, MATCH_ANY); + } + + /** + * Atomically do a put(key,newValue) if-and-only-if the key is + * mapped a value which is equals to oldValue. + * + * @throws NullPointerException if the specified key or value is null + */ + public boolean replace(TypeK key, TypeV oldValue, TypeV newValue) { + return putIfMatch(key, newValue, oldValue) == oldValue; + } + + private final TypeV putIfMatch(Object key, Object newVal, Object oldVal) { + if (oldVal == null || newVal == null) throw new NullPointerException(); + final Object res = putIfMatch(this, _kvs, key, newVal, oldVal); + assert !(res instanceof Prime); + assert res != null; + return res == TOMBSTONE ? null : (TypeV) res; + } + + + /** + * Copies all of the mappings from the specified map to this one, replacing + * any existing mappings. + * + * @param m mappings to be stored in this map + */ + @Override + public void putAll(Map m) { + for (Map.Entry e : m.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Removes all of the mappings from this map. + */ + @Override + public void clear() { // Smack a new empty table down + Object[] newkvs = new NonBlockingHashMap(MIN_SIZE)._kvs; + while (!CAS_kvs(_kvs, newkvs)) // Spin until the clear works + ; + } + + /** + * Returns true if this Map maps one or more keys to the specified + * value. Note: This method requires a full internal traversal of the + * hash table and is much slower than {@link #containsKey}. 
+ * + * @param val value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + @Override + public boolean containsValue(final Object val) { + if (val == null) throw new NullPointerException(); + for (TypeV V : values()) + if (V == val || V.equals(val)) + return true; + return false; + } + + // This function is supposed to do something for Hashtable, and the JCK + // tests hang until it gets called... by somebody ... for some reason, + // any reason.... + + protected void rehash() { + } + + /** + * Creates a shallow copy of this hashtable. All the structure of the + * hashtable itself is copied, but the keys and values are not cloned. + * This is a relatively expensive operation. + * + * @return a clone of the hashtable. + */ + @Override + public Object clone() { + try { + // Must clone, to get the class right; NBHM might have been + // extended so it would be wrong to just make a new NBHM. + NonBlockingHashMap t = (NonBlockingHashMap) super.clone(); + // But I don't have an atomic clone operation - the underlying _kvs + // structure is undergoing rapid change. If I just clone the _kvs + // field, the CHM in _kvs[0] won't be in sync. + // + // Wipe out the cloned array (it was shallow anyways). + t.clear(); + // Now copy sanely + for (TypeK K : keySet()) { + final TypeV V = get(K); // Do an official 'get' + t.put(K, V); + } + return t; + } catch (CloneNotSupportedException e) { + // this shouldn't happen, since we are Cloneable + throw new InternalError(); + } + } + + /** + * Returns a string representation of this map. The string representation + * consists of a list of key-value mappings in the order returned by the + * map's entrySet view's iterator, enclosed in braces + * ("{}"). Adjacent mappings are separated by the characters + * ", " (comma and space). 
Each key-value mapping is rendered as + * the key followed by an equals sign ("=") followed by the + * associated value. Keys and values are converted to strings as by + * {@link String#valueOf(Object)}. + * + * @return a string representation of this map + */ + @Override + public String toString() { + Iterator> i = entrySet().iterator(); + if (!i.hasNext()) + return "{}"; + + StringBuilder sb = new StringBuilder(); + sb.append('{'); + for (; ;) { + Entry e = i.next(); + TypeK key = e.getKey(); + TypeV value = e.getValue(); + sb.append(key == this ? "(this Map)" : key); + sb.append('='); + sb.append(value == this ? "(this Map)" : value); + if (!i.hasNext()) + return sb.append('}').toString(); + sb.append(", "); + } + } + + // --- keyeq --------------------------------------------------------------- + // Check for key equality. Try direct pointer compare first, then see if + // the hashes are unequal (fast negative test) and finally do the full-on + // 'equals' v-call. + + private static boolean keyeq(Object K, Object key, int[] hashes, int hash, int fullhash) { + return + K == key || // Either keys match exactly OR + // hash exists and matches? hash can be zero during the install of a + // new key/value pair. + ((hashes[hash] == 0 || hashes[hash] == fullhash) && + // Do not call the users' "equals()" call with a Tombstone, as this can + // surprise poorly written "equals()" calls that throw exceptions + // instead of simply returning false. + K != TOMBSTONE && // Do not call users' equals call with a Tombstone + // Do the match the hard way - with the users' key being the loop- + // invariant "this" pointer. I could have flipped the order of + // operands (since equals is commutative), but I'm making mega-morphic + // v-calls in a reprobing loop and nailing down the 'this' argument + // gives both the JIT and the hardware a chance to prefetch the call target. 
+ key.equals(K)); // Finally do the hard match + } + + // --- get ----------------------------------------------------------------- + + /** + * Returns the value to which the specified key is mapped, or {@code null} + * if this map contains no mapping for the key. + *

    More formally, if this map contains a mapping from a key {@code k} to + * a value {@code v} such that {@code key.equals(k)}, then this method + * returns {@code v}; otherwise it returns {@code null}. (There can be at + * most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + // Never returns a Prime nor a Tombstone. + @Override + public TypeV get(Object key) { + final int fullhash = hash(key); // throws NullPointerException if key is null + final Object V = get_impl(this, _kvs, key, fullhash); + assert !(V instanceof Prime); // Never return a Prime + return (TypeV) V; + } + + private static final Object get_impl(final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final int fullhash) { + final int len = len(kvs); // Count of key/value pairs, reads kvs.length + final CHM chm = chm(kvs); // The CHM, for a volatile read below; reads slot 0 of kvs + final int[] hashes = hashes(kvs); // The memoized hashes; reads slot 1 of kvs + + int idx = fullhash & (len - 1); // First key hash + + // Main spin/reprobe loop, looking for a Key hit + int reprobe_cnt = 0; + while (true) { + // Probe table. Each read of 'val' probably misses in cache in a big + // table; hopefully the read of 'key' then hits in cache. + final Object K = key(kvs, idx); // Get key before volatile read, could be null + final Object V = val(kvs, idx); // Get value before volatile read, could be null or Tombstone or Prime + if (K == null) return null; // A clear miss + + // We need a volatile-read here to preserve happens-before semantics on + // newly inserted Keys. If the Key body was written just before inserting + // into the table a Key-compare here might read the uninitalized Key body. + // Annoyingly this means we have to volatile-read before EACH key compare. + // . + // We also need a volatile-read between reading a newly inserted Value + // and returning the Value (so the user might end up reading the stale + // Value contents). 
Same problem as with keys - and the one volatile + // read covers both. + final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare + + // Key-compare + if (keyeq(K, key, hashes, idx, fullhash)) { + // Key hit! Check for no table-copy-in-progress + if (!(V instanceof Prime)) // No copy? + return (V == TOMBSTONE) ? null : V; // Return the value + // Key hit - but slot is (possibly partially) copied to the new table. + // Finish the copy & retry in the new table. + return get_impl(topmap, chm.copy_slot_and_check(topmap, kvs, idx, key), key, fullhash); // Retry in the new table + } + // get and put must have the same key lookup logic! But only 'put' + // needs to force a table-resize for a too-long key-reprobe sequence. + // Check for too-many-reprobes on get - and flip to the new table. + if (++reprobe_cnt >= reprobe_limit(len) || // too many probes + key == TOMBSTONE) // found a TOMBSTONE key, means no more keys in this table + return newkvs == null ? null : get_impl(topmap, topmap.help_copy(newkvs), key, fullhash); // Retry in the new table + + idx = (idx + 1) & (len - 1); // Reprobe by 1! (could now prefetch) + } + } + + // --- putIfMatch --------------------------------------------------------- + // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned + // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be + // assumed to work (although might have been immediately overwritten). Only + // the path through copy_slot passes in an expected value of null, and + // putIfMatch only returns a null if passed in an expected null. 
    private static final Object putIfMatch(final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal) {
        assert putval != null;
        assert !(putval instanceof Prime);
        assert !(expVal instanceof Prime);
        final int fullhash = hash(key); // throws NullPointerException if key null
        final int len = len(kvs); // Count of key/value pairs, reads kvs.length
        final CHM chm = chm(kvs); // Reads kvs[0]
        final int[] hashes = hashes(kvs); // Reads kvs[1], read before kvs[0]
        int idx = fullhash & (len - 1);

        // ---
        // Key-Claim stanza: spin till we can claim a Key (or force a resizing).
        int reprobe_cnt = 0;
        Object K = null, V = null;
        Object[] newkvs = null;
        while (true) { // Spin till we get a Key slot
            V = val(kvs, idx); // Get old value (before volatile read below!)
            K = key(kvs, idx); // Get current key
            if (K == null) { // Slot is free?
                // Found an empty Key slot - which means this Key has never been in
                // this table.  No need to put a Tombstone - the Key is not here!
                if (putval == TOMBSTONE) return putval; // Not-now & never-been in this table
                // Claim the null key-slot
                if (CAS_key(kvs, idx, null, key)) { // Claim slot for Key
                    chm._slots.add(1); // Raise key-slots-used count
                    hashes[idx] = fullhash; // Memoize fullhash
                    break; // Got it!
                }
                // CAS to claim the key-slot failed.
                //
                // This re-read of the Key points out an annoying short-coming of Java
                // CAS.  Most hardware CAS's report back the existing value - so that
                // if you fail you have a *witness* - the value which caused the CAS
                // to fail.  The Java API turns this into a boolean destroying the
                // witness.  Re-reading does not recover the witness because another
                // thread can write over the memory after the CAS.  Hence we can be in
                // the unfortunate situation of having a CAS fail *for cause* but
                // having that cause removed by a later store.  This turns a
                // non-spurious-failure CAS (such as Azul has) into one that can
                // apparently spuriously fail - and we avoid apparent spurious failure
                // by not allowing Keys to ever change.
                K = key(kvs, idx); // CAS failed, get updated value
                assert K != null; // If keys[idx] is null, CAS shoulda worked
            }
            // Key slot was not null, there exists a Key here

            // We need a volatile-read here to preserve happens-before semantics on
            // newly inserted Keys.  If the Key body was written just before inserting
            // into the table a Key-compare here might read the uninitalized Key body.
            // Annoyingly this means we have to volatile-read before EACH key compare.
            newkvs = chm._newkvs; // VOLATILE READ before key compare

            if (keyeq(K, key, hashes, idx, fullhash))
                break; // Got it!

            // get and put must have the same key lookup logic!  Lest 'get' give
            // up looking too soon.
            //topmap._reprobes.add(1);
            if (++reprobe_cnt >= reprobe_limit(len) || // too many probes or
                    key == TOMBSTONE) { // found a TOMBSTONE key, means no more keys
                // We simply must have a new table to do a 'put'.  At this point a
                // 'get' will also go to the new table (if any).  We do not need
                // to claim a key slot (indeed, we cannot find a free one to claim!).
                newkvs = chm.resize(topmap, kvs);
                if (expVal != null) topmap.help_copy(newkvs); // help along an existing copy
                return putIfMatch(topmap, newkvs, key, putval, expVal);
            }

            idx = (idx + 1) & (len - 1); // Reprobe!
        } // End of spinning till we get a Key slot

        // ---
        // Found the proper Key slot, now update the matching Value slot.  We
        // never put a null, so Value slots monotonically move from null to
        // not-null (deleted Values use Tombstone).  Thus if 'V' is null we
        // fail this fast cutout and fall into the check for table-full.
        if (putval == V) return V; // Fast cutout for no-change

        // See if we want to move to a new table (to avoid high average re-probe
        // counts).  We only check on the initial set of a Value from null to
        // not-null (i.e., once per key-insert).  Of course we got a 'free' check
        // of newkvs once per key-compare (not really free, but paid-for by the
        // time we get here).
        if (newkvs == null && // New table-copy already spotted?
                // Once per fresh key-insert check the hard way
                ((V == null && chm.tableFull(reprobe_cnt, len)) ||
                        // Or we found a Prime, but the JMM allowed reordering such that we
                        // did not spot the new table (very rare race here: the writing
                        // thread did a CAS of _newkvs then a store of a Prime.  This thread
                        // reads the Prime, then reads _newkvs - but the read of Prime was so
                        // delayed (or the read of _newkvs was so accelerated) that they
                        // swapped and we still read a null _newkvs.  The resize call below
                        // will do a CAS on _newkvs forcing the read.
                        V instanceof Prime))
            newkvs = chm.resize(topmap, kvs); // Force the new table copy to start
        // See if we are moving to a new table.
        // If so, copy our slot and retry in the new table.
        if (newkvs != null)
            return putIfMatch(topmap, chm.copy_slot_and_check(topmap, kvs, idx, expVal), key, putval, expVal);

        // ---
        // We are finally prepared to update the existing table
        while (true) {
            assert !(V instanceof Prime);

            // Must match old, and we do not?  Then bail out now.  Note that either V
            // or expVal might be TOMBSTONE.  Also V can be null, if we've never
            // inserted a value before.  expVal can be null if we are called from
            // copy_slot.

            if (expVal != NO_MATCH_OLD && // Do we care about expected-Value at all?
                    V != expVal && // No instant match already?
                    (expVal != MATCH_ANY || V == TOMBSTONE || V == null) &&
                    !(V == null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo
                    (expVal == null || !expVal.equals(V))) // Expensive equals check at the last
                return V; // Do not update!

            // Actually change the Value in the Key,Value pair
            if (CAS_val(kvs, idx, V, putval)) {
                // CAS succeeded - we did the update!
                // Both normal put's and table-copy calls putIfMatch, but table-copy
                // does not (effectively) increase the number of live k/v pairs.
                if (expVal != null) {
                    // Adjust sizes - a striped counter
                    if ((V == null || V == TOMBSTONE) && putval != TOMBSTONE) chm._size.add(1);
                    if (!(V == null || V == TOMBSTONE) && putval == TOMBSTONE) chm._size.add(-1);
                }
                return (V == null && expVal != null) ? TOMBSTONE : V;
            }
            // Else CAS failed
            V = val(kvs, idx); // Get new value
            // If a Prime'd value got installed, we need to re-run the put on the
            // new table.  Otherwise we lost the CAS to another racing put.
            // Simply retry from the start.
            if (V instanceof Prime)
                return putIfMatch(topmap, chm.copy_slot_and_check(topmap, kvs, idx, expVal), key, putval, expVal);
        }
    }

    // --- help_copy ---------------------------------------------------------
    // Help along an existing resize operation.  This is just a fast cut-out
    // wrapper, to encourage inlining for the fast no-copy-in-progress case.  We
    // always help the top-most table copy, even if there are nested table
    // copies in progress.

    private final Object[] help_copy(Object[] helper) {
        // Read the top-level KVS only once.  We'll try to help this copy along,
        // even if it gets promoted out from under us (i.e., the copy completes
        // and another KVS becomes the top-level copy).
+ Object[] topkvs = _kvs; + CHM topchm = chm(topkvs); + if (topchm._newkvs == null) return helper; // No copy in-progress + topchm.help_copy_impl(this, topkvs, false); + return helper; + } + + + // --- CHM ----------------------------------------------------------------- + // The control structure for the NonBlockingHashMap + + private static final class CHM { + // Size in active K,V pairs + private final Counter _size; + + public int size() { + return (int) _size.get(); + } + + // --- + // These next 2 fields are used in the resizing heuristics, to judge when + // it is time to resize or copy the table. Slots is a count of used-up + // key slots, and when it nears a large fraction of the table we probably + // end up reprobing too much. Last-resize-milli is the time since the + // last resize; if we are running back-to-back resizes without growing + // (because there are only a few live keys but many slots full of dead + // keys) then we need a larger table to cut down on the churn. + + // Count of used slots, to tell when table is full of dead unusable slots + private final Counter _slots; + + public int slots() { + return (int) _slots.get(); + } + + // --- + // New mappings, used during resizing. + // The 'new KVs' array - created during a resize operation. This + // represents the new table being copied from the old one. It's the + // volatile variable that is read as we cross from one table to the next, + // to get the required memory orderings. It monotonically transits from + // null to set (once). + volatile Object[] _newkvs; + private final AtomicReferenceFieldUpdater _newkvsUpdater = + AtomicReferenceFieldUpdater.newUpdater(CHM.class, Object[].class, "_newkvs"); + + // Set the _next field if we can. + + boolean CAS_newkvs(Object[] newkvs) { + while (_newkvs == null) + if (_newkvsUpdater.compareAndSet(this, null, newkvs)) + return true; + return false; + } + + // Sometimes many threads race to create a new very large table. 
Only 1 + // wins the race, but the losers all allocate a junk large table with + // hefty allocation costs. Attempt to control the overkill here by + // throttling attempts to create a new table. I cannot really block here + // (lest I lose the non-blocking property) but late-arriving threads can + // give the initial resizing thread a little time to allocate the initial + // new table. The Right Long Term Fix here is to use array-lets and + // incrementally create the new very large array. In C I'd make the array + // with malloc (which would mmap under the hood) which would only eat + // virtual-address and not real memory - and after Somebody wins then we + // could in parallel initialize the array. Java does not allow + // un-initialized array creation (especially of ref arrays!). + volatile long _resizers; // count of threads attempting an initial resize + private static final AtomicLongFieldUpdater _resizerUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); + + // --- + // Simple constructor + + CHM(Counter size) { + _size = size; + _slots = new Counter(); + } + + // --- tableFull --------------------------------------------------------- + // Heuristic to decide if this table is too full, and we should start a + // new table. Note that if a 'get' call has reprobed too many times and + // decided the table must be full, then always the estimate_sum must be + // high and we must report the table is full. If we do not, then we might + // end up deciding that the table is not full and inserting into the + // current table, while a 'get' has decided the same key cannot be in this + // table because of too many reprobes. The invariant is: + // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) + + private final boolean tableFull(int reprobe_cnt, int len) { + return + // Do the cheap check first: we allow some number of reprobes always + reprobe_cnt >= REPROBE_LIMIT && + // More expensive check: see if the table is > 1/4 full. 
+ _slots.estimate_get() >= reprobe_limit(len); + } + + // --- resize ------------------------------------------------------------ + // Resizing after too many probes. "How Big???" heuristics are here. + // Callers will (not this routine) will 'help_copy' any in-progress copy. + // Since this routine has a fast cutout for copy-already-started, callers + // MUST 'help_copy' lest we have a path which forever runs through + // 'resize' only to discover a copy-in-progress which never progresses. + + private final Object[] resize(NonBlockingHashMap topmap, Object[] kvs) { + assert chm(kvs) == this; + + // Check for resize already in progress, probably triggered by another thread + Object[] newkvs = _newkvs; // VOLATILE READ + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + + // No copy in-progress, so start one. First up: compute new table size. + int oldlen = len(kvs); // Old count of K,V pairs allowed + int sz = size(); // Get current table count of active K,V pairs + int newsz = sz; // First size estimate + + // Heuristic to determine new size. We expect plenty of dead-slots-with-keys + // and we need some decent padding to avoid endless reprobing. + if (sz >= (oldlen >> 2)) { // If we are >25% full of keys then... + newsz = oldlen << 1; // Double size + if (sz >= (oldlen >> 1)) // If we are >50% full of keys then... + newsz = oldlen << 2; // Double double size + } + // This heuristic in the next 2 lines leads to a much denser table + // with a higher reprobe rate + //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... + // newsz = oldlen<<1; // Double size + + // Last (re)size operation was very recent? Then double again; slows + // down resize operations for tables subject to a high key churn rate. + long tm = System.currentTimeMillis(); + long q = 0; + if (newsz <= oldlen && // New table would shrink or hold steady? 
+ tm <= topmap._last_resize_milli + 10000 && // Recent resize (less than 1 sec ago) + (q = _slots.estimate_get()) >= (sz << 1)) // 1/2 of keys are dead? + newsz = oldlen << 1; // Double the existing size + + // Do not shrink, ever + if (newsz < oldlen) newsz = oldlen; + + // Convert to power-of-2 + int log2; + for (log2 = MIN_SIZE_LOG; (1 << log2) < newsz; log2++) ; // Compute log2 of size + + // Now limit the number of threads actually allocating memory to a + // handful - lest we have 750 threads all trying to allocate a giant + // resized array. + long r = _resizers; + while (!_resizerUpdater.compareAndSet(this, r, r + 1)) + r = _resizers; + // Size calculation: 2 words (K+V) per table entry, plus a handful. We + // guess at 32-bit pointers; 64-bit pointers screws up the size calc by + // 2x but does not screw up the heuristic very much. + int megs = ((((1 << log2) << 1) + 4) << 3/*word to bytes*/) >> 20/*megs*/; + if (r >= 2 && megs > 0) { // Already 2 guys trying; wait and see + newkvs = _newkvs; // Between dorking around, another thread did it + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + // TODO - use a wait with timeout, so we'll wakeup as soon as the new table + // is ready, or after the timeout in any case. + //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup + // For now, sleep a tad and see if the 2 guys already trying to make + // the table actually get around to making it happen. + try { + Thread.sleep(8 * megs); + } catch (Exception e) { + } + } + // Last check, since the 'new' below is expensive and there is a chance + // that another thread slipped in a new thread while we ran the heuristic. 
+ newkvs = _newkvs; + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + + // Double size for K,V pairs, add 1 for CHM + newkvs = new Object[((1 << log2) << 1) + 2]; // This can get expensive for big arrays + newkvs[0] = new CHM(_size); // CHM in slot 0 + newkvs[1] = new int[1 << log2]; // hashes in slot 1 + + // Another check after the slow allocation + if (_newkvs != null) // See if resize is already in progress + return _newkvs; // Use the new table already + + // The new table must be CAS'd in so only 1 winner amongst duplicate + // racing resizing threads. Extra CHM's will be GC'd. + if (CAS_newkvs(newkvs)) { // NOW a resize-is-in-progress! + //notifyAll(); // Wake up any sleepers + //long nano = System.nanoTime(); + //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1< _copyIdxUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); + + // Work-done reporting. Used to efficiently signal when we can move to + // the new table. From 0 to len(oldkvs) refers to copying from the old + // table to the new. + volatile long _copyDone = 0; + static private final AtomicLongFieldUpdater _copyDoneUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); + + // --- help_copy_impl ---------------------------------------------------- + // Help along an existing resize operation. We hope its the top-level + // copy (it was when we started) but this CHM might have been promoted out + // of the top position. 
+ + private final void help_copy_impl(NonBlockingHashMap topmap, Object[] oldkvs, boolean copy_all) { + assert chm(oldkvs) == this; + Object[] newkvs = _newkvs; + assert newkvs != null; // Already checked by caller + int oldlen = len(oldkvs); // Total amount to copy + final int MIN_COPY_WORK = Math.min(oldlen, 1024); // Limit per-thread work + + // --- + int panic_start = -1; + int copyidx = -9999; // Fool javac to think it's initialized + while (_copyDone < oldlen) { // Still needing to copy? + // Carve out a chunk of work. The counter wraps around so every + // thread eventually tries to copy every slot repeatedly. + + // We "panic" if we have tried TWICE to copy every slot - and it still + // has not happened. i.e., twice some thread somewhere claimed they + // would copy 'slot X' (by bumping _copyIdx) but they never claimed to + // have finished (by bumping _copyDone). Our choices become limited: + // we can wait for the work-claimers to finish (and become a blocking + // algorithm) or do the copy work ourselves. Tiny tables with huge + // thread counts trying to copy the table often 'panic'. + if (panic_start == -1) { // No panic? + copyidx = (int) _copyIdx; + while (copyidx < (oldlen << 1) && // 'panic' check + !_copyIdxUpdater.compareAndSet(this, copyidx, copyidx + MIN_COPY_WORK)) + copyidx = (int) _copyIdx; // Re-read + if (!(copyidx < (oldlen << 1))) // Panic! + panic_start = copyidx; // Record where we started to panic-copy + } + + // We now know what to copy. Try to copy. + int workdone = 0; + for (int i = 0; i < MIN_COPY_WORK; i++) + if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs, newkvs)) // Made an oldtable slot go dead? + workdone++; // Yes! 
+ if (workdone > 0) // Report work-done occasionally + copy_check_and_promote(topmap, oldkvs, workdone);// See if we can promote + //for( int i=0; i 0) { + while (!_copyDoneUpdater.compareAndSet(this, copyDone, copyDone + workdone)) { + copyDone = _copyDone; // Reload, retry + assert (copyDone + workdone) <= oldlen; + } + //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) ) + //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); + } + + // Check for copy being ALL done, and promote. Note that we might have + // nested in-progress copies and manage to finish a nested copy before + // finishing the top-level copy. We only promote top-level copies. + if (copyDone + workdone == oldlen && // Ready to promote this table? + topmap._kvs == oldkvs && // Looking at the top-level table? + // Attempt to promote + topmap.CAS_kvs(oldkvs, _newkvs)) { + topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check + //long nano = System.nanoTime(); + //System.out.println(" "+nano+" Promote table to "+len(_newkvs)); + //if( System.out != null ) System.out.print("]"); + } + } + + // --- copy_slot --------------------------------------------------------- + // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can + // confirm that the new table guaranteed has a value for this old-table + // slot. We need an accurate confirmed-copy count so that we know when we + // can promote (if we promote the new table too soon, other threads may + // 'miss' on values not-yet-copied from the old table). We don't allow + // any direct updates on the new table, unless they first happened to the + // old table - so that any transition in the new table from null to + // not-null must have been from a copy_slot (or other old-table overwrite) + // and not from a thread directly writing in the new table. Thus we can + // count null-to-not-null transitions in the new table. 
+ + private boolean copy_slot(NonBlockingHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs) { + // Blindly set the key slot from null to TOMBSTONE, to eagerly stop + // fresh put's from inserting new values in the old table when the old + // table is mid-resize. We don't need to act on the results here, + // because our correctness stems from box'ing the Value field. Slamming + // the Key field is a minor speed optimization. + Object key; + while ((key = key(oldkvs, idx)) == null) + CAS_key(oldkvs, idx, null, TOMBSTONE); + + // --- + // Prevent new values from appearing in the old table. + // Box what we see in the old table, to prevent further updates. + Object oldval = val(oldkvs, idx); // Read OLD table + while (!(oldval instanceof Prime)) { + final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); + if (CAS_val(oldkvs, idx, oldval, box)) { // CAS down a box'd version of oldval + // If we made the Value slot hold a TOMBPRIME, then we both + // prevented further updates here but also the (absent) + // oldval is vaccuously available in the new table. We + // return with true here: any thread looking for a value for + // this key can correctly go straight to the new table and + // skip looking in the old table. + if (box == TOMBPRIME) + return true; + // Otherwise we boxed something, but it still needs to be + // copied into the new table. + oldval = box; // Record updated oldval + break; // Break loop; oldval is now boxed by us + } + oldval = val(oldkvs, idx); // Else try, try again + } + if (oldval == TOMBPRIME) return false; // Copy already complete here! + + // --- + // Copy the value into the new table, but only if we overwrite a null. + // If another value is already in the new table, then somebody else + // wrote something there and that write is happens-after any value that + // appears in the old table. 
If putIfMatch does not find a null in the + // new table - somebody else should have recorded the null-not_null + // transition in this copy. + Object old_unboxed = ((Prime) oldval)._V; + assert old_unboxed != TOMBSTONE; + boolean copied_into_new = (putIfMatch(topmap, newkvs, key, old_unboxed, null) == null); + + // --- + // Finally, now that any old value is exposed in the new table, we can + // forever hide the old-table value by slapping a TOMBPRIME down. This + // will stop other threads from uselessly attempting to copy this slot + // (i.e., it's a speed optimization not a correctness issue). + while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME)) + oldval = val(oldkvs, idx); + + return copied_into_new; + } // end copy_slot + } // End of CHM + + + // --- Snapshot ------------------------------------------------------------ + // The main class for iterating over the NBHM. It "snapshots" a clean + // view of the K/V array. + + private class SnapshotV implements Iterator, Enumeration { + final Object[] _sskvs; + + public SnapshotV() { + while (true) { // Verify no table-copy-in-progress + Object[] topkvs = _kvs; + CHM topchm = chm(topkvs); + if (topchm._newkvs == null) { // No table-copy-in-progress + // The "linearization point" for the iteration. Every key in this + // table will be visited, but keys added later might be skipped or + // even be added to a following table (also not iterated over). + _sskvs = topkvs; + break; + } + // Table copy in-progress - so we cannot get a clean iteration. We + // must help finish the table copy before we can start iterating. 
+ topchm.help_copy_impl(NonBlockingHashMap.this, topkvs, true); + } + // Warm-up the iterator + next(); + } + + int length() { + return len(_sskvs); + } + + Object key(int idx) { + return NonBlockingHashMap.key(_sskvs, idx); + } + + private int _idx; // Varies from 0-keys.length + private Object _nextK, _prevK; // Last 2 keys found + private TypeV _nextV, _prevV; // Last 2 values found + + public boolean hasNext() { + return _nextV != null; + } + + public TypeV next() { + // 'next' actually knows what the next value will be - it had to + // figure that out last go-around lest 'hasNext' report true and + // some other thread deleted the last value. Instead, 'next' + // spends all its effort finding the key that comes after the + // 'next' key. + if (_idx != 0 && _nextV == null) throw new NoSuchElementException(); + _prevK = _nextK; // This will become the previous key + _prevV = _nextV; // This will become the previous value + _nextV = null; // We have no more next-key + // Attempt to set <_nextK,_nextV> to the next K,V pair. + // _nextV is the trigger: stop searching when it is != null + while (_idx < length()) { // Scan array + _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) + if (_nextK != null && // Found something? + _nextK != TOMBSTONE && + (_nextV = get(_nextK)) != null) + break; // Got it! _nextK is a valid Key + } // Else keep scanning + return _prevV; // Return current value. + } + + public void remove() { + if (_prevV == null) throw new IllegalStateException(); + putIfMatch(NonBlockingHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV); + _prevV = null; + } + + public TypeV nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the values in this table. 
+ * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new SnapshotV(); + } + + // --- values -------------------------------------------------------------- + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are reflected + * in the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from this map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + @Override + public Collection values() { + return new AbstractCollection() { + @Override public void clear() { + NonBlockingHashMap.this.clear(); + } + + @Override public int size() { + return NonBlockingHashMap.this.size(); + } + + @Override public boolean contains(Object v) { + return NonBlockingHashMap.this.containsValue(v); + } + + @Override public Iterator iterator() { + return new SnapshotV(); + } + }; + } + + // --- keySet -------------------------------------------------------------- + + private class SnapshotK implements Iterator, Enumeration { + final SnapshotV _ss; + + public SnapshotK() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public TypeK next() { + _ss.next(); + return (TypeK) _ss._prevK; + } + + public boolean hasNext() { + return _ss.hasNext(); + } + + public TypeK nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new SnapshotK(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. The set + * is backed by the map, so changes to the map are reflected in the set, + * and vice-versa. The set supports element removal, which removes the + * corresponding mapping from this map, via the Iterator.remove, + * Set.remove, removeAll, retainAll, and + * clear operations. It does not support the add or + * addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + @Override + public Set keySet() { + return new AbstractSet() { + @Override public void clear() { + NonBlockingHashMap.this.clear(); + } + + @Override public int size() { + return NonBlockingHashMap.this.size(); + } + + @Override public boolean contains(Object k) { + return NonBlockingHashMap.this.containsKey(k); + } + + @Override public boolean remove(Object k) { + return NonBlockingHashMap.this.remove(k) != null; + } + + @Override public Iterator iterator() { + return new SnapshotK(); + } + }; + } + + + // --- entrySet ------------------------------------------------------------ + // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry. + + private class NBHMEntry extends AbstractEntry { + NBHMEntry(final TypeK k, final TypeV v) { + super(k, v); + } + + public TypeV setValue(final TypeV val) { + if (val == null) throw new NullPointerException(); + _val = val; + return put(_key, val); + } + } + + private class SnapshotE implements Iterator> { + final SnapshotV _ss; + + public SnapshotE() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public Map.Entry next() { + _ss.next(); + return new NBHMEntry((TypeK) _ss._prevK, _ss._prevV); + } + + public boolean hasNext() { + return _ss.hasNext(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. The + * set is backed by the map, so changes to the map are reflected in the + * set, and vice-versa. The set supports element removal, which removes + * the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, removeAll, + * retainAll, and clear operations. 
It does not support + * the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + *

    + *

    Warning: the iterator associated with this Set + * requires the creation of {@link java.util.Map.Entry} objects with each + * iteration. The {@link NonBlockingHashMap} does not normally create or + * using {@link java.util.Map.Entry} objects so they will be created soley + * to support this iteration. Iterating using {@link #keySet} or {@link + * #values} will be more efficient. + */ + @Override + public Set> entrySet() { + return new AbstractSet>() { + @Override public void clear() { + NonBlockingHashMap.this.clear(); + } + + @Override public int size() { + return NonBlockingHashMap.this.size(); + } + + @Override public boolean remove(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + return NonBlockingHashMap.this.remove(e.getKey(), e.getValue()); + } + + @Override public boolean contains(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + TypeV v = get(e.getKey()); + return v.equals(e.getValue()); + } + + @Override public Iterator> iterator() { + return new SnapshotE(); + } + }; + } + + // --- writeObject ------------------------------------------------------- + // Write a NBHM to a stream + + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); // Nothing to write + for (Object K : keySet()) { + final Object V = get(K); // Do an official 'get' + s.writeObject(K); // Write the pair + s.writeObject(V); + } + s.writeObject(null); // Sentinel to indicate end-of-data + s.writeObject(null); + } + + // --- readObject -------------------------------------------------------- + // Read a CHM from a stream + + private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { + s.defaultReadObject(); // Read nothing + initialize(MIN_SIZE); + for (; ;) { + final TypeK K = (TypeK) s.readObject(); + final TypeV V = (TypeV) s.readObject(); + if (K == null) break; + put(K, V); // Insert with 
an offical put + } + } + +} // End NonBlockingHashMap class diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashMapLong.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashMapLong.java new file mode 100644 index 00000000000..0add91886e1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashMapLong.java @@ -0,0 +1,1516 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import sun.misc.Unsafe; + +import java.io.IOException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +/** + * A lock-free alternate implementation of {@link java.util.ConcurrentHashMap} + * with primitive long keys, better scaling properties and + * generally lower costs. The use of {@code long} keys allows for faster + * compares and lower memory costs. The Map provides identical correctness + * properties as ConcurrentHashMap. All operations are non-blocking and + * multi-thread safe, including all update operations. {@link + * NonBlockingHashMapLong} scales substatially better than {@link + * java.util.ConcurrentHashMap} for high update rates, even with a large + * concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU Azul + * box, even with 100% updates or 100% reads or any fraction in-between. + * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, + * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. + *

    + *

    The main benefit of this class over using plain {@link + * org.cliffc.high_scale_lib.NonBlockingHashMap} with {@link Long} keys is + * that it avoids the auto-boxing and unboxing costs. Since auto-boxing is + * automatic, it is easy to accidentally cause auto-boxing and negate + * the space and speed benefits. + *

    + *

    This class obeys the same functional specification as {@link + * java.util.Hashtable}, and includes versions of methods corresponding to + * each method of Hashtable. However, even though all operations are + * thread-safe, operations do not entail locking and there is + * not any support for locking the entire table in a way that + * prevents all access. This class is fully interoperable with + * Hashtable in programs that rely on its thread safety but not on + * its synchronization details. + *

    + *

    Operations (including put) generally do not block, so may + * overlap with other update operations (including other puts and + * removes). Retrievals reflect the results of the most recently + * completed update operations holding upon their onset. For + * aggregate operations such as putAll, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, Iterators + * and Enumerations return elements reflecting the state of the hash table at + * some point at or since the creation of the iterator/enumeration. They do + * not throw {@link ConcurrentModificationException}. However, + * iterators are designed to be used by only one thread at a time. + *

    + *

    Very full tables, or tables with high reprobe rates may trigger an + * internal resize operation to move into a larger table. Resizing is not + * terribly expensive, but it is not free either; during resize operations + * table throughput may drop somewhat. All threads that visit the table + * during a resize will 'help' the resizing but will still be allowed to + * complete their operation before the resize is finished (i.e., a simple + * 'get' operation on a million-entry table undergoing resizing will not need + * to block until the entire million entries are copied). + *

    + *

    This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + *

    + *

    Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a value. + * + * @author Cliff Click + * @param the type of mapped values + * @since 1.5 + */ + +public class NonBlockingHashMapLong + extends AbstractMap + implements ConcurrentMap, Serializable { + + private static final long serialVersionUID = 1234123412341234124L; + + private static final int REPROBE_LIMIT = 10; // Too many reprobes then force a table-resize + + // --- Bits to allow Unsafe access to arrays + private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); + private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); + private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); + + private static long rawIndex(final Object[] ary, final int idx) { + assert idx >= 0 && idx < ary.length; + return _Obase + idx * _Oscale; + } + + private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); + private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); + + private static long rawIndex(final long[] ary, final int idx) { + assert idx >= 0 && idx < ary.length; + return _Lbase + idx * _Lscale; + } + + // --- Bits to allow Unsafe CAS'ing of the CHM field + private static final long _chm_offset; + private static final long _val_1_offset; + + static { // + Field f = null; + try { + f = NonBlockingHashMapLong.class.getDeclaredField("_chm"); + } + catch (java.lang.NoSuchFieldException e) { + throw new RuntimeException(e); + } + _chm_offset = _unsafe.objectFieldOffset(f); + + try { + f = NonBlockingHashMapLong.class.getDeclaredField("_val_1"); + } + catch (java.lang.NoSuchFieldException e) { + throw new RuntimeException(e); + } + _val_1_offset = _unsafe.objectFieldOffset(f); + } + + private final boolean CAS(final long offset, final Object old, final Object nnn) { + return _unsafe.compareAndSwapObject(this, offset, old, nnn); + } + + // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper 
class + + private static final class Prime { + final Object _V; + + Prime(Object V) { + _V = V; + } + + static Object unbox(Object V) { + return V instanceof Prime ? ((Prime) V)._V : V; + } + } + + // --- The Hash Table -------------------- + private transient CHM _chm; + // This next field holds the value for Key 0 - the special key value which + // is the initial array value, and also means: no-key-inserted-yet. + private transient Object _val_1; // Value for Key: NO_KEY + + // Time since last resize + private transient long _last_resize_milli; + + // Optimize for space: use a 1/2-sized table and allow more re-probes + private final boolean _opt_for_space; + + // --- Minimum table size ---------------- + // Pick size 16 K/V pairs, which turns into (16*2)*4+12 = 140 bytes on a + // standard 32-bit HotSpot, and (16*2)*8+12 = 268 bytes on 64-bit Azul. + private static final int MIN_SIZE_LOG = 4; // + private static final int MIN_SIZE = (1 << MIN_SIZE_LOG); // Must be power of 2 + + // --- Sentinels ------------------------- + // No-Match-Old - putIfMatch does updates only if it matches the old value, + // and NO_MATCH_OLD basically counts as a wildcard match. + private static final Object NO_MATCH_OLD = new Object(); // Sentinel + // Match-Any-not-null - putIfMatch does updates only if it find a real old + // value. + private static final Object MATCH_ANY = new Object(); // Sentinel + // This K/V pair has been deleted (but the Key slot is forever claimed). + // The same Key can be reinserted with a new value later. + private static final Object TOMBSTONE = new Object(); + // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a + // table resize started. The K/V pair has been marked so that no new + // updates can happen to the old table (and since the K/V pair was deleted + // nothing was copied to the new table). 
+ private static final Prime TOMBPRIME = new Prime(TOMBSTONE); + + // I exclude 1 long from the 2^64 possibilities, and test for it before + // entering the main array. The NO_KEY value must be zero, the initial + // value set by Java before it hands me the array. + private static final long NO_KEY = 0L; + + // --- dump ---------------------------------------------------------------- + + /** + * Verbose printout of table internals, useful for debugging. + */ + public final void print() { + System.out.println("========="); + print_impl(-99, NO_KEY, _val_1); + _chm.print(); + System.out.println("========="); + } + + private static final void print_impl(final int i, final long K, final Object V) { + String p = (V instanceof Prime) ? "prime_" : ""; + Object V2 = Prime.unbox(V); + String VS = (V2 == TOMBSTONE) ? "tombstone" : V2.toString(); + System.out.println("[" + i + "]=(" + K + "," + p + VS + ")"); + } + + private final void print2() { + System.out.println("========="); + print2_impl(-99, NO_KEY, _val_1); + _chm.print(); + System.out.println("========="); + } + + private static final void print2_impl(final int i, final long K, final Object V) { + if (V != null && Prime.unbox(V) != TOMBSTONE) + print_impl(i, K, V); + } + + // Count of reprobes + private transient Counter _reprobes = new Counter(); + + /** + * Get and clear the current count of reprobes. Reprobes happen on key + * collisions, and a high reprobe rate may indicate a poor hash function or + * weaknesses in the table resizing function. + * + * @return the count of reprobes since the last call to {@link #reprobes} + * or since the table was created. + */ + public long reprobes() { + long r = _reprobes.get(); + _reprobes = new Counter(); + return r; + } + + + // --- reprobe_limit ----------------------------------------------------- + // Heuristic to decide if we have reprobed toooo many times. 
Running over + // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it + // can trigger a table resize. Several places must have exact agreement on + // what the reprobe_limit is, so we share it here. + + private static final int reprobe_limit(int len) { + return REPROBE_LIMIT + (len >> 2); + } + + // --- NonBlockingHashMapLong ---------------------------------------------- + // Constructors + + /** + * Create a new NonBlockingHashMapLong with default minimum size (currently set + * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). + */ + public NonBlockingHashMapLong() { + this(MIN_SIZE, true); + } + + /** + * Create a new NonBlockingHashMapLong with initial room for the given + * number of elements, thus avoiding internal resizing operations to reach + * an appropriate size. Large numbers here when used with a small count of + * elements will sacrifice space for a small amount of time gained. The + * initial size will be rounded up internally to the next larger power of 2. + */ + public NonBlockingHashMapLong(final int initial_sz) { + this(initial_sz, true); + } + + /** + * Create a new NonBlockingHashMapLong, setting the space-for-speed + * tradeoff. {@code true} optimizes for space and is the default. {@code + * false} optimizes for speed and doubles space costs for roughly a 10% + * speed improvement. + */ + public NonBlockingHashMapLong(final boolean opt_for_space) { + this(1, opt_for_space); + } + + /** + * Create a new NonBlockingHashMapLong, setting both the initial size and + * the space-for-speed tradeoff. {@code true} optimizes for space and is + * the default. {@code false} optimizes for speed and doubles space costs + * for roughly a 10% speed improvement. 
+ */ + public NonBlockingHashMapLong(final int initial_sz, final boolean opt_for_space) { + _opt_for_space = opt_for_space; + initialize(initial_sz); + } + + private final void initialize(final int initial_sz) { + if (initial_sz < 0) throw new IllegalArgumentException(); + int i; // Convert to next largest power-of-2 + for (i = MIN_SIZE_LOG; (1 << i) < initial_sz; i++) ; + _chm = new CHM(this, new Counter(), i); + _val_1 = TOMBSTONE; // Always as-if deleted + _last_resize_milli = System.currentTimeMillis(); + } + + // --- wrappers ------------------------------------------------------------ + + /** + * Returns the number of key-value mappings in this map. + * + * @return the number of key-value mappings in this map + */ + public int size() { + return (_val_1 == TOMBSTONE ? 0 : 1) + (int) _chm.size(); + } + + /** + * Tests if the key in the table. + * + * @return true if the key is in the table + */ + public boolean containsKey(long key) { + return get(key) != null; + } + + /** + * Legacy method testing if some key maps into the specified value in this + * table. This method is identical in functionality to {@link + * #containsValue}, and exists solely to ensure full compatibility with + * class {@link java.util.Hashtable}, which supported this method prior to + * introduction of the Java Collections framework. + * + * @param val a value to search for + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object val) { + return containsValue(val); + } + + /** + * Maps the specified key to the specified value in the table. The value + * cannot be null.

    The value can be retrieved by calling {@link #get} + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param val value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified value is null + */ + public TypeV put(long key, TypeV val) { + return putIfMatch(key, val, NO_MATCH_OLD); + } + + /** + * Atomically, do a {@link #put} if-and-only-if the key is not mapped. + * Useful to ensure that only a single mapping for the key exists, even if + * many threads are trying to create the mapping in parallel. + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified is value is null + */ + public TypeV putIfAbsent(long key, TypeV val) { + return putIfMatch(key, val, TOMBSTONE); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @return the previous value associated with key, or + * null if there was no mapping for key + */ + public TypeV remove(long key) { + return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD); + } + + /** + * Atomically do a {@link #remove(long)} if-and-only-if the key is mapped + * to a value which is equals to the given value. + * + * @throws NullPointerException if the specified value is null + */ + public boolean remove(long key, Object val) { + return putIfMatch(key, TOMBSTONE, val) == val; + } + + /** + * Atomically do a put(key,val) if-and-only-if the key is + * mapped to some value already. 
+ * + * @throws NullPointerException if the specified value is null + */ + public TypeV replace(long key, TypeV val) { + return putIfMatch(key, val, MATCH_ANY); + } + + /** + * Atomically do a put(key,newValue) if-and-only-if the key is + * mapped a value which is equals to oldValue. + * + * @throws NullPointerException if the specified value is null + */ + public boolean replace(long key, TypeV oldValue, TypeV newValue) { + return putIfMatch(key, newValue, oldValue) == oldValue; + } + + private final TypeV putIfMatch(long key, Object newVal, Object oldVal) { + if (oldVal == null || newVal == null) throw new NullPointerException(); + if (key == NO_KEY) { + final Object curVal = _val_1; + if (oldVal == NO_MATCH_OLD || // Do we care about expected-Value at all? + curVal == oldVal || // No instant match already? + (oldVal == MATCH_ANY && curVal != TOMBSTONE) || + oldVal.equals(curVal)) // Expensive equals check + CAS(_val_1_offset, curVal, newVal); // One shot CAS update attempt + return curVal == TOMBSTONE ? null : (TypeV) curVal; // Return the last value present + } + final Object res = _chm.putIfMatch(key, newVal, oldVal); + assert !(res instanceof Prime); + assert res != null; + return res == TOMBSTONE ? null : (TypeV) res; + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { // Smack a new empty table down + CHM newchm = new CHM(this, new Counter(), MIN_SIZE_LOG); + while (!CAS(_chm_offset, _chm, newchm)) // Spin until the clear works + ; + CAS(_val_1_offset, _val_1, TOMBSTONE); + } + + /** + * Returns true if this Map maps one or more keys to the specified + * value. Note: This method requires a full internal traversal of the + * hash table and is much slower than {@link #containsKey}. 
+ * + * @param val value whose presence in this map is to be tested + * @return true if this Map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object val) { + if (val == null) return false; + if (val == _val_1) return true; // Key 0 + for (TypeV V : values()) + if (V == val || V.equals(val)) + return true; + return false; + } + + // --- get ----------------------------------------------------------------- + + /** + * Returns the value to which the specified key is mapped, or {@code null} + * if this map contains no mapping for the key. + *

    More formally, if this map contains a mapping from a key {@code k} to + * a value {@code v} such that {@code key==k}, then this method + * returns {@code v}; otherwise it returns {@code null}. (There can be at + * most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + // Never returns a Prime nor a Tombstone. + public final TypeV get(long key) { + if (key == NO_KEY) { + final Object V = _val_1; + return V == TOMBSTONE ? null : (TypeV) V; + } + final Object V = _chm.get_impl(key); + assert !(V instanceof Prime); // Never return a Prime + assert V != TOMBSTONE; + return (TypeV) V; + } + + /** + * Auto-boxing version of {@link #get(long)}. + */ + public TypeV get(Object key) { + return (key instanceof Long) ? get(((Long) key).longValue()) : null; + } + + /** + * Auto-boxing version of {@link #remove(long)}. + */ + public TypeV remove(Object key) { + return (key instanceof Long) ? remove(((Long) key).longValue()) : null; + } + + /** + * Auto-boxing version of {@link #remove(long,Object)}. + */ + public boolean remove(Object key, Object Val) { + return (key instanceof Long) ? remove(((Long) key).longValue(), Val) : false; + } + + /** + * Auto-boxing version of {@link #containsKey(long)}. + */ + public boolean containsKey(Object key) { + return (key instanceof Long) ? containsKey(((Long) key).longValue()) : false; + } + + /** + * Auto-boxing version of {@link #putIfAbsent}. + */ + public TypeV putIfAbsent(Long key, TypeV val) { + return putIfAbsent(((Long) key).longValue(), val); + } + + /** + * Auto-boxing version of {@link #replace}. + */ + public TypeV replace(Long key, TypeV Val) { + return replace(((Long) key).longValue(), Val); + } + + /** + * Auto-boxing version of {@link #put}. + */ + public TypeV put(Long key, TypeV val) { + return put(key.longValue(), val); + } + + /** + * Auto-boxing version of {@link #replace}. 
+ */ + public boolean replace(Long key, TypeV oldValue, TypeV newValue) { + return replace(((Long) key).longValue(), oldValue, newValue); + } + + // --- help_copy ----------------------------------------------------------- + // Help along an existing resize operation. This is just a fast cut-out + // wrapper, to encourage inlining for the fast no-copy-in-progress case. We + // always help the top-most table copy, even if there are nested table + // copies in progress. + + private final void help_copy() { + // Read the top-level CHM only once. We'll try to help this copy along, + // even if it gets promoted out from under us (i.e., the copy completes + // and another KVS becomes the top-level copy). + CHM topchm = _chm; + if (topchm._newchm == null) return; // No copy in-progress + topchm.help_copy_impl(false); + } + + + // --- CHM ----------------------------------------------------------------- + // The control structure for the NonBlockingHashMapLong + + private static final class CHM implements Serializable { + // Back-pointer to top-level structure + final NonBlockingHashMapLong _nbhml; + + // Size in active K,V pairs + private final Counter _size; + + public int size() { + return (int) _size.get(); + } + + // --- + // These next 2 fields are used in the resizing heuristics, to judge when + // it is time to resize or copy the table. Slots is a count of used-up + // key slots, and when it nears a large fraction of the table we probably + // end up reprobing too much. Last-resize-milli is the time since the + // last resize; if we are running back-to-back resizes without growing + // (because there are only a few live keys but many slots full of dead + // keys) then we need a larger table to cut down on the churn. + + // Count of used slots, to tell when table is full of dead unusable slots + private final Counter _slots; + + public int slots() { + return (int) _slots.get(); + } + + // --- + // New mappings, used during resizing. 
    // The 'next' CHM - created during a resize operation.  This represents
    // the new table being copied from the old one.  It's the volatile
    // variable that is read as we cross from one table to the next, to get
    // the required memory orderings.  It monotonically transits from null to
    // set (once).
    volatile CHM _newchm;
    // NOTE(review): the updater's generic type arguments (upstream uses
    // AtomicReferenceFieldUpdater<CHM,CHM>) appear to have been lost to
    // markup-stripping in this chunk — confirm against upstream high-scale-lib.
    private static final AtomicReferenceFieldUpdater _newchmUpdater =
        AtomicReferenceFieldUpdater.newUpdater(CHM.class, CHM.class, "_newchm");

    // Set the _newchm field if we can.  AtomicUpdaters do not fail spuriously.
    boolean CAS_newchm(CHM newchm) {
      return _newchmUpdater.compareAndSet(this, null, newchm);
    }

    // Sometimes many threads race to create a new very large table.  Only 1
    // wins the race, but the losers all allocate a junk large table with
    // hefty allocation costs.  Attempt to control the overkill here by
    // throttling attempts to create a new table.  I cannot really block here
    // (lest I lose the non-blocking property) but late-arriving threads can
    // give the initial resizing thread a little time to allocate the initial
    // new table.  The Right Long Term Fix here is to use array-lets and
    // incrementally create the new very large array.  In C I'd make the array
    // with malloc (which would mmap under the hood) which would only eat
    // virtual-address and not real memory - and after Somebody wins then we
    // could in parallel initialize the array.  Java does not allow
    // un-initialized array creation (especially of ref arrays!).
    volatile long _resizers; // count of threads attempting an initial resize
    // NOTE(review): likewise presumably AtomicLongFieldUpdater<CHM> upstream.
    private static final AtomicLongFieldUpdater _resizerUpdater =
        AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers");

    // --- key,val -------------------------------------------------------------
    // Access K,V for a given idx.  Raw CAS on the array slots via Unsafe;
    // rawIndex turns an array index into a byte offset.

    private final boolean CAS_key(int idx, long old, long key) {
      return _unsafe.compareAndSwapLong(_keys, rawIndex(_keys, idx), old, key);
    }

    private final boolean CAS_val(int idx, Object old, Object val) {
      return _unsafe.compareAndSwapObject(_vals, rawIndex(_vals, idx), old, val);
    }

    // The parallel key and value arrays.  keys[i] pairs with vals[i];
    // NO_KEY (0) in keys[i] means the slot has never been claimed.
    final long[] _keys;
    final Object[] _vals;

    // Simple constructor: tables are always power-of-2 sized (1<<logsize)
    // so hashing can mask instead of mod.
    CHM(final NonBlockingHashMapLong nbhml, Counter size, final int logsize) {
      _nbhml = nbhml;
      _size = size;
      _slots = new Counter();
      _keys = new long[1 << logsize];
      _vals = new Object[1 << logsize];
    }

    // --- print innards (debug only): dump every claimed slot, then recurse
    // into the in-progress new table, if any.
    private final void print() {
      for (int i = 0; i < _keys.length; i++) {
        long K = _keys[i];
        if (K != NO_KEY)
          print_impl(i, K, _vals[i]);
      }
      CHM newchm = _newchm;     // New table, if any
      if (newchm != null) {
        System.out.println("----");
        newchm.print();
      }
    }

    // --- print only the live objects (debug only)
    private final void print2() {
      for (int i = 0; i < _keys.length; i++) {
        long K = _keys[i];
        if (K != NO_KEY)        // key is sane
          print2_impl(i, K, _vals[i]);
      }
      CHM newchm = _newchm;     // New table, if any
      if (newchm != null) {
        System.out.println("----");
        newchm.print2();
      }
    }

    // --- get_impl ----------------------------------------------------------
    // Never returns a Prime nor a Tombstone.
    // Lookup 'key' in this table.  Returns the live value, or null for a
    // miss/deleted key.  May recurse into the in-progress new table.
    private final Object get_impl(final long key) {
      final int len = _keys.length;
      int idx = (int) (key & (len - 1)); // First key hash

      // Main spin/reprobe loop, looking for a Key hit
      int reprobe_cnt = 0;
      while (true) {
        final long K = _keys[idx];  // Get key   before volatile read, could be NO_KEY
        final Object V = _vals[idx];// Get value before volatile read, could be null or Tombstone or Prime
        if (K == NO_KEY) return null; // A clear miss

        // Key-compare
        if (key == K) {
          // Key hit!  Check for no table-copy-in-progress
          if (!(V instanceof Prime)) { // No copy?
            if (V == TOMBSTONE) return null;
            // We need a volatile-read between reading a newly inserted Value
            // and returning the Value (else the user might end up reading the
            // stale Value contents).
            final CHM newchm = _newchm; // VOLATILE READ before returning V
            return V;
          }
          // Key hit - but slot is (possibly partially) copied to the new table.
          // Finish the copy & retry in the new table.
          return copy_slot_and_check(idx, key).get_impl(key); // Retry in the new table
        }
        // get and put must have the same key lookup logic!  But only 'put'
        // needs to force a table-resize for a too-long key-reprobe sequence.
        // Check for too-many-reprobes on get.
        if (++reprobe_cnt >= reprobe_limit(len)) // too many probes
          return _newchm == null // Table copy in progress?
              ? null             // Nope!  A clear miss
              : copy_slot_and_check(idx, key).get_impl(key); // Retry in the new table

        idx = (idx + 1) & (len - 1); // Reprobe by 1!  (could now prefetch)
      }
    }

    // --- putIfMatch ---------------------------------------------------------
    // Put, Remove, PutIfAbsent, etc.  Return the old value.  If the returned
    // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can
    // be assumed to work (although might have been immediately overwritten).
    // Only the path through copy_slot passes in an expected value of null,
    // and putIfMatch only returns a null if passed in an expected null.
    private final Object putIfMatch(final long key, final Object putval, final Object expVal) {
      assert putval != null;             // callers encode 'remove' as TOMBSTONE, never null
      assert !(putval instanceof Prime); // Primes are internal-only box values
      assert !(expVal instanceof Prime);
      final int len = _keys.length;
      int idx = (int) (key & (len - 1)); // The first key

      // ---
      // Key-Claim stanza: spin till we can claim a Key (or force a resizing).
      int reprobe_cnt = 0;
      long K = NO_KEY;
      Object V = null;
      while (true) {            // Spin till we get a Key slot
        V = _vals[idx];         // Get old value
        K = _keys[idx];         // Get current key
        if (K == NO_KEY) {      // Slot is free?
          // Found an empty Key slot - which means this Key has never been in
          // this table.  No need to put a Tombstone - the Key is not here!
          if (putval == TOMBSTONE) return putval; // Not-now & never-been in this table
          // Claim the zero key-slot
          if (CAS_key(idx, NO_KEY, key)) { // Claim slot for Key
            _slots.add(1);      // Raise key-slots-used count
            break;              // Got it!
          }
          // CAS to claim the key-slot failed.
          //
          // This re-read of the Key points out an annoying short-coming of Java
          // CAS.  Most hardware CAS's report back the existing value - so that
          // if you fail you have a *witness* - the value which caused the CAS
          // to fail.  The Java API turns this into a boolean destroying the
          // witness.  Re-reading does not recover the witness because another
          // thread can write over the memory after the CAS.  Hence we can be in
          // the unfortunate situation of having a CAS fail *for cause* but
          // having that cause removed by a later store.  This turns a
          // non-spurious-failure CAS (such as Azul has) into one that can
          // apparently spuriously fail - and we avoid apparent spurious failure
          // by not allowing Keys to ever change.
          K = _keys[idx];       // CAS failed, get updated value
          assert K != NO_KEY;   // If keys[idx] is NO_KEY, CAS shoulda worked
        }
        // Key slot was not null, there exists a Key here
        if (K == key)
          break;                // Got it!

        // get and put must have the same key lookup logic!  Lest 'get' give
        // up looking too soon.
        //topmap._reprobes.add(1);
        if (++reprobe_cnt >= reprobe_limit(len)) {
          // We simply must have a new table to do a 'put'.  At this point a
          // 'get' will also go to the new table (if any).  We do not need
          // to claim a key slot (indeed, we cannot find a free one to claim!).
          final CHM newchm = resize();
          if (expVal != null) _nbhml.help_copy(); // help along an existing copy
          return newchm.putIfMatch(key, putval, expVal);
        }

        idx = (idx + 1) & (len - 1); // Reprobe!
      } // End of spinning till we get a Key slot

      // ---
      // Found the proper Key slot, now update the matching Value slot.  We
      // never put a null, so Value slots monotonically move from null to
      // not-null (deleted Values use Tombstone).  Thus if 'V' is null we
      // fail this fast cutout and fall into the check for table-full.
      if (putval == V) return V; // Fast cutout for no-change

      // See if we want to move to a new table (to avoid high average re-probe
      // counts).  We only check on the initial set of a Value from null to
      // not-null (i.e., once per key-insert).
      if ((V == null && tableFull(reprobe_cnt, len)) ||
          // Or we found a Prime: resize is already in progress.  The resize
          // call below will do a CAS on _newchm forcing the read.
          V instanceof Prime) {
        resize();               // Force the new table copy to start
        return copy_slot_and_check(idx, expVal).putIfMatch(key, putval, expVal);
      }

      // ---
      // We are finally prepared to update the existing table
      while (true) {
        assert !(V instanceof Prime);

        // Must match old, and we do not?  Then bail out now.  Note that either V
        // or expVal might be TOMBSTONE.  Also V can be null, if we've never
        // inserted a value before.  expVal can be null if we are called from
        // copy_slot.
        if (expVal != NO_MATCH_OLD && // Do we care about expected-Value at all?
            V != expVal &&            // No instant match already?
            (expVal != MATCH_ANY || V == TOMBSTONE || V == null) &&
            !(V == null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo
            (expVal == null || !expVal.equals(V))) // Expensive equals check at the last
          return V;               // Do not update!

        // Actually change the Value in the Key,Value pair
        if (CAS_val(idx, V, putval)) {
          // CAS succeeded - we did the update!
          // Both normal put's and table-copy calls putIfMatch, but table-copy
          // does not (effectively) increase the number of live k/v pairs.
          if (expVal != null) {
            // Adjust sizes - a striped counter
            if ((V == null || V == TOMBSTONE) && putval != TOMBSTONE) _size.add(1);
            if (!(V == null || V == TOMBSTONE) && putval == TOMBSTONE) _size.add(-1);
          }
          return (V == null && expVal != null) ? TOMBSTONE : V;
        }
        // Else CAS failed
        V = _vals[idx];         // Get new value
        // If a Prime'd value got installed, we need to re-run the put on the
        // new table.  Otherwise we lost the CAS to another racing put.
        // Simply retry from the start.
        if (V instanceof Prime)
          return copy_slot_and_check(idx, expVal).putIfMatch(key, putval, expVal);
      }
    }

    // --- tableFull ---------------------------------------------------------
    // Heuristic to decide if this table is too full, and we should start a
    // new table.  Note that if a 'get' call has reprobed too many times and
    // decided the table must be full, then always the estimate_sum must be
    // high and we must report the table is full.  If we do not, then we might
    // end up deciding that the table is not full and inserting into the
    // current table, while a 'get' has decided the same key cannot be in this
    // table because of too many reprobes.
    // The invariant is:
    //   slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len)
    private final boolean tableFull(int reprobe_cnt, int len) {
      return
          // Do the cheap check first: we allow some number of reprobes always
          reprobe_cnt >= REPROBE_LIMIT &&
              // More expensive check: see if the table is > 1/4 full.
              _slots.estimate_get() >= reprobe_limit(len);
    }

    // --- resize ------------------------------------------------------------
    // Resizing after too many probes.  "How Big???" heuristics are here.
    // Callers (not this routine) will 'help_copy' any in-progress copy.
    // Since this routine has a fast cutout for copy-already-started, callers
    // MUST 'help_copy' lest we have a path which forever runs through
    // 'resize' only to discover a copy-in-progress which never progresses.
    private final CHM resize() {
      // Check for resize already in progress, probably triggered by another thread
      CHM newchm = _newchm;     // VOLATILE READ
      if (newchm != null)       // See if resize is already in progress
        return newchm;          // Use the new table already

      // No copy in-progress, so start one.  First up: compute new table size.
      int oldlen = _keys.length; // Old count of K,V pairs allowed
      int sz = size();           // Get current table count of active K,V pairs
      int newsz = sz;            // First size estimate

      // Heuristic to determine new size.  We expect plenty of dead-slots-with-keys
      // and we need some decent padding to avoid endless reprobing.
      if (_nbhml._opt_for_space) {
        // This heuristic leads to a much denser table with a higher reprobe rate
        if (sz >= (oldlen >> 1))  // If we are >50% full of keys then...
          newsz = oldlen << 1;    // Double size
      } else {
        if (sz >= (oldlen >> 2)) {  // If we are >25% full of keys then...
          newsz = oldlen << 1;      // Double size
          if (sz >= (oldlen >> 1))  // If we are >50% full of keys then...
            newsz = oldlen << 2;    // Quadruple size
        }
      }

      // Last (re)size operation was very recent?  Then double again; slows
      // down resize operations for tables subject to a high key churn rate.
      long tm = System.currentTimeMillis();
      long q = 0;
      if (newsz <= oldlen &&      // New table would shrink or hold steady?
          tm <= _nbhml._last_resize_milli + 10000 && // Recent resize (less than 10 sec ago)
          //(q=_slots.estimate_sum()) >= (sz<<1) ) // 1/2 of keys are dead?
          true)
        newsz = oldlen << 1;      // Double the existing size

      // Do not shrink, ever
      if (newsz < oldlen) newsz = oldlen;
      //System.out.println("old="+oldlen+" new="+newsz+" size()="+sz+" est_slots()="+q+" millis="+(tm-_nbhml._last_resize_milli));

      // Convert to power-of-2
      int log2;
      for (log2 = MIN_SIZE_LOG; (1 << log2) < newsz; log2++) ; // Compute log2 of size

      // Now limit the number of threads actually allocating memory to a
      // handful - lest we have 750 threads all trying to allocate a giant
      // resized array.
      long r = _resizers;
      while (!_resizerUpdater.compareAndSet(this, r, r + 1))
        r = _resizers;
      // Size calculation: 2 words (K+V) per table entry, plus a handful.  We
      // guess at 32-bit pointers; 64-bit pointers screws up the size calc by
      // 2x but does not screw up the heuristic very much.
      int megs = ((((1 << log2) << 1) + 4) << 3/*word to bytes*/) >> 20/*megs*/;
      if (r >= 2 && megs > 0) { // Already 2 guys trying; wait and see
        newchm = _newchm;       // Between dorking around, another thread did it
        if (newchm != null)     // See if resize is already in progress
          return newchm;        // Use the new table already
        // TODO - use a wait with timeout, so we'll wakeup as soon as the new table
        // is ready, or after the timeout in any case.
        //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup
        // For now, sleep a tad and see if the 2 guys already trying to make
        // the table actually get around to making it happen.
        try {
          Thread.sleep(8 * megs);
        } catch (Exception e) {
          // ignored: the sleep is only a best-effort allocation throttle
        }
      }
      // Last check, since the 'new' below is expensive and there is a chance
      // that another thread slipped in a new table while we ran the heuristic.
      newchm = _newchm;
      if (newchm != null)       // See if resize is already in progress
        return newchm;          // Use the new table already

      // New CHM - actually allocate the big arrays
      newchm = new CHM(_nbhml, _size, log2);

      // Another check after the slow allocation
      if (_newchm != null)      // See if resize is already in progress
        return _newchm;         // Use the new table already

      // The new table must be CAS'd in so only 1 winner amongst duplicate
      // racing resizing threads.  Extra CHM's will be GC'd.
      if (CAS_newchm(newchm)) { // NOW a resize-is-in-progress!
        //notifyAll();          // Wake up any sleepers
        //long nano = System.nanoTime();
        //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1< _copyIdxUpdater =
        // NOTE(review): the text between "(1<" above and "_copyIdxUpdater" was
        // lost to markup-stripping in this chunk.  Upstream high-scale-lib
        // appears to contain here the CAS-failure branch, the "return newchm"
        // tail and closing brace of resize(), plus the comment and declaration
        // "volatile long _copyIdx = 0;" for the copy-chunk claim counter.
        // Restore from upstream before compiling — do not hand-reconstruct.
        AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx");

      // Work-done reporting.  Used to efficiently signal when we can move to
      // the new table.  From 0 to len(oldkvs) refers to copying from the old
      // table to the new.
      volatile long _copyDone = 0;
      static private final AtomicLongFieldUpdater _copyDoneUpdater =
          AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone");

      // --- help_copy_impl ----------------------------------------------------
      // Help along an existing resize operation.  We hope it's the top-level
      // copy (it was when we started) but this CHM might have been promoted out
      // of the top position.
    // Claim chunks of MIN_COPY_WORK slots and copy them into _newchm.  When
    // copy_all is true (snapshot iteration) we keep going until the whole
    // table is copied; otherwise we do a bounded amount of helping.
    private final void help_copy_impl(final boolean copy_all) {
      final CHM newchm = _newchm;
      assert newchm != null;    // Already checked by caller
      int oldlen = _keys.length; // Total amount to copy
      final int MIN_COPY_WORK = Math.min(oldlen, 1024); // Limit per-thread work

      // ---
      int panic_start = -1;
      int copyidx = -9999;      // Fool javac to think it's initialized
      while (_copyDone < oldlen) { // Still needing to copy?
        // Carve out a chunk of work.  The counter wraps around so every
        // thread eventually tries to copy every slot repeatedly.

        // We "panic" if we have tried TWICE to copy every slot - and it still
        // has not happened.  i.e., twice some thread somewhere claimed they
        // would copy 'slot X' (by bumping _copyIdx) but they never claimed to
        // have finished (by bumping _copyDone).  Our choices become limited:
        // we can wait for the work-claimers to finish (and become a blocking
        // algorithm) or do the copy work ourselves.  Tiny tables with huge
        // thread counts trying to copy the table often 'panic'.
        if (panic_start == -1) { // No panic?
          copyidx = (int) _copyIdx;
          while (copyidx < (oldlen << 1) && // 'panic' check
              !_copyIdxUpdater.compareAndSet(this, copyidx, copyidx + MIN_COPY_WORK))
            copyidx = (int) _copyIdx; // Re-read
          if (!(copyidx < (oldlen << 1))) // Panic!
            panic_start = copyidx; // Record where we started to panic-copy
        }

        // We now know what to copy.  Try to copy.
        int workdone = 0;
        for (int i = 0; i < MIN_COPY_WORK; i++)
          if (copy_slot((copyidx + i) & (oldlen - 1))) // Made an oldtable slot go dead?
            workdone++;         // Yes!
        if (workdone > 0)       // Report work-done occasionally
          copy_check_and_promote(workdone); // See if we can promote
        //for( int i=0; i 0) {
        // NOTE(review): markup-stripping ate the text inside the commented
        // line above.  Upstream high-scale-lib appears to contain here a
        // commented-out per-slot promote loop, the chunk-advance and
        // early-return tail of help_copy_impl (with its closing braces), and
        // the declaration "private final void copy_check_and_promote(int
        // workdone) {" with its locals (oldlen, copyDone, nowDone).  Restore
        // from upstream before compiling — do not hand-reconstruct.
        while (!_copyDoneUpdater.compareAndSet(this, copyDone, nowDone)) {
          copyDone = _copyDone; // Reload, retry
          nowDone = copyDone + workdone;
          assert nowDone <= oldlen;
        }
        //if( (10*copyDone/oldlen) != (10*nowDone/oldlen) )
        //  System.out.print(" "+nowDone*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%");
      }

      // Check for copy being ALL done, and promote.  Note that we might have
      // nested in-progress copies and manage to finish a nested copy before
      // finishing the top-level copy.  We only promote top-level copies.
      if (nowDone == oldlen &&   // Ready to promote this table?
          _nbhml._chm == this && // Looking at the top-level table?
          // Attempt to promote
          _nbhml.CAS(_chm_offset, this, _newchm)) {
        _nbhml._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check
        //long nano = System.nanoTime();
        //System.out.println(" "+nano+" Promote table "+oldlen+" to "+_newchm._keys.length);
        //System.out.print("_"+oldlen+"]");
      }
    }

    // --- copy_slot ---------------------------------------------------------
    // Copy one K/V pair from oldkvs[i] to newkvs.  Returns true if we can
    // confirm that the new table guaranteed has a value for this old-table
    // slot.  We need an accurate confirmed-copy count so that we know when we
    // can promote (if we promote the new table too soon, other threads may
    // 'miss' on values not-yet-copied from the old table).  We don't allow
    // any direct updates on the new table, unless they first happened to the
    // old table - so that any transition in the new table from null to
    // not-null must have been from a copy_slot (or other old-table overwrite)
    // and not from a thread directly writing in the new table.  Thus we can
    // count null-to-not-null transitions in the new table.
    private boolean copy_slot(int idx) {
      // Blindly set the key slot from NO_KEY to some key which hashes here,
      // to eagerly stop fresh put's from inserting new values in the old
      // table when the old table is mid-resize.  We don't need to act on the
      // results here, because our correctness stems from box'ing the Value
      // field.  Slamming the Key field is a minor speed optimization.
      long key;
      while ((key = _keys[idx]) == NO_KEY)
        CAS_key(idx, NO_KEY, (idx + _keys.length)/*a non-zero key which hashes here*/);

      // ---
      // Prevent new values from appearing in the old table.
      // Box what we see in the old table, to prevent further updates.
      Object oldval = _vals[idx]; // Read OLD table
      while (!(oldval instanceof Prime)) {
        final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval);
        if (CAS_val(idx, oldval, box)) { // CAS down a box'd version of oldval
          // If we made the Value slot hold a TOMBPRIME, then we both
          // prevented further updates here but also the (absent) oldval is
          // vacuously available in the new table.  We return with true here:
          // any thread looking for a value for this key can correctly go
          // straight to the new table and skip looking in the old table.
          if (box == TOMBPRIME)
            return true;
          // Otherwise we boxed something, but it still needs to be
          // copied into the new table.
          oldval = box;         // Record updated oldval
          break;                // Break loop; oldval is now boxed by us
        }
        oldval = _vals[idx];    // Else try, try again
      }
      if (oldval == TOMBPRIME) return false; // Copy already complete here!

      // ---
      // Copy the value into the new table, but only if we overwrite a null.
      // If another value is already in the new table, then somebody else
      // wrote something there and that write is happens-after any value that
      // appears in the old table.  If putIfMatch does not find a null in the
      // new table - somebody else should have recorded the null-not_null
      // transition in this copy.
      Object old_unboxed = ((Prime) oldval)._V;
      assert old_unboxed != TOMBSTONE;
      boolean copied_into_new = (_newchm.putIfMatch(key, old_unboxed, null) == null);

      // ---
      // Finally, now that any old value is exposed in the new table, we can
      // forever hide the old-table value by slapping a TOMBPRIME down.  This
      // will stop other threads from uselessly attempting to copy this slot
      // (i.e., it's a speed optimization not a correctness issue).
      while (!CAS_val(idx, oldval, TOMBPRIME))
        oldval = _vals[idx];

      return copied_into_new;
    } // end copy_slot
  } // End of CHM


  // --- Snapshot ------------------------------------------------------------
  // A point-in-time view used by all iterators/enumerations.  Construction
  // first finishes any in-progress table copy so iteration sees one table.

  // NOTE(review): generic type arguments on the implemented interfaces
  // (upstream: Iterator<TypeV>, Enumeration<TypeV>) appear to have been lost
  // to markup-stripping in this chunk — confirm against upstream.
  private class SnapshotV implements Iterator, Enumeration {
    final CHM _sschm;

    public SnapshotV() {
      CHM topchm;
      while (true) {            // Verify no table-copy-in-progress
        topchm = _chm;
        if (topchm._newchm == null) // No table-copy-in-progress
          break;
        // Table copy in-progress - so we cannot get a clean iteration.  We
        // must help finish the table copy before we can start iterating.
        topchm.help_copy_impl(true);
      }
      // The "linearization point" for the iteration.  Every key in this table
      // will be visited, but keys added later might be skipped or even be
      // added to a following table (also not iterated over).
      _sschm = topchm;
      // Warm-up the iterator
      _idx = -1;
      next();
    }

    int length() {
      return _sschm._keys.length;
    }

    long key(final int idx) {
      return _sschm._keys[idx];
    }

    private int _idx;           // -2 for NO_KEY, -1 for CHECK_NEW_TABLE_LONG, 0-keys.length
    private long _nextK, _prevK; // Last 2 keys found
    private TypeV _nextV, _prevV; // Last 2 values found

    public boolean hasNext() {
      return _nextV != null;
    }

    public TypeV next() {
      // 'next' actually knows what the next value will be - it had to
      // figure that out last go 'round lest 'hasNext' report true and
      // some other thread deleted the last value.
Instead, 'next' + // spends all its effort finding the key that comes after the + // 'next' key. + if (_idx != -1 && _nextV == null) throw new NoSuchElementException(); + _prevK = _nextK; // This will become the previous key + _prevV = _nextV; // This will become the previous value + _nextV = null; // We have no more next-key + // Attempt to set <_nextK,_nextV> to the next K,V pair. + // _nextV is the trigger: stop searching when it is != null + if (_idx == -1) { // Check for NO_KEY + _idx = 0; // Setup for next phase of search + _nextK = NO_KEY; + if ((_nextV = get(_nextK)) != null) return _prevV; + } + while (_idx < length()) { // Scan array + _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) + if (_nextK != NO_KEY && // Found something? + (_nextV = get(_nextK)) != null) + break; // Got it! _nextK is a valid Key + } // Else keep scanning + return _prevV; // Return current value. + } + + public void remove() { + if (_prevV == null) throw new IllegalStateException(); + _sschm.putIfMatch(_prevK, TOMBSTONE, _prevV); + _prevV = null; + } + + public TypeV nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new SnapshotV(); + } + + // --- values -------------------------------------------------------------- + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are reflected + * in the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from this map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + public Collection values() { + return new AbstractCollection() { + public void clear() { + NonBlockingHashMapLong.this.clear(); + } + + public int size() { + return NonBlockingHashMapLong.this.size(); + } + + public boolean contains(Object v) { + return NonBlockingHashMapLong.this.containsValue(v); + } + + public Iterator iterator() { + return new SnapshotV(); + } + }; + } + + // --- keySet -------------------------------------------------------------- + + /** + * A class which implements the {@link Iterator} and {@link Enumeration} + * interfaces, generified to the {@link Long} class and supporting a + * non-auto-boxing {@link #nextLong} function. + */ + public class IteratorLong implements Iterator, Enumeration { + private final SnapshotV _ss; + + /** + * A new IteratorLong + */ + public IteratorLong() { + _ss = new SnapshotV(); + } + + /** + * Remove last key returned by {@link #next} or {@link #nextLong}. + */ + public void remove() { + _ss.remove(); + } + + /** + * Auto-box and return the next key. + */ + public Long next() { + _ss.next(); + return _ss._prevK; + } + + /** + * Return the next key as a primitive {@code long}. + */ + public long nextLong() { + _ss.next(); + return _ss._prevK; + } + + /** + * True if there are more keys to iterate over. + */ + public boolean hasNext() { + return _ss.hasNext(); + } + + /** + * Auto-box and return the next key. + */ + public Long nextElement() { + return next(); + } + + /** + * True if there are more keys to iterate over. + */ + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the auto-boxed keys in this table. 
+ * Warning: this version will auto-box all returned keys. + * + * @return an enumeration of the auto-boxed keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new IteratorLong(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map; with care + * the keys may be iterated over without auto-boxing. The + * set is backed by the map, so changes to the map are reflected in the + * set, and vice-versa. The set supports element removal, which removes + * the corresponding mapping from this map, via the + * Iterator.remove, Set.remove, removeAll, + * retainAll, and clear operations. It does not support + * the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + public Set keySet() { + return new AbstractSet() { + public void clear() { + NonBlockingHashMapLong.this.clear(); + } + + public int size() { + return NonBlockingHashMapLong.this.size(); + } + + public boolean contains(Object k) { + return NonBlockingHashMapLong.this.containsKey(k); + } + + public boolean remove(Object k) { + return NonBlockingHashMapLong.this.remove(k) != null; + } + + public IteratorLong iterator() { + return new IteratorLong(); + } + }; + } + + + // --- entrySet ------------------------------------------------------------ + // Warning: Each call to 'next' in this iterator constructs a new Long and a + // new NBHMLEntry. + + private class NBHMLEntry extends AbstractEntry { + NBHMLEntry(final Long k, final TypeV v) { + super(k, v); + } + + public TypeV setValue(final TypeV val) { + if (val == null) throw new NullPointerException(); + _val = val; + return put(_key, val); + } + } + + private class SnapshotE implements Iterator> { + final SnapshotV _ss; + + public SnapshotE() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public Map.Entry next() { + _ss.next(); + return new NBHMLEntry(_ss._prevK, _ss._prevV); + } + + public boolean hasNext() { + return _ss.hasNext(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. The + * set is backed by the map, so changes to the map are reflected in the + * set, and vice-versa. The set supports element removal, which removes + * the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, removeAll, + * retainAll, and clear operations. It does not support + * the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + *

    + *

    Warning: the iterator associated with this Set + * requires the creation of {@link java.util.Map.Entry} objects with each + * iteration. The {@link org.cliffc.high_scale_lib.NonBlockingHashMap} + * does not normally create or using {@link java.util.Map.Entry} objects so + * they will be created soley to support this iteration. Iterating using + * {@link #keySet} or {@link #values} will be more efficient. In addition, + * this version requires auto-boxing the keys. + */ + public Set> entrySet() { + return new AbstractSet>() { + public void clear() { + NonBlockingHashMapLong.this.clear(); + } + + public int size() { + return NonBlockingHashMapLong.this.size(); + } + + public boolean remove(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + return NonBlockingHashMapLong.this.remove(e.getKey(), e.getValue()); + } + + public boolean contains(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + TypeV v = get(e.getKey()); + return v.equals(e.getValue()); + } + + public Iterator> iterator() { + return new SnapshotE(); + } + }; + } + + // --- writeObject ------------------------------------------------------- + // Write a NBHML to a stream + + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); // Write nothing + for (long K : keySet()) { + final Object V = get(K); // Do an official 'get' + s.writeLong(K); // Write the pair + s.writeObject(V); + } + s.writeLong(NO_KEY); // Sentinel to indicate end-of-data + s.writeObject(null); + } + + // --- readObject -------------------------------------------------------- + // Read a CHM from a stream + + private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { + s.defaultReadObject(); // Read nothing + initialize(MIN_SIZE); + for (; ;) { + final long K = s.readLong(); + final TypeV V = (TypeV) s.readObject(); + if (K == NO_KEY && V == null) 
break; + put(K, V); // Insert with an offical put + } + } + +} // End NonBlockingHashMapLong class diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashSet.java new file mode 100644 index 00000000000..a8fec6d5c37 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashSet.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import java.io.Serializable; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +/** + * A simple wrapper around {@link NonBlockingHashMap} making it implement the + * {@link Set} interface. All operations are Non-Blocking and multi-thread safe. 
+ * + * @author Cliff Click + * @since 1.5 + */ + + +public class NonBlockingHashSet extends AbstractSet implements Serializable { + private static final Object V = ""; + + private final NonBlockingHashMap _map; + + /** + * Make a new empty {@link NonBlockingHashSet}. + */ + public NonBlockingHashSet() { + super(); + _map = new NonBlockingHashMap(); + } + + /** + * Add {@code o} to the set. + * + * @return true if {@code o} was added to the set, false + * if {@code o} was already in the set. + */ + public boolean add(final E o) { + return _map.putIfAbsent(o, V) != V; + } + + /** + * @return true if {@code o} is in the set. + */ + public boolean contains(final Object o) { + return _map.containsKey(o); + } + + /** + * Remove {@code o} from the set. + * + * @return true if {@code o} was removed to the set, false + * if {@code o} was not in the set. + */ + public boolean remove(final Object o) { + return _map.remove(o) == V; + } + + /** + * Current count of elements in the set. Due to concurrent racing updates, + * the size is only ever approximate. Updates due to the calling thread are + * immediately visible to calling thread. + * + * @return count of elements. + */ + public int size() { + return _map.size(); + } + + /** + * Empty the set. + */ + public void clear() { + _map.clear(); + } + + public Iterator iterator() { + return _map.keySet().iterator(); + } + + // --- + + /** + * Atomically make the set immutable. Future calls to mutate will throw an + * IllegalStateException. Existing mutator calls in other threads racing + * with this thread and will either throw IllegalStateException or their + * update will be visible to this thread. This implies that a simple flag + * cannot make the Set immutable, because a late-arriving update in another + * thread might see immutable flag not set yet, then mutate the Set after + * the {@link #readOnly} call returns. 
This call can be called concurrently + * (and indeed until the operation completes, all calls on the Set from any + * thread either complete normally or end up calling {@link #readOnly} + * internally). + *

    + *

    This call is useful in debugging multi-threaded programs where the + * Set is constructed in parallel, but construction completes after some + * time; and after construction the Set is only read. Making the Set + * read-only will cause updates arriving after construction is supposedly + * complete to throw an {@link IllegalStateException}. + */ + + // (1) call _map's immutable() call + // (2) get snapshot + // (3) CAS down a local map, power-of-2 larger than _map.size()+1/8th + // (4) start @ random, visit all snapshot, insert live keys + // (5) CAS _map to null, needs happens-after (4) + // (6) if Set call sees _map is null, needs happens-after (4) for readers + public void readOnly() { + throw new RuntimeException("Unimplemented"); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashtable.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashtable.java new file mode 100644 index 00000000000..6d7645661d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingHashtable.java @@ -0,0 +1,1568 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +/* WARNING: MACHINE GENERATED FILE! DO NOT EDIT!*/ +package org.elasticsearch.util.concurrent.highscalelib; + +import sun.misc.Unsafe; + +import java.io.IOException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +/** + * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap} + * with better scaling properties and generally lower costs to mutate the Map. + * It provides identical correctness properties as ConcurrentHashMap. All + * operations are non-blocking and multi-thread safe, including all update + * operations. {@link NonBlockingHashtable} scales substatially better than + * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a + * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU + * Azul box, even with 100% updates or 100% reads or any fraction in-between. + * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, + * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. + *

    + * This class obeys the same functional specification as {@link + * java.util.Hashtable}, and includes versions of methods corresponding to + * each method of Hashtable. However, even though all operations are + * thread-safe, operations do not entail locking and there is + * not any support for locking the entire table in a way that + * prevents all access. This class is fully interoperable with + * Hashtable in programs that rely on its thread safety but not on + * its synchronization details. + *

    + *

    Operations (including put) generally do not block, so may + * overlap with other update operations (including other puts and + * removes). Retrievals reflect the results of the most recently + * completed update operations holding upon their onset. For + * aggregate operations such as putAll, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, Iterators + * and Enumerations return elements reflecting the state of the hash table at + * some point at or since the creation of the iterator/enumeration. They do + * not throw {@link ConcurrentModificationException}. However, + * iterators are designed to be used by only one thread at a time. + *

    + *

    Very full tables, or tables with high reprobe rates may trigger an + * internal resize operation to move into a larger table. Resizing is not + * terribly expensive, but it is not free either; during resize operations + * table throughput may drop somewhat. All threads that visit the table + * during a resize will 'help' the resizing but will still be allowed to + * complete their operation before the resize is finished (i.e., a simple + * 'get' operation on a million-entry table undergoing resizing will not need + * to block until the entire million entries are copied). + *

    + *

    This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + *

    + *

    Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a key or value. + * + * @author Cliff Click + * @author Prashant Deva - moved hash() function out of get_impl() so it is + * not calculated multiple times. + * @version 1.1.2 + * @param the type of keys maintained by this map + * @param the type of mapped values + * @since 1.5 + */ + +public class NonBlockingHashtable + extends Dictionary + implements ConcurrentMap, Cloneable, Serializable { + + private static final long serialVersionUID = 1234123412341234123L; + + private static final int REPROBE_LIMIT = 10; // Too many reprobes then force a table-resize + + // --- Bits to allow Unsafe access to arrays + private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); + private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); + private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); + + private static long rawIndex(final Object[] ary, final int idx) { + assert idx >= 0 && idx < ary.length; + return _Obase + idx * _Oscale; + } + + // --- Setup to use Unsafe + private static final long _kvs_offset; + + static { // + Field f = null; + try { + f = NonBlockingHashtable.class.getDeclaredField("_kvs"); + } + catch (java.lang.NoSuchFieldException e) { + throw new RuntimeException(e); + } + _kvs_offset = _unsafe.objectFieldOffset(f); + } + + private final boolean CAS_kvs(final Object[] oldkvs, final Object[] newkvs) { + return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs); + } + + // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class + + private static final class Prime { + final Object _V; + + Prime(Object V) { + _V = V; + } + + static Object unbox(Object V) { + return V instanceof Prime ? 
((Prime) V)._V : V; + } + } + + // --- hash ---------------------------------------------------------------- + // Helper function to spread lousy hashCodes + + private static final int hash(final Object key) { + int h = key.hashCode(); // The real hashCode call + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + } + + // --- The Hash Table -------------------- + // Slot 0 is always used for a 'CHM' entry below to hold the interesting + // bits of the hash table. Slot 1 holds full hashes as an array of ints. + // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table + // can be atomically replaced by CASing the _kvs field. + // + // Why is CHM buried inside the _kvs Object array, instead of the other way + // around? The CHM info is used during resize events and updates, but not + // during standard 'get' operations. I assume 'get' is much more frequent + // than 'put'. 'get' can skip the extra indirection of skipping through the + // CHM to reach the _kvs array. + private transient Object[] _kvs; + + private static final CHM chm(Object[] kvs) { + return (CHM) kvs[0]; + } + + private static final int[] hashes(Object[] kvs) { + return (int[]) kvs[1]; + } + + // Number of K,V pairs in the table + + private static final int len(Object[] kvs) { + return (kvs.length - 2) >> 1; + } + + // Time since last resize + private transient long _last_resize_milli; + + // --- Minimum table size ---------------- + // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a + // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul. 
+ private static final int MIN_SIZE_LOG = 3; // + private static final int MIN_SIZE = (1 << MIN_SIZE_LOG); // Must be power of 2 + + // --- Sentinels ------------------------- + // No-Match-Old - putIfMatch does updates only if it matches the old value, + // and NO_MATCH_OLD basically counts as a wildcard match. + private static final Object NO_MATCH_OLD = new Object(); // Sentinel + // Match-Any-not-null - putIfMatch does updates only if it find a real old + // value. + private static final Object MATCH_ANY = new Object(); // Sentinel + // This K/V pair has been deleted (but the Key slot is forever claimed). + // The same Key can be reinserted with a new value later. + private static final Object TOMBSTONE = new Object(); + // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a + // table resize started. The K/V pair has been marked so that no new + // updates can happen to the old table (and since the K/V pair was deleted + // nothing was copied to the new table). + private static final Prime TOMBPRIME = new Prime(TOMBSTONE); + + // --- key,val ------------------------------------------------------------- + // Access K,V for a given idx + // + // Note that these are static, so that the caller is forced to read the _kvs + // field only once, and share that read across all key/val calls - lest the + // _kvs field move out from under us and back-to-back key & val calls refer + // to different _kvs arrays. 
+ + private static final Object key(Object[] kvs, int idx) { + return kvs[(idx << 1) + 2]; + } + + private static final Object val(Object[] kvs, int idx) { + return kvs[(idx << 1) + 3]; + } + + private static final boolean CAS_key(Object[] kvs, int idx, Object old, Object key) { + return _unsafe.compareAndSwapObject(kvs, rawIndex(kvs, (idx << 1) + 2), old, key); + } + + private static final boolean CAS_val(Object[] kvs, int idx, Object old, Object val) { + return _unsafe.compareAndSwapObject(kvs, rawIndex(kvs, (idx << 1) + 3), old, val); + } + + + // --- dump ---------------------------------------------------------------- + + /** + * Verbose printout of table internals, useful for debugging. + */ + public final void print() { + System.out.println("========="); + print2(_kvs); + System.out.println("========="); + } + + // print the entire state of the table + + private final void print(Object[] kvs) { + for (int i = 0; i < len(kvs); i++) { + Object K = key(kvs, i); + if (K != null) { + String KS = (K == TOMBSTONE) ? "XXX" : K.toString(); + Object V = val(kvs, i); + Object U = Prime.unbox(V); + String p = (V == U) ? "" : "prime_"; + String US = (U == TOMBSTONE) ? "tombstone" : U.toString(); + System.out.println("" + i + " (" + KS + "," + p + US + ")"); + } + } + Object[] newkvs = chm(kvs)._newkvs; // New table, if any + if (newkvs != null) { + System.out.println("----"); + print(newkvs); + } + } + + // print only the live values, broken down by the table they are in + + private final void print2(Object[] kvs) { + for (int i = 0; i < len(kvs); i++) { + Object key = key(kvs, i); + Object val = val(kvs, i); + Object U = Prime.unbox(val); + if (key != null && key != TOMBSTONE && // key is sane + val != null && U != TOMBSTONE) { // val is sane + String p = (val == U) ? 
"" : "prime_"; + System.out.println("" + i + " (" + key + "," + p + val + ")"); + } + } + Object[] newkvs = chm(kvs)._newkvs; // New table, if any + if (newkvs != null) { + System.out.println("----"); + print2(newkvs); + } + } + + // Count of reprobes + private transient Counter _reprobes = new Counter(); + + /** + * Get and clear the current count of reprobes. Reprobes happen on key + * collisions, and a high reprobe rate may indicate a poor hash function or + * weaknesses in the table resizing function. + * + * @return the count of reprobes since the last call to {@link #reprobes} + * or since the table was created. + */ + public long reprobes() { + long r = _reprobes.get(); + _reprobes = new Counter(); + return r; + } + + + // --- reprobe_limit ----------------------------------------------------- + // Heuristic to decide if we have reprobed toooo many times. Running over + // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it + // can trigger a table resize. Several places must have exact agreement on + // what the reprobe_limit is, so we share it here. + + private static final int reprobe_limit(int len) { + return REPROBE_LIMIT + (len >> 2); + } + + // --- NonBlockingHashtable -------------------------------------------------- + // Constructors + + /** + * Create a new NonBlockingHashtable with default minimum size (currently set + * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). + */ + public NonBlockingHashtable() { + this(MIN_SIZE); + } + + /** + * Create a new NonBlockingHashtable with initial room for the given number of + * elements, thus avoiding internal resizing operations to reach an + * appropriate size. Large numbers here when used with a small count of + * elements will sacrifice space for a small amount of time gained. The + * initial size will be rounded up internally to the next larger power of 2. 
+ */ + public NonBlockingHashtable(final int initial_sz) { + initialize(initial_sz); + } + + private final void initialize(int initial_sz) { + if (initial_sz < 0) throw new IllegalArgumentException(); + int i; // Convert to next largest power-of-2 + if (initial_sz > 1024 * 1024) initial_sz = 1024 * 1024; + for (i = MIN_SIZE_LOG; (1 << i) < (initial_sz << 2); i++) ; + // Double size for K,V pairs, add 1 for CHM and 1 for hashes + _kvs = new Object[((1 << i) << 1) + 2]; + _kvs[0] = new CHM(new Counter()); // CHM in slot 0 + _kvs[1] = new int[1 << i]; // Matching hash entries + _last_resize_milli = System.currentTimeMillis(); + } + + // Version for subclassed readObject calls, to be called after the defaultReadObject + + protected final void initialize() { + initialize(MIN_SIZE); + } + + // --- wrappers ------------------------------------------------------------ + + /** + * Returns the number of key-value mappings in this map. + * + * @return the number of key-value mappings in this map + */ + @Override + public int size() { + return chm(_kvs).size(); + } + + /** + * Returns size() == 0. + * + * @return size() == 0 + */ + @Override + public boolean isEmpty() { + return size() == 0; + } + + /** + * Tests if the key in the table using the equals method. + * + * @return true if the key is in the table using the equals method + * @throws NullPointerException if the specified key is null + */ + @Override + public boolean containsKey(Object key) { + return get(key) != null; + } + + /** + * Legacy method testing if some key maps into the specified value in this + * table. This method is identical in functionality to {@link + * #containsValue}, and exists solely to ensure full compatibility with + * class {@link java.util.Hashtable}, which supported this method prior to + * introduction of the Java Collections framework. 
+ * + * @param val a value to search for + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object val) { + return containsValue(val); + } + + /** + * Maps the specified key to the specified value in the table. Neither key + * nor value can be null. + *

    The value can be retrieved by calling {@link #get} with a key that is + * equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param val value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + @Override + public TypeV put(TypeK key, TypeV val) { + return putIfMatch(key, val, NO_MATCH_OLD); + } + + /** + * Atomically, do a {@link #put} if-and-only-if the key is not mapped. + * Useful to ensure that only a single mapping for the key exists, even if + * many threads are trying to create the mapping in parallel. + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public TypeV putIfAbsent(TypeK key, TypeV val) { + return putIfMatch(key, val, TOMBSTONE); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + @Override + public TypeV remove(Object key) { + return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD); + } + + /** + * Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped + * to a value which is equals to the given value. + * + * @throws NullPointerException if the specified key or value is null + */ + public boolean remove(Object key, Object val) { + return putIfMatch(key, TOMBSTONE, val) == val; + } + + /** + * Atomically do a put(key,val) if-and-only-if the key is + * mapped to some value already. 
+ * + * @throws NullPointerException if the specified key or value is null + */ + public TypeV replace(TypeK key, TypeV val) { + return putIfMatch(key, val, MATCH_ANY); + } + + /** + * Atomically do a put(key,newValue) if-and-only-if the key is + * mapped a value which is equals to oldValue. + * + * @throws NullPointerException if the specified key or value is null + */ + public boolean replace(TypeK key, TypeV oldValue, TypeV newValue) { + return putIfMatch(key, newValue, oldValue) == oldValue; + } + + private final TypeV putIfMatch(Object key, Object newVal, Object oldVal) { + if (oldVal == null || newVal == null) throw new NullPointerException(); + final Object res = putIfMatch(this, _kvs, key, newVal, oldVal); + assert !(res instanceof Prime); + assert res != null; + return res == TOMBSTONE ? null : (TypeV) res; + } + + + /** + * Copies all of the mappings from the specified map to this one, replacing + * any existing mappings. + * + * @param m mappings to be stored in this map + */ + @Override + public void putAll(Map m) { + for (Map.Entry e : m.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Removes all of the mappings from this map. + */ + @Override + public void clear() { // Smack a new empty table down + Object[] newkvs = new NonBlockingHashtable(MIN_SIZE)._kvs; + while (!CAS_kvs(_kvs, newkvs)) // Spin until the clear works + ; + } + + /** + * Returns true if this Map maps one or more keys to the specified + * value. Note: This method requires a full internal traversal of the + * hash table and is much slower than {@link #containsKey}. 
+ * + * @param val value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + @Override + public boolean containsValue(final Object val) { + if (val == null) throw new NullPointerException(); + for (TypeV V : values()) + if (V == val || V.equals(val)) + return true; + return false; + } + + // This function is supposed to do something for Hashtable, and the JCK + // tests hang until it gets called... by somebody ... for some reason, + // any reason.... + + protected void rehash() { + } + + /** + * Creates a shallow copy of this hashtable. All the structure of the + * hashtable itself is copied, but the keys and values are not cloned. + * This is a relatively expensive operation. + * + * @return a clone of the hashtable. + */ + @Override + public Object clone() { + try { + // Must clone, to get the class right; NBHM might have been + // extended so it would be wrong to just make a new NBHM. + NonBlockingHashtable t = (NonBlockingHashtable) super.clone(); + // But I don't have an atomic clone operation - the underlying _kvs + // structure is undergoing rapid change. If I just clone the _kvs + // field, the CHM in _kvs[0] won't be in sync. + // + // Wipe out the cloned array (it was shallow anyways). + t.clear(); + // Now copy sanely + for (TypeK K : keySet()) { + final TypeV V = get(K); // Do an official 'get' + t.put(K, V); + } + return t; + } catch (CloneNotSupportedException e) { + // this shouldn't happen, since we are Cloneable + throw new InternalError(); + } + } + + /** + * Returns a string representation of this map. The string representation + * consists of a list of key-value mappings in the order returned by the + * map's entrySet view's iterator, enclosed in braces + * ("{}"). Adjacent mappings are separated by the characters + * ", " (comma and space). 
Each key-value mapping is rendered as + * the key followed by an equals sign ("=") followed by the + * associated value. Keys and values are converted to strings as by + * {@link String#valueOf(Object)}. + * + * @return a string representation of this map + */ + @Override + public String toString() { + Iterator> i = entrySet().iterator(); + if (!i.hasNext()) + return "{}"; + + StringBuilder sb = new StringBuilder(); + sb.append('{'); + for (; ;) { + Entry e = i.next(); + TypeK key = e.getKey(); + TypeV value = e.getValue(); + sb.append(key == this ? "(this Map)" : key); + sb.append('='); + sb.append(value == this ? "(this Map)" : value); + if (!i.hasNext()) + return sb.append('}').toString(); + sb.append(", "); + } + } + + // --- keyeq --------------------------------------------------------------- + // Check for key equality. Try direct pointer compare first, then see if + // the hashes are unequal (fast negative test) and finally do the full-on + // 'equals' v-call. + + private static boolean keyeq(Object K, Object key, int[] hashes, int hash, int fullhash) { + return + K == key || // Either keys match exactly OR + // hash exists and matches? hash can be zero during the install of a + // new key/value pair. + ((hashes[hash] == 0 || hashes[hash] == fullhash) && + // Do not call the users' "equals()" call with a Tombstone, as this can + // surprise poorly written "equals()" calls that throw exceptions + // instead of simply returning false. + K != TOMBSTONE && // Do not call users' equals call with a Tombstone + // Do the match the hard way - with the users' key being the loop- + // invariant "this" pointer. I could have flipped the order of + // operands (since equals is commutative), but I'm making mega-morphic + // v-calls in a reprobing loop and nailing down the 'this' argument + // gives both the JIT and the hardware a chance to prefetch the call target. 
+ key.equals(K)); // Finally do the hard match + } + + // --- get ----------------------------------------------------------------- + + /** + * Returns the value to which the specified key is mapped, or {@code null} + * if this map contains no mapping for the key. + *

    More formally, if this map contains a mapping from a key {@code k} to + * a value {@code v} such that {@code key.equals(k)}, then this method + * returns {@code v}; otherwise it returns {@code null}. (There can be at + * most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + // Never returns a Prime nor a Tombstone. + @Override + public TypeV get(Object key) { + final int fullhash = hash(key); // throws NullPointerException if key is null + final Object V = get_impl(this, _kvs, key, fullhash); + assert !(V instanceof Prime); // Never return a Prime + return (TypeV) V; + } + + private static final Object get_impl(final NonBlockingHashtable topmap, final Object[] kvs, final Object key, final int fullhash) { + final int len = len(kvs); // Count of key/value pairs, reads kvs.length + final CHM chm = chm(kvs); // The CHM, for a volatile read below; reads slot 0 of kvs + final int[] hashes = hashes(kvs); // The memoized hashes; reads slot 1 of kvs + + int idx = fullhash & (len - 1); // First key hash + + // Main spin/reprobe loop, looking for a Key hit + int reprobe_cnt = 0; + while (true) { + // Probe table. Each read of 'val' probably misses in cache in a big + // table; hopefully the read of 'key' then hits in cache. + final Object K = key(kvs, idx); // Get key before volatile read, could be null + final Object V = val(kvs, idx); // Get value before volatile read, could be null or Tombstone or Prime + if (K == null) return null; // A clear miss + + // We need a volatile-read here to preserve happens-before semantics on + // newly inserted Keys. If the Key body was written just before inserting + // into the table a Key-compare here might read the uninitalized Key body. + // Annoyingly this means we have to volatile-read before EACH key compare. + // . + // We also need a volatile-read between reading a newly inserted Value + // and returning the Value (so the user might end up reading the stale + // Value contents). 
Same problem as with keys - and the one volatile + // read covers both. + final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare + + // Key-compare + if (keyeq(K, key, hashes, idx, fullhash)) { + // Key hit! Check for no table-copy-in-progress + if (!(V instanceof Prime)) // No copy? + return (V == TOMBSTONE) ? null : V; // Return the value + // Key hit - but slot is (possibly partially) copied to the new table. + // Finish the copy & retry in the new table. + return get_impl(topmap, chm.copy_slot_and_check(topmap, kvs, idx, key), key, fullhash); // Retry in the new table + } + // get and put must have the same key lookup logic! But only 'put' + // needs to force a table-resize for a too-long key-reprobe sequence. + // Check for too-many-reprobes on get - and flip to the new table. + if (++reprobe_cnt >= reprobe_limit(len) || // too many probes + key == TOMBSTONE) // found a TOMBSTONE key, means no more keys in this table + return newkvs == null ? null : get_impl(topmap, topmap.help_copy(newkvs), key, fullhash); // Retry in the new table + + idx = (idx + 1) & (len - 1); // Reprobe by 1! (could now prefetch) + } + } + + // --- putIfMatch --------------------------------------------------------- + // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned + // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be + // assumed to work (although might have been immediately overwritten). Only + // the path through copy_slot passes in an expected value of null, and + // putIfMatch only returns a null if passed in an expected null. 
+ + private static final Object putIfMatch(final NonBlockingHashtable topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal) { + assert putval != null; + assert !(putval instanceof Prime); + assert !(expVal instanceof Prime); + final int fullhash = hash(key); // throws NullPointerException if key null + final int len = len(kvs); // Count of key/value pairs, reads kvs.length + final CHM chm = chm(kvs); // Reads kvs[0] + final int[] hashes = hashes(kvs); // Reads kvs[1], read before kvs[0] + int idx = fullhash & (len - 1); + + // --- + // Key-Claim stanza: spin till we can claim a Key (or force a resizing). + int reprobe_cnt = 0; + Object K = null, V = null; + Object[] newkvs = null; + while (true) { // Spin till we get a Key slot + V = val(kvs, idx); // Get old value (before volatile read below!) + K = key(kvs, idx); // Get current key + if (K == null) { // Slot is free? + // Found an empty Key slot - which means this Key has never been in + // this table. No need to put a Tombstone - the Key is not here! + if (putval == TOMBSTONE) return putval; // Not-now & never-been in this table + // Claim the null key-slot + if (CAS_key(kvs, idx, null, key)) { // Claim slot for Key + chm._slots.add(1); // Raise key-slots-used count + hashes[idx] = fullhash; // Memoize fullhash + break; // Got it! + } + // CAS to claim the key-slot failed. + // + // This re-read of the Key points out an annoying short-coming of Java + // CAS. Most hardware CAS's report back the existing value - so that + // if you fail you have a *witness* - the value which caused the CAS + // to fail. The Java API turns this into a boolean destroying the + // witness. Re-reading does not recover the witness because another + // thread can write over the memory after the CAS. Hence we can be in + // the unfortunate situation of having a CAS fail *for cause* but + // having that cause removed by a later store. 
This turns a + // non-spurious-failure CAS (such as Azul has) into one that can + // apparently spuriously fail - and we avoid apparent spurious failure + // by not allowing Keys to ever change. + K = key(kvs, idx); // CAS failed, get updated value + assert K != null; // If keys[idx] is null, CAS shoulda worked + } + // Key slot was not null, there exists a Key here + + // We need a volatile-read here to preserve happens-before semantics on + // newly inserted Keys. If the Key body was written just before inserting + // into the table a Key-compare here might read the uninitalized Key body. + // Annoyingly this means we have to volatile-read before EACH key compare. + newkvs = chm._newkvs; // VOLATILE READ before key compare + + if (keyeq(K, key, hashes, idx, fullhash)) + break; // Got it! + + // get and put must have the same key lookup logic! Lest 'get' give + // up looking too soon. + //topmap._reprobes.add(1); + if (++reprobe_cnt >= reprobe_limit(len) || // too many probes or + key == TOMBSTONE) { // found a TOMBSTONE key, means no more keys + // We simply must have a new table to do a 'put'. At this point a + // 'get' will also go to the new table (if any). We do not need + // to claim a key slot (indeed, we cannot find a free one to claim!). + newkvs = chm.resize(topmap, kvs); + if (expVal != null) topmap.help_copy(newkvs); // help along an existing copy + return putIfMatch(topmap, newkvs, key, putval, expVal); + } + + idx = (idx + 1) & (len - 1); // Reprobe! + } // End of spinning till we get a Key slot + + // --- + // Found the proper Key slot, now update the matching Value slot. We + // never put a null, so Value slots monotonically move from null to + // not-null (deleted Values use Tombstone). Thus if 'V' is null we + // fail this fast cutout and fall into the check for table-full. + if (putval == V) return V; // Fast cutout for no-change + + // See if we want to move to a new table (to avoid high average re-probe + // counts). 
We only check on the initial set of a Value from null to + // not-null (i.e., once per key-insert). Of course we got a 'free' check + // of newkvs once per key-compare (not really free, but paid-for by the + // time we get here). + if (newkvs == null && // New table-copy already spotted? + // Once per fresh key-insert check the hard way + ((V == null && chm.tableFull(reprobe_cnt, len)) || + // Or we found a Prime, but the JMM allowed reordering such that we + // did not spot the new table (very rare race here: the writing + // thread did a CAS of _newkvs then a store of a Prime. This thread + // reads the Prime, then reads _newkvs - but the read of Prime was so + // delayed (or the read of _newkvs was so accelerated) that they + // swapped and we still read a null _newkvs. The resize call below + // will do a CAS on _newkvs forcing the read. + V instanceof Prime)) + newkvs = chm.resize(topmap, kvs); // Force the new table copy to start + // See if we are moving to a new table. + // If so, copy our slot and retry in the new table. + if (newkvs != null) + return putIfMatch(topmap, chm.copy_slot_and_check(topmap, kvs, idx, expVal), key, putval, expVal); + + // --- + // We are finally prepared to update the existing table + while (true) { + assert !(V instanceof Prime); + + // Must match old, and we do not? Then bail out now. Note that either V + // or expVal might be TOMBSTONE. Also V can be null, if we've never + // inserted a value before. expVal can be null if we are called from + // copy_slot. + + if (expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? + V != expVal && // No instant match already? + (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && + !(V == null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo + (expVal == null || !expVal.equals(V))) // Expensive equals check at the last + return V; // Do not update! 
+ + // Actually change the Value in the Key,Value pair + if (CAS_val(kvs, idx, V, putval)) { + // CAS succeeded - we did the update! + // Both normal put's and table-copy calls putIfMatch, but table-copy + // does not (effectively) increase the number of live k/v pairs. + if (expVal != null) { + // Adjust sizes - a striped counter + if ((V == null || V == TOMBSTONE) && putval != TOMBSTONE) chm._size.add(1); + if (!(V == null || V == TOMBSTONE) && putval == TOMBSTONE) chm._size.add(-1); + } + return (V == null && expVal != null) ? TOMBSTONE : V; + } + // Else CAS failed + V = val(kvs, idx); // Get new value + // If a Prime'd value got installed, we need to re-run the put on the + // new table. Otherwise we lost the CAS to another racing put. + // Simply retry from the start. + if (V instanceof Prime) + return putIfMatch(topmap, chm.copy_slot_and_check(topmap, kvs, idx, expVal), key, putval, expVal); + } + } + + // --- help_copy --------------------------------------------------------- + // Help along an existing resize operation. This is just a fast cut-out + // wrapper, to encourage inlining for the fast no-copy-in-progress case. We + // always help the top-most table copy, even if there are nested table + // copies in progress. + + private final Object[] help_copy(Object[] helper) { + // Read the top-level KVS only once. We'll try to help this copy along, + // even if it gets promoted out from under us (i.e., the copy completes + // and another KVS becomes the top-level copy). 
+ Object[] topkvs = _kvs; + CHM topchm = chm(topkvs); + if (topchm._newkvs == null) return helper; // No copy in-progress + topchm.help_copy_impl(this, topkvs, false); + return helper; + } + + + // --- CHM ----------------------------------------------------------------- + // The control structure for the NonBlockingHashtable + + private static final class CHM { + // Size in active K,V pairs + private final Counter _size; + + public int size() { + return (int) _size.get(); + } + + // --- + // These next 2 fields are used in the resizing heuristics, to judge when + // it is time to resize or copy the table. Slots is a count of used-up + // key slots, and when it nears a large fraction of the table we probably + // end up reprobing too much. Last-resize-milli is the time since the + // last resize; if we are running back-to-back resizes without growing + // (because there are only a few live keys but many slots full of dead + // keys) then we need a larger table to cut down on the churn. + + // Count of used slots, to tell when table is full of dead unusable slots + private final Counter _slots; + + public int slots() { + return (int) _slots.get(); + } + + // --- + // New mappings, used during resizing. + // The 'new KVs' array - created during a resize operation. This + // represents the new table being copied from the old one. It's the + // volatile variable that is read as we cross from one table to the next, + // to get the required memory orderings. It monotonically transits from + // null to set (once). + volatile Object[] _newkvs; + private final AtomicReferenceFieldUpdater _newkvsUpdater = + AtomicReferenceFieldUpdater.newUpdater(CHM.class, Object[].class, "_newkvs"); + + // Set the _next field if we can. + + boolean CAS_newkvs(Object[] newkvs) { + while (_newkvs == null) + if (_newkvsUpdater.compareAndSet(this, null, newkvs)) + return true; + return false; + } + + // Sometimes many threads race to create a new very large table. 
Only 1 + // wins the race, but the losers all allocate a junk large table with + // hefty allocation costs. Attempt to control the overkill here by + // throttling attempts to create a new table. I cannot really block here + // (lest I lose the non-blocking property) but late-arriving threads can + // give the initial resizing thread a little time to allocate the initial + // new table. The Right Long Term Fix here is to use array-lets and + // incrementally create the new very large array. In C I'd make the array + // with malloc (which would mmap under the hood) which would only eat + // virtual-address and not real memory - and after Somebody wins then we + // could in parallel initialize the array. Java does not allow + // un-initialized array creation (especially of ref arrays!). + volatile long _resizers; // count of threads attempting an initial resize + private static final AtomicLongFieldUpdater _resizerUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); + + // --- + // Simple constructor + + CHM(Counter size) { + _size = size; + _slots = new Counter(); + } + + // --- tableFull --------------------------------------------------------- + // Heuristic to decide if this table is too full, and we should start a + // new table. Note that if a 'get' call has reprobed too many times and + // decided the table must be full, then always the estimate_sum must be + // high and we must report the table is full. If we do not, then we might + // end up deciding that the table is not full and inserting into the + // current table, while a 'get' has decided the same key cannot be in this + // table because of too many reprobes. The invariant is: + // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) + + private final boolean tableFull(int reprobe_cnt, int len) { + return + // Do the cheap check first: we allow some number of reprobes always + reprobe_cnt >= REPROBE_LIMIT && + // More expensive check: see if the table is > 1/4 full. 
+ _slots.estimate_get() >= reprobe_limit(len); + } + + // --- resize ------------------------------------------------------------ + // Resizing after too many probes. "How Big???" heuristics are here. + // Callers will (not this routine) will 'help_copy' any in-progress copy. + // Since this routine has a fast cutout for copy-already-started, callers + // MUST 'help_copy' lest we have a path which forever runs through + // 'resize' only to discover a copy-in-progress which never progresses. + + private final Object[] resize(NonBlockingHashtable topmap, Object[] kvs) { + assert chm(kvs) == this; + + // Check for resize already in progress, probably triggered by another thread + Object[] newkvs = _newkvs; // VOLATILE READ + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + + // No copy in-progress, so start one. First up: compute new table size. + int oldlen = len(kvs); // Old count of K,V pairs allowed + int sz = size(); // Get current table count of active K,V pairs + int newsz = sz; // First size estimate + + // Heuristic to determine new size. We expect plenty of dead-slots-with-keys + // and we need some decent padding to avoid endless reprobing. + if (sz >= (oldlen >> 2)) { // If we are >25% full of keys then... + newsz = oldlen << 1; // Double size + if (sz >= (oldlen >> 1)) // If we are >50% full of keys then... + newsz = oldlen << 2; // Double double size + } + // This heuristic in the next 2 lines leads to a much denser table + // with a higher reprobe rate + //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... + // newsz = oldlen<<1; // Double size + + // Last (re)size operation was very recent? Then double again; slows + // down resize operations for tables subject to a high key churn rate. + long tm = System.currentTimeMillis(); + long q = 0; + if (newsz <= oldlen && // New table would shrink or hold steady? 
+ tm <= topmap._last_resize_milli + 10000 && // Recent resize (less than 1 sec ago) + (q = _slots.estimate_get()) >= (sz << 1)) // 1/2 of keys are dead? + newsz = oldlen << 1; // Double the existing size + + // Do not shrink, ever + if (newsz < oldlen) newsz = oldlen; + + // Convert to power-of-2 + int log2; + for (log2 = MIN_SIZE_LOG; (1 << log2) < newsz; log2++) ; // Compute log2 of size + + // Now limit the number of threads actually allocating memory to a + // handful - lest we have 750 threads all trying to allocate a giant + // resized array. + long r = _resizers; + while (!_resizerUpdater.compareAndSet(this, r, r + 1)) + r = _resizers; + // Size calculation: 2 words (K+V) per table entry, plus a handful. We + // guess at 32-bit pointers; 64-bit pointers screws up the size calc by + // 2x but does not screw up the heuristic very much. + int megs = ((((1 << log2) << 1) + 4) << 3/*word to bytes*/) >> 20/*megs*/; + if (r >= 2 && megs > 0) { // Already 2 guys trying; wait and see + newkvs = _newkvs; // Between dorking around, another thread did it + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + // TODO - use a wait with timeout, so we'll wakeup as soon as the new table + // is ready, or after the timeout in any case. + //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup + // For now, sleep a tad and see if the 2 guys already trying to make + // the table actually get around to making it happen. + try { + Thread.sleep(8 * megs); + } catch (Exception e) { + } + } + // Last check, since the 'new' below is expensive and there is a chance + // that another thread slipped in a new thread while we ran the heuristic. 
+ newkvs = _newkvs; + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + + // Double size for K,V pairs, add 1 for CHM + newkvs = new Object[((1 << log2) << 1) + 2]; // This can get expensive for big arrays + newkvs[0] = new CHM(_size); // CHM in slot 0 + newkvs[1] = new int[1 << log2]; // hashes in slot 1 + + // Another check after the slow allocation + if (_newkvs != null) // See if resize is already in progress + return _newkvs; // Use the new table already + + // The new table must be CAS'd in so only 1 winner amongst duplicate + // racing resizing threads. Extra CHM's will be GC'd. + if (CAS_newkvs(newkvs)) { // NOW a resize-is-in-progress! + //notifyAll(); // Wake up any sleepers + //long nano = System.nanoTime(); + //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1< _copyIdxUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); + + // Work-done reporting. Used to efficiently signal when we can move to + // the new table. From 0 to len(oldkvs) refers to copying from the old + // table to the new. + volatile long _copyDone = 0; + static private final AtomicLongFieldUpdater _copyDoneUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); + + // --- help_copy_impl ---------------------------------------------------- + // Help along an existing resize operation. We hope its the top-level + // copy (it was when we started) but this CHM might have been promoted out + // of the top position. 
+ + private final void help_copy_impl(NonBlockingHashtable topmap, Object[] oldkvs, boolean copy_all) { + assert chm(oldkvs) == this; + Object[] newkvs = _newkvs; + assert newkvs != null; // Already checked by caller + int oldlen = len(oldkvs); // Total amount to copy + final int MIN_COPY_WORK = Math.min(oldlen, 1024); // Limit per-thread work + + // --- + int panic_start = -1; + int copyidx = -9999; // Fool javac to think it's initialized + while (_copyDone < oldlen) { // Still needing to copy? + // Carve out a chunk of work. The counter wraps around so every + // thread eventually tries to copy every slot repeatedly. + + // We "panic" if we have tried TWICE to copy every slot - and it still + // has not happened. i.e., twice some thread somewhere claimed they + // would copy 'slot X' (by bumping _copyIdx) but they never claimed to + // have finished (by bumping _copyDone). Our choices become limited: + // we can wait for the work-claimers to finish (and become a blocking + // algorithm) or do the copy work ourselves. Tiny tables with huge + // thread counts trying to copy the table often 'panic'. + if (panic_start == -1) { // No panic? + copyidx = (int) _copyIdx; + while (copyidx < (oldlen << 1) && // 'panic' check + !_copyIdxUpdater.compareAndSet(this, copyidx, copyidx + MIN_COPY_WORK)) + copyidx = (int) _copyIdx; // Re-read + if (!(copyidx < (oldlen << 1))) // Panic! + panic_start = copyidx; // Record where we started to panic-copy + } + + // We now know what to copy. Try to copy. + int workdone = 0; + for (int i = 0; i < MIN_COPY_WORK; i++) + if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs, newkvs)) // Made an oldtable slot go dead? + workdone++; // Yes! 
+ if (workdone > 0) // Report work-done occasionally + copy_check_and_promote(topmap, oldkvs, workdone);// See if we can promote + //for( int i=0; i 0) { + while (!_copyDoneUpdater.compareAndSet(this, copyDone, copyDone + workdone)) { + copyDone = _copyDone; // Reload, retry + assert (copyDone + workdone) <= oldlen; + } + //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) ) + //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); + } + + // Check for copy being ALL done, and promote. Note that we might have + // nested in-progress copies and manage to finish a nested copy before + // finishing the top-level copy. We only promote top-level copies. + if (copyDone + workdone == oldlen && // Ready to promote this table? + topmap._kvs == oldkvs && // Looking at the top-level table? + // Attempt to promote + topmap.CAS_kvs(oldkvs, _newkvs)) { + topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check + //long nano = System.nanoTime(); + //System.out.println(" "+nano+" Promote table to "+len(_newkvs)); + //if( System.out != null ) System.out.print("]"); + } + } + + // --- copy_slot --------------------------------------------------------- + // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can + // confirm that the new table guaranteed has a value for this old-table + // slot. We need an accurate confirmed-copy count so that we know when we + // can promote (if we promote the new table too soon, other threads may + // 'miss' on values not-yet-copied from the old table). We don't allow + // any direct updates on the new table, unless they first happened to the + // old table - so that any transition in the new table from null to + // not-null must have been from a copy_slot (or other old-table overwrite) + // and not from a thread directly writing in the new table. Thus we can + // count null-to-not-null transitions in the new table. 
+ + private boolean copy_slot(NonBlockingHashtable topmap, int idx, Object[] oldkvs, Object[] newkvs) { + // Blindly set the key slot from null to TOMBSTONE, to eagerly stop + // fresh put's from inserting new values in the old table when the old + // table is mid-resize. We don't need to act on the results here, + // because our correctness stems from box'ing the Value field. Slamming + // the Key field is a minor speed optimization. + Object key; + while ((key = key(oldkvs, idx)) == null) + CAS_key(oldkvs, idx, null, TOMBSTONE); + + // --- + // Prevent new values from appearing in the old table. + // Box what we see in the old table, to prevent further updates. + Object oldval = val(oldkvs, idx); // Read OLD table + while (!(oldval instanceof Prime)) { + final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); + if (CAS_val(oldkvs, idx, oldval, box)) { // CAS down a box'd version of oldval + // If we made the Value slot hold a TOMBPRIME, then we both + // prevented further updates here but also the (absent) + // oldval is vaccuously available in the new table. We + // return with true here: any thread looking for a value for + // this key can correctly go straight to the new table and + // skip looking in the old table. + if (box == TOMBPRIME) + return true; + // Otherwise we boxed something, but it still needs to be + // copied into the new table. + oldval = box; // Record updated oldval + break; // Break loop; oldval is now boxed by us + } + oldval = val(oldkvs, idx); // Else try, try again + } + if (oldval == TOMBPRIME) return false; // Copy already complete here! + + // --- + // Copy the value into the new table, but only if we overwrite a null. + // If another value is already in the new table, then somebody else + // wrote something there and that write is happens-after any value that + // appears in the old table. 
If putIfMatch does not find a null in the + // new table - somebody else should have recorded the null-not_null + // transition in this copy. + Object old_unboxed = ((Prime) oldval)._V; + assert old_unboxed != TOMBSTONE; + boolean copied_into_new = (putIfMatch(topmap, newkvs, key, old_unboxed, null) == null); + + // --- + // Finally, now that any old value is exposed in the new table, we can + // forever hide the old-table value by slapping a TOMBPRIME down. This + // will stop other threads from uselessly attempting to copy this slot + // (i.e., it's a speed optimization not a correctness issue). + while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME)) + oldval = val(oldkvs, idx); + + return copied_into_new; + } // end copy_slot + } // End of CHM + + + // --- Snapshot ------------------------------------------------------------ + // The main class for iterating over the NBHM. It "snapshots" a clean + // view of the K/V array. + + private class SnapshotV implements Iterator, Enumeration { + final Object[] _sskvs; + + public SnapshotV() { + while (true) { // Verify no table-copy-in-progress + Object[] topkvs = _kvs; + CHM topchm = chm(topkvs); + if (topchm._newkvs == null) { // No table-copy-in-progress + // The "linearization point" for the iteration. Every key in this + // table will be visited, but keys added later might be skipped or + // even be added to a following table (also not iterated over). + _sskvs = topkvs; + break; + } + // Table copy in-progress - so we cannot get a clean iteration. We + // must help finish the table copy before we can start iterating. 
+ topchm.help_copy_impl(NonBlockingHashtable.this, topkvs, true); + } + // Warm-up the iterator + next(); + } + + int length() { + return len(_sskvs); + } + + Object key(int idx) { + return NonBlockingHashtable.key(_sskvs, idx); + } + + private int _idx; // Varies from 0-keys.length + private Object _nextK, _prevK; // Last 2 keys found + private TypeV _nextV, _prevV; // Last 2 values found + + public boolean hasNext() { + return _nextV != null; + } + + public TypeV next() { + // 'next' actually knows what the next value will be - it had to + // figure that out last go-around lest 'hasNext' report true and + // some other thread deleted the last value. Instead, 'next' + // spends all its effort finding the key that comes after the + // 'next' key. + if (_idx != 0 && _nextV == null) throw new NoSuchElementException(); + _prevK = _nextK; // This will become the previous key + _prevV = _nextV; // This will become the previous value + _nextV = null; // We have no more next-key + // Attempt to set <_nextK,_nextV> to the next K,V pair. + // _nextV is the trigger: stop searching when it is != null + while (_idx < length()) { // Scan array + _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) + if (_nextK != null && // Found something? + _nextK != TOMBSTONE && + (_nextV = get(_nextK)) != null) + break; // Got it! _nextK is a valid Key + } // Else keep scanning + return _prevV; // Return current value. + } + + public void remove() { + if (_prevV == null) throw new IllegalStateException(); + putIfMatch(NonBlockingHashtable.this, _sskvs, _prevK, TOMBSTONE, _prevV); + _prevV = null; + } + + public TypeV nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the values in this table. 
+ * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new SnapshotV(); + } + + // --- values -------------------------------------------------------------- + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are reflected + * in the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from this map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + @Override + public Collection values() { + return new AbstractCollection() { + @Override public void clear() { + NonBlockingHashtable.this.clear(); + } + + @Override public int size() { + return NonBlockingHashtable.this.size(); + } + + @Override public boolean contains(Object v) { + return NonBlockingHashtable.this.containsValue(v); + } + + @Override public Iterator iterator() { + return new SnapshotV(); + } + }; + } + + // --- keySet -------------------------------------------------------------- + + private class SnapshotK implements Iterator, Enumeration { + final SnapshotV _ss; + + public SnapshotK() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public TypeK next() { + _ss.next(); + return (TypeK) _ss._prevK; + } + + public boolean hasNext() { + return _ss.hasNext(); + } + + public TypeK nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new SnapshotK(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. The set + * is backed by the map, so changes to the map are reflected in the set, + * and vice-versa. The set supports element removal, which removes the + * corresponding mapping from this map, via the Iterator.remove, + * Set.remove, removeAll, retainAll, and + * clear operations. It does not support the add or + * addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + @Override + public Set keySet() { + return new AbstractSet() { + @Override public void clear() { + NonBlockingHashtable.this.clear(); + } + + @Override public int size() { + return NonBlockingHashtable.this.size(); + } + + @Override public boolean contains(Object k) { + return NonBlockingHashtable.this.containsKey(k); + } + + @Override public boolean remove(Object k) { + return NonBlockingHashtable.this.remove(k) != null; + } + + @Override public Iterator iterator() { + return new SnapshotK(); + } + }; + } + + + // --- entrySet ------------------------------------------------------------ + // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry. + + private class NBHMEntry extends AbstractEntry { + NBHMEntry(final TypeK k, final TypeV v) { + super(k, v); + } + + public TypeV setValue(final TypeV val) { + if (val == null) throw new NullPointerException(); + _val = val; + return put(_key, val); + } + } + + private class SnapshotE implements Iterator> { + final SnapshotV _ss; + + public SnapshotE() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public Map.Entry next() { + _ss.next(); + return new NBHMEntry((TypeK) _ss._prevK, _ss._prevV); + } + + public boolean hasNext() { + return _ss.hasNext(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. The + * set is backed by the map, so changes to the map are reflected in the + * set, and vice-versa. The set supports element removal, which removes + * the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, removeAll, + * retainAll, and clear operations. 
It does not support + * the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + *

    + *

    Warning: the iterator associated with this Set + * requires the creation of {@link java.util.Map.Entry} objects with each + * iteration. The {@link NonBlockingHashtable} does not normally create or + * using {@link java.util.Map.Entry} objects so they will be created soley + * to support this iteration. Iterating using {@link #keySet} or {@link + * #values} will be more efficient. + */ + @Override + public Set> entrySet() { + return new AbstractSet>() { + @Override public void clear() { + NonBlockingHashtable.this.clear(); + } + + @Override public int size() { + return NonBlockingHashtable.this.size(); + } + + @Override public boolean remove(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + return NonBlockingHashtable.this.remove(e.getKey(), e.getValue()); + } + + @Override public boolean contains(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + TypeV v = get(e.getKey()); + return v.equals(e.getValue()); + } + + @Override public Iterator> iterator() { + return new SnapshotE(); + } + }; + } + + // --- writeObject ------------------------------------------------------- + // Write a NBHM to a stream + + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); // Nothing to write + for (Object K : keySet()) { + final Object V = get(K); // Do an official 'get' + s.writeObject(K); // Write the pair + s.writeObject(V); + } + s.writeObject(null); // Sentinel to indicate end-of-data + s.writeObject(null); + } + + // --- readObject -------------------------------------------------------- + // Read a CHM from a stream + + private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { + s.defaultReadObject(); // Read nothing + initialize(MIN_SIZE); + for (; ;) { + final TypeK K = (TypeK) s.readObject(); + final TypeV V = (TypeV) s.readObject(); + if (K == null) break; + put(K, V); // 
Insert with an offical put + } + } + +} // End NonBlockingHashtable class diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingIdentityHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingIdentityHashMap.java new file mode 100644 index 00000000000..4bc39652ad2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingIdentityHashMap.java @@ -0,0 +1,1537 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import sun.misc.Unsafe; + +import java.io.IOException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +/** + * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap} + * with better scaling properties and generally lower costs to mutate the Map. + * It provides identical correctness properties as ConcurrentHashMap. All + * operations are non-blocking and multi-thread safe, including all update + * operations. {@link NonBlockingHashMap} scales substatially better than + * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a + * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU + * Azul box, even with 100% updates or 100% reads or any fraction in-between. + * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, + * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. + *

    + * This class obeys the same functional specification as {@link + * java.util.Hashtable}, and includes versions of methods corresponding to + * each method of Hashtable. However, even though all operations are + * thread-safe, operations do not entail locking and there is + * not any support for locking the entire table in a way that + * prevents all access. This class is fully interoperable with + * Hashtable in programs that rely on its thread safety but not on + * its synchronization details. + *

    + *

    Operations (including put) generally do not block, so may + * overlap with other update operations (including other puts and + * removes). Retrievals reflect the results of the most recently + * completed update operations holding upon their onset. For + * aggregate operations such as putAll, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, Iterators + * and Enumerations return elements reflecting the state of the hash table at + * some point at or since the creation of the iterator/enumeration. They do + * not throw {@link ConcurrentModificationException}. However, + * iterators are designed to be used by only one thread at a time. + *

    + *

    Very full tables, or tables with high reprobe rates may trigger an + * internal resize operation to move into a larger table. Resizing is not + * terribly expensive, but it is not free either; during resize operations + * table throughput may drop somewhat. All threads that visit the table + * during a resize will 'help' the resizing but will still be allowed to + * complete their operation before the resize is finished (i.e., a simple + * 'get' operation on a million-entry table undergoing resizing will not need + * to block until the entire million entries are copied). + *

    + *

    This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + *

    + *

    Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a key or value. + * + * @author Cliff Click + * @author Prashant Deva + * Modified from original NonBlockingHashMap to use identity equality. + * Uses System.identityHashCode() to calculate hashMap. + * Key equality is compared using '=='. + * @param the type of keys maintained by this map + * @param the type of mapped values + * @since 1.5 + */ + +public class NonBlockingIdentityHashMap + extends AbstractMap + implements ConcurrentMap, Cloneable, Serializable { + + private static final long serialVersionUID = 1234123412341234123L; + + private static final int REPROBE_LIMIT = 10; // Too many reprobes then force a table-resize + + // --- Bits to allow Unsafe access to arrays + private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); + private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); + private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); + + private static long rawIndex(final Object[] ary, final int idx) { + assert idx >= 0 && idx < ary.length; + return _Obase + idx * _Oscale; + } + + // --- Setup to use Unsafe + private static final long _kvs_offset; + + static { // + Field f = null; + try { + f = NonBlockingHashMap.class.getDeclaredField("_kvs"); + } + catch (java.lang.NoSuchFieldException e) { + throw new RuntimeException(e); + } + _kvs_offset = _unsafe.objectFieldOffset(f); + } + + private final boolean CAS_kvs(final Object[] oldkvs, final Object[] newkvs) { + return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs); + } + + // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class + + private static final class Prime { + final Object _V; + + Prime(Object V) { + _V = V; + } + + static Object unbox(Object V) { + return V instanceof Prime ? 
((Prime) V)._V : V; + } + } + + // --- hash ---------------------------------------------------------------- + // Helper function to spread lousy hashCodes + + private static final int hash(final Object key) { + int h = System.identityHashCode(key); // The real hashCode call + h ^= (h >>> 20) ^ (h >>> 12); + h ^= (h >>> 7) ^ (h >>> 4); + return h; + } + + // --- The Hash Table -------------------- + // Slot 0 is always used for a 'CHM' entry below to hold the interesting + // bits of the hash table. Slot 1 holds full hashes as an array of ints. + // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table + // can be atomically replaced by CASing the _kvs field. + // + // Why is CHM buried inside the _kvs Object array, instead of the other way + // around? The CHM info is used during resize events and updates, but not + // during standard 'get' operations. I assume 'get' is much more frequent + // than 'put'. 'get' can skip the extra indirection of skipping through the + // CHM to reach the _kvs array. + private transient Object[] _kvs; + + private static final CHM chm(Object[] kvs) { + return (CHM) kvs[0]; + } + + private static final int[] hashes(Object[] kvs) { + return (int[]) kvs[1]; + } + + // Number of K,V pairs in the table + + private static final int len(Object[] kvs) { + return (kvs.length - 2) >> 1; + } + + // Time since last resize + private transient long _last_resize_milli; + + // --- Minimum table size ---------------- + // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a + // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul. + private static final int MIN_SIZE_LOG = 3; // + private static final int MIN_SIZE = (1 << MIN_SIZE_LOG); // Must be power of 2 + + // --- Sentinels ------------------------- + // No-Match-Old - putIfMatch does updates only if it matches the old value, + // and NO_MATCH_OLD basically counts as a wildcard match. 
+ private static final Object NO_MATCH_OLD = new Object(); // Sentinel + // Match-Any-not-null - putIfMatch does updates only if it find a real old + // value. + private static final Object MATCH_ANY = new Object(); // Sentinel + // This K/V pair has been deleted (but the Key slot is forever claimed). + // The same Key can be reinserted with a new value later. + private static final Object TOMBSTONE = new Object(); + // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a + // table resize started. The K/V pair has been marked so that no new + // updates can happen to the old table (and since the K/V pair was deleted + // nothing was copied to the new table). + private static final Prime TOMBPRIME = new Prime(TOMBSTONE); + + // --- key,val ------------------------------------------------------------- + // Access K,V for a given idx + // + // Note that these are static, so that the caller is forced to read the _kvs + // field only once, and share that read across all key/val calls - lest the + // _kvs field move out from under us and back-to-back key & val calls refer + // to different _kvs arrays. + + private static final Object key(Object[] kvs, int idx) { + return kvs[(idx << 1) + 2]; + } + + private static final Object val(Object[] kvs, int idx) { + return kvs[(idx << 1) + 3]; + } + + private static final boolean CAS_key(Object[] kvs, int idx, Object old, Object key) { + return _unsafe.compareAndSwapObject(kvs, rawIndex(kvs, (idx << 1) + 2), old, key); + } + + private static final boolean CAS_val(Object[] kvs, int idx, Object old, Object val) { + return _unsafe.compareAndSwapObject(kvs, rawIndex(kvs, (idx << 1) + 3), old, val); + } + + + // --- dump ---------------------------------------------------------------- + + /** + * Verbose printout of table internals, useful for debugging. 
+ */ + public final void print() { + System.out.println("========="); + print2(_kvs); + System.out.println("========="); + } + + // print the entire state of the table + + private final void print(Object[] kvs) { + for (int i = 0; i < len(kvs); i++) { + Object K = key(kvs, i); + if (K != null) { + String KS = (K == TOMBSTONE) ? "XXX" : K.toString(); + Object V = val(kvs, i); + Object U = Prime.unbox(V); + String p = (V == U) ? "" : "prime_"; + String US = (U == TOMBSTONE) ? "tombstone" : U.toString(); + System.out.println("" + i + " (" + KS + "," + p + US + ")"); + } + } + Object[] newkvs = chm(kvs)._newkvs; // New table, if any + if (newkvs != null) { + System.out.println("----"); + print(newkvs); + } + } + + // print only the live values, broken down by the table they are in + + private final void print2(Object[] kvs) { + for (int i = 0; i < len(kvs); i++) { + Object key = key(kvs, i); + Object val = val(kvs, i); + Object U = Prime.unbox(val); + if (key != null && key != TOMBSTONE && // key is sane + val != null && U != TOMBSTONE) { // val is sane + String p = (val == U) ? "" : "prime_"; + System.out.println("" + i + " (" + key + "," + p + val + ")"); + } + } + Object[] newkvs = chm(kvs)._newkvs; // New table, if any + if (newkvs != null) { + System.out.println("----"); + print2(newkvs); + } + } + + // Count of reprobes + private transient Counter _reprobes = new Counter(); + + /** + * Get and clear the current count of reprobes. Reprobes happen on key + * collisions, and a high reprobe rate may indicate a poor hash function or + * weaknesses in the table resizing function. + * + * @return the count of reprobes since the last call to {@link #reprobes} + * or since the table was created. + */ + public long reprobes() { + long r = _reprobes.get(); + _reprobes = new Counter(); + return r; + } + + + // --- reprobe_limit ----------------------------------------------------- + // Heuristic to decide if we have reprobed toooo many times. 
Running over + // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it + // can trigger a table resize. Several places must have exact agreement on + // what the reprobe_limit is, so we share it here. + + private static final int reprobe_limit(int len) { + return REPROBE_LIMIT + (len >> 2); + } + + // --- NonBlockingHashMap -------------------------------------------------- + // Constructors + + /** + * Create a new NonBlockingHashMap with default minimum size (currently set + * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). + */ + public NonBlockingIdentityHashMap() { + this(MIN_SIZE); + } + + /** + * Create a new NonBlockingHashMap with initial room for the given number of + * elements, thus avoiding internal resizing operations to reach an + * appropriate size. Large numbers here when used with a small count of + * elements will sacrifice space for a small amount of time gained. The + * initial size will be rounded up internally to the next larger power of 2. + */ + public NonBlockingIdentityHashMap(final int initial_sz) { + initialize(initial_sz); + } + + private final void initialize(int initial_sz) { + if (initial_sz < 0) throw new IllegalArgumentException(); + int i; // Convert to next largest power-of-2 + if (initial_sz > 1024 * 1024) initial_sz = 1024 * 1024; + for (i = MIN_SIZE_LOG; (1 << i) < (initial_sz << 2); i++) ; + // Double size for K,V pairs, add 1 for CHM and 1 for hashes + _kvs = new Object[((1 << i) << 1) + 2]; + _kvs[0] = new CHM(new Counter()); // CHM in slot 0 + _kvs[1] = new int[1 << i]; // Matching hash entries + _last_resize_milli = System.currentTimeMillis(); + } + + // Version for subclassed readObject calls, to be called after the defaultReadObject + + protected final void initialize() { + initialize(MIN_SIZE); + } + + // --- wrappers ------------------------------------------------------------ + + /** + * Returns the number of key-value mappings in this map. 
+ * + * @return the number of key-value mappings in this map + */ + @Override + public int size() { + return chm(_kvs).size(); + } + + /** + * Returns size() == 0. + * + * @return size() == 0 + */ + @Override + public boolean isEmpty() { + return size() == 0; + } + + /** + * Tests if the key in the table using the equals method. + * + * @return true if the key is in the table using the equals method + * @throws NullPointerException if the specified key is null + */ + @Override + public boolean containsKey(Object key) { + return get(key) != null; + } + + /** + * Legacy method testing if some key maps into the specified value in this + * table. This method is identical in functionality to {@link + * #containsValue}, and exists solely to ensure full compatibility with + * class {@link java.util.Hashtable}, which supported this method prior to + * introduction of the Java Collections framework. + * + * @param val a value to search for + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object val) { + return containsValue(val); + } + + /** + * Maps the specified key to the specified value in the table. Neither key + * nor value can be null. + *

    The value can be retrieved by calling {@link #get} with a key that is + * equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param val value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + @Override + public TypeV put(TypeK key, TypeV val) { + return putIfMatch(key, val, NO_MATCH_OLD); + } + + /** + * Atomically, do a {@link #put} if-and-only-if the key is not mapped. + * Useful to ensure that only a single mapping for the key exists, even if + * many threads are trying to create the mapping in parallel. + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public TypeV putIfAbsent(TypeK key, TypeV val) { + return putIfMatch(key, val, TOMBSTONE); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + @Override + public TypeV remove(Object key) { + return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD); + } + + /** + * Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped + * to a value which is equals to the given value. + * + * @throws NullPointerException if the specified key or value is null + */ + public boolean remove(Object key, Object val) { + return putIfMatch(key, TOMBSTONE, val) == val; + } + + /** + * Atomically do a put(key,val) if-and-only-if the key is + * mapped to some value already. 
+ * + * @throws NullPointerException if the specified key or value is null + */ + public TypeV replace(TypeK key, TypeV val) { + return putIfMatch(key, val, MATCH_ANY); + } + + /** + * Atomically do a put(key,newValue) if-and-only-if the key is + * mapped a value which is equals to oldValue. + * + * @throws NullPointerException if the specified key or value is null + */ + public boolean replace(TypeK key, TypeV oldValue, TypeV newValue) { + return putIfMatch(key, newValue, oldValue) == oldValue; + } + + private final TypeV putIfMatch(Object key, Object newVal, Object oldVal) { + if (oldVal == null || newVal == null) throw new NullPointerException(); + final Object res = putIfMatch(this, _kvs, key, newVal, oldVal); + assert !(res instanceof Prime); + assert res != null; + return res == TOMBSTONE ? null : (TypeV) res; + } + + + /** + * Copies all of the mappings from the specified map to this one, replacing + * any existing mappings. + * + * @param m mappings to be stored in this map + */ + @Override + public void putAll(Map m) { + for (Map.Entry e : m.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Removes all of the mappings from this map. + */ + @Override + public void clear() { // Smack a new empty table down + Object[] newkvs = new NonBlockingIdentityHashMap(MIN_SIZE)._kvs; + while (!CAS_kvs(_kvs, newkvs)) // Spin until the clear works + ; + } + + /** + * Returns true if this Map maps one or more keys to the specified + * value. Note: This method requires a full internal traversal of the + * hash table and is much slower than {@link #containsKey}. 
+ * + * @param val value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + @Override + public boolean containsValue(final Object val) { + if (val == null) throw new NullPointerException(); + for (TypeV V : values()) + if (V == val || V.equals(val)) + return true; + return false; + } + + // This function is supposed to do something for Hashtable, and the JCK + // tests hang until it gets called... by somebody ... for some reason, + // any reason.... + + protected void rehash() { + } + + /** + * Creates a shallow copy of this hashtable. All the structure of the + * hashtable itself is copied, but the keys and values are not cloned. + * This is a relatively expensive operation. + * + * @return a clone of the hashtable. + */ + @Override + public Object clone() { + try { + // Must clone, to get the class right; NBHM might have been + // extended so it would be wrong to just make a new NBHM. + NonBlockingIdentityHashMap t = (NonBlockingIdentityHashMap) super.clone(); + // But I don't have an atomic clone operation - the underlying _kvs + // structure is undergoing rapid change. If I just clone the _kvs + // field, the CHM in _kvs[0] won't be in sync. + // + // Wipe out the cloned array (it was shallow anyways). + t.clear(); + // Now copy sanely + for (TypeK K : keySet()) { + final TypeV V = get(K); // Do an official 'get' + t.put(K, V); + } + return t; + } catch (CloneNotSupportedException e) { + // this shouldn't happen, since we are Cloneable + throw new InternalError(); + } + } + + /** + * Returns a string representation of this map. The string representation + * consists of a list of key-value mappings in the order returned by the + * map's entrySet view's iterator, enclosed in braces + * ("{}"). Adjacent mappings are separated by the characters + * ", " (comma and space). 
Each key-value mapping is rendered as + * the key followed by an equals sign ("=") followed by the + * associated value. Keys and values are converted to strings as by + * {@link String#valueOf(Object)}. + * + * @return a string representation of this map + */ + @Override + public String toString() { + Iterator> i = entrySet().iterator(); + if (!i.hasNext()) + return "{}"; + + StringBuilder sb = new StringBuilder(); + sb.append('{'); + for (; ;) { + Entry e = i.next(); + TypeK key = e.getKey(); + TypeV value = e.getValue(); + sb.append(key == this ? "(this Map)" : key); + sb.append('='); + sb.append(value == this ? "(this Map)" : value); + if (!i.hasNext()) + return sb.append('}').toString(); + sb.append(", "); + } + } + + // --- get ----------------------------------------------------------------- + + /** + * Returns the value to which the specified key is mapped, or {@code null} + * if this map contains no mapping for the key. + *

    More formally, if this map contains a mapping from a key {@code k} to + * a value {@code v} such that {@code key.equals(k)}, then this method + * returns {@code v}; otherwise it returns {@code null}. (There can be at + * most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + // Never returns a Prime nor a Tombstone. + @Override + public TypeV get(Object key) { + final int fullhash = hash(key); // throws NullPointerException if key is null + final Object V = get_impl(this, _kvs, key, fullhash); + assert !(V instanceof Prime); // Never return a Prime + return (TypeV) V; + } + + private static final Object get_impl(final NonBlockingIdentityHashMap topmap, final Object[] kvs, final Object key, final int fullhash) { + final int len = len(kvs); // Count of key/value pairs, reads kvs.length + final CHM chm = chm(kvs); // The CHM, for a volatile read below; reads slot 0 of kvs + + int idx = fullhash & (len - 1); // First key hash + + // Main spin/reprobe loop, looking for a Key hit + int reprobe_cnt = 0; + while (true) { + // Probe table. Each read of 'val' probably misses in cache in a big + // table; hopefully the read of 'key' then hits in cache. + final Object K = key(kvs, idx); // Get key before volatile read, could be null + final Object V = val(kvs, idx); // Get value before volatile read, could be null or Tombstone or Prime + if (K == null) return null; // A clear miss + + // We need a volatile-read here to preserve happens-before semantics on + // newly inserted Keys. If the Key body was written just before inserting + // into the table a Key-compare here might read the uninitalized Key body. + // Annoyingly this means we have to volatile-read before EACH key compare. + // . + // We also need a volatile-read between reading a newly inserted Value + // and returning the Value (so the user might end up reading the stale + // Value contents). Same problem as with keys - and the one volatile + // read covers both. 
+ final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare + + // Key-compare + if (K == key) { + // Key hit! Check for no table-copy-in-progress + if (!(V instanceof Prime)) // No copy? + return (V == TOMBSTONE) ? null : V; // Return the value + // Key hit - but slot is (possibly partially) copied to the new table. + // Finish the copy & retry in the new table. + return get_impl(topmap, chm.copy_slot_and_check(topmap, kvs, idx, key), key, fullhash); // Retry in the new table + } + // get and put must have the same key lookup logic! But only 'put' + // needs to force a table-resize for a too-long key-reprobe sequence. + // Check for too-many-reprobes on get - and flip to the new table. + if (++reprobe_cnt >= reprobe_limit(len) || // too many probes + key == TOMBSTONE) // found a TOMBSTONE key, means no more keys in this table + return newkvs == null ? null : get_impl(topmap, topmap.help_copy(newkvs), key, fullhash); // Retry in the new table + + idx = (idx + 1) & (len - 1); // Reprobe by 1! (could now prefetch) + } + } + + // --- putIfMatch --------------------------------------------------------- + // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned + // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be + // assumed to work (although might have been immediately overwritten). Only + // the path through copy_slot passes in an expected value of null, and + // putIfMatch only returns a null if passed in an expected null. 
+ + private static final Object putIfMatch(final NonBlockingIdentityHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal) { + assert putval != null; + assert !(putval instanceof Prime); + assert !(expVal instanceof Prime); + final int fullhash = hash(key); // throws NullPointerException if key null + final int len = len(kvs); // Count of key/value pairs, reads kvs.length + final CHM chm = chm(kvs); // Reads kvs[0] + int idx = fullhash & (len - 1); + + // --- + // Key-Claim stanza: spin till we can claim a Key (or force a resizing). + int reprobe_cnt = 0; + Object K = null, V = null; + Object[] newkvs = null; + while (true) { // Spin till we get a Key slot + V = val(kvs, idx); // Get old value (before volatile read below!) + K = key(kvs, idx); // Get current key + if (K == null) { // Slot is free? + // Found an empty Key slot - which means this Key has never been in + // this table. No need to put a Tombstone - the Key is not here! + if (putval == TOMBSTONE) return putval; // Not-now & never-been in this table + // Claim the null key-slot + if (CAS_key(kvs, idx, null, key)) { // Claim slot for Key + chm._slots.add(1); // Raise key-slots-used count + break; // Got it! + } + // CAS to claim the key-slot failed. + // + // This re-read of the Key points out an annoying short-coming of Java + // CAS. Most hardware CAS's report back the existing value - so that + // if you fail you have a *witness* - the value which caused the CAS + // to fail. The Java API turns this into a boolean destroying the + // witness. Re-reading does not recover the witness because another + // thread can write over the memory after the CAS. Hence we can be in + // the unfortunate situation of having a CAS fail *for cause* but + // having that cause removed by a later store. 
This turns a + // non-spurious-failure CAS (such as Azul has) into one that can + // apparently spuriously fail - and we avoid apparent spurious failure + // by not allowing Keys to ever change. + K = key(kvs, idx); // CAS failed, get updated value + assert K != null; // If keys[idx] is null, CAS shoulda worked + } + // Key slot was not null, there exists a Key here + + // We need a volatile-read here to preserve happens-before semantics on + // newly inserted Keys. If the Key body was written just before inserting + // into the table a Key-compare here might read the uninitalized Key body. + // Annoyingly this means we have to volatile-read before EACH key compare. + newkvs = chm._newkvs; // VOLATILE READ before key compare + + if (K == key) + break; // Got it! + + // get and put must have the same key lookup logic! Lest 'get' give + // up looking too soon. + //topmap._reprobes.add(1); + if (++reprobe_cnt >= reprobe_limit(len) || // too many probes or + key == TOMBSTONE) { // found a TOMBSTONE key, means no more keys + // We simply must have a new table to do a 'put'. At this point a + // 'get' will also go to the new table (if any). We do not need + // to claim a key slot (indeed, we cannot find a free one to claim!). + newkvs = chm.resize(topmap, kvs); + if (expVal != null) topmap.help_copy(newkvs); // help along an existing copy + return putIfMatch(topmap, newkvs, key, putval, expVal); + } + + idx = (idx + 1) & (len - 1); // Reprobe! + } // End of spinning till we get a Key slot + + // --- + // Found the proper Key slot, now update the matching Value slot. We + // never put a null, so Value slots monotonically move from null to + // not-null (deleted Values use Tombstone). Thus if 'V' is null we + // fail this fast cutout and fall into the check for table-full. + if (putval == V) return V; // Fast cutout for no-change + + // See if we want to move to a new table (to avoid high average re-probe + // counts). 
We only check on the initial set of a Value from null to + // not-null (i.e., once per key-insert). Of course we got a 'free' check + // of newkvs once per key-compare (not really free, but paid-for by the + // time we get here). + if (newkvs == null && // New table-copy already spotted? + // Once per fresh key-insert check the hard way + ((V == null && chm.tableFull(reprobe_cnt, len)) || + // Or we found a Prime, but the JMM allowed reordering such that we + // did not spot the new table (very rare race here: the writing + // thread did a CAS of _newkvs then a store of a Prime. This thread + // reads the Prime, then reads _newkvs - but the read of Prime was so + // delayed (or the read of _newkvs was so accelerated) that they + // swapped and we still read a null _newkvs. The resize call below + // will do a CAS on _newkvs forcing the read. + V instanceof Prime)) + newkvs = chm.resize(topmap, kvs); // Force the new table copy to start + // See if we are moving to a new table. + // If so, copy our slot and retry in the new table. + if (newkvs != null) + return putIfMatch(topmap, chm.copy_slot_and_check(topmap, kvs, idx, expVal), key, putval, expVal); + + // --- + // We are finally prepared to update the existing table + while (true) { + assert !(V instanceof Prime); + + // Must match old, and we do not? Then bail out now. Note that either V + // or expVal might be TOMBSTONE. Also V can be null, if we've never + // inserted a value before. expVal can be null if we are called from + // copy_slot. + + if (expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? + V != expVal && // No instant match already? + (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && + !(V == null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo + (expVal == null || !expVal.equals(V))) // Expensive equals check at the last + return V; // Do not update! 
+ + // Actually change the Value in the Key,Value pair + if (CAS_val(kvs, idx, V, putval)) { + // CAS succeeded - we did the update! + // Both normal put's and table-copy calls putIfMatch, but table-copy + // does not (effectively) increase the number of live k/v pairs. + if (expVal != null) { + // Adjust sizes - a striped counter + if ((V == null || V == TOMBSTONE) && putval != TOMBSTONE) chm._size.add(1); + if (!(V == null || V == TOMBSTONE) && putval == TOMBSTONE) chm._size.add(-1); + } + return (V == null && expVal != null) ? TOMBSTONE : V; + } + // Else CAS failed + V = val(kvs, idx); // Get new value + // If a Prime'd value got installed, we need to re-run the put on the + // new table. Otherwise we lost the CAS to another racing put. + // Simply retry from the start. + if (V instanceof Prime) + return putIfMatch(topmap, chm.copy_slot_and_check(topmap, kvs, idx, expVal), key, putval, expVal); + } + } + + // --- help_copy --------------------------------------------------------- + // Help along an existing resize operation. This is just a fast cut-out + // wrapper, to encourage inlining for the fast no-copy-in-progress case. We + // always help the top-most table copy, even if there are nested table + // copies in progress. + + private final Object[] help_copy(Object[] helper) { + // Read the top-level KVS only once. We'll try to help this copy along, + // even if it gets promoted out from under us (i.e., the copy completes + // and another KVS becomes the top-level copy). 
+ Object[] topkvs = _kvs; + CHM topchm = chm(topkvs); + if (topchm._newkvs == null) return helper; // No copy in-progress + topchm.help_copy_impl(this, topkvs, false); + return helper; + } + + + // --- CHM ----------------------------------------------------------------- + // The control structure for the NonBlockingIdentityHashMap + + private static final class CHM { + // Size in active K,V pairs + private final Counter _size; + + public int size() { + return (int) _size.get(); + } + + // --- + // These next 2 fields are used in the resizing heuristics, to judge when + // it is time to resize or copy the table. Slots is a count of used-up + // key slots, and when it nears a large fraction of the table we probably + // end up reprobing too much. Last-resize-milli is the time since the + // last resize; if we are running back-to-back resizes without growing + // (because there are only a few live keys but many slots full of dead + // keys) then we need a larger table to cut down on the churn. + + // Count of used slots, to tell when table is full of dead unusable slots + private final Counter _slots; + + public int slots() { + return (int) _slots.get(); + } + + // --- + // New mappings, used during resizing. + // The 'new KVs' array - created during a resize operation. This + // represents the new table being copied from the old one. It's the + // volatile variable that is read as we cross from one table to the next, + // to get the required memory orderings. It monotonically transits from + // null to set (once). + volatile Object[] _newkvs; + private final AtomicReferenceFieldUpdater _newkvsUpdater = + AtomicReferenceFieldUpdater.newUpdater(CHM.class, Object[].class, "_newkvs"); + + // Set the _next field if we can. + + boolean CAS_newkvs(Object[] newkvs) { + while (_newkvs == null) + if (_newkvsUpdater.compareAndSet(this, null, newkvs)) + return true; + return false; + } + + // Sometimes many threads race to create a new very large table. 
Only 1 + // wins the race, but the losers all allocate a junk large table with + // hefty allocation costs. Attempt to control the overkill here by + // throttling attempts to create a new table. I cannot really block here + // (lest I lose the non-blocking property) but late-arriving threads can + // give the initial resizing thread a little time to allocate the initial + // new table. The Right Long Term Fix here is to use array-lets and + // incrementally create the new very large array. In C I'd make the array + // with malloc (which would mmap under the hood) which would only eat + // virtual-address and not real memory - and after Somebody wins then we + // could in parallel initialize the array. Java does not allow + // un-initialized array creation (especially of ref arrays!). + volatile long _resizers; // count of threads attempting an initial resize + private static final AtomicLongFieldUpdater _resizerUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); + + // --- + // Simple constructor + + CHM(Counter size) { + _size = size; + _slots = new Counter(); + } + + // --- tableFull --------------------------------------------------------- + // Heuristic to decide if this table is too full, and we should start a + // new table. Note that if a 'get' call has reprobed too many times and + // decided the table must be full, then always the estimate_sum must be + // high and we must report the table is full. If we do not, then we might + // end up deciding that the table is not full and inserting into the + // current table, while a 'get' has decided the same key cannot be in this + // table because of too many reprobes. The invariant is: + // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) + + private final boolean tableFull(int reprobe_cnt, int len) { + return + // Do the cheap check first: we allow some number of reprobes always + reprobe_cnt >= REPROBE_LIMIT && + // More expensive check: see if the table is > 1/4 full. 
+ _slots.estimate_get() >= reprobe_limit(len); + } + + // --- resize ------------------------------------------------------------ + // Resizing after too many probes. "How Big???" heuristics are here. + // Callers will (not this routine) will 'help_copy' any in-progress copy. + // Since this routine has a fast cutout for copy-already-started, callers + // MUST 'help_copy' lest we have a path which forever runs through + // 'resize' only to discover a copy-in-progress which never progresses. + + private final Object[] resize(NonBlockingIdentityHashMap topmap, Object[] kvs) { + assert chm(kvs) == this; + + // Check for resize already in progress, probably triggered by another thread + Object[] newkvs = _newkvs; // VOLATILE READ + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + + // No copy in-progress, so start one. First up: compute new table size. + int oldlen = len(kvs); // Old count of K,V pairs allowed + int sz = size(); // Get current table count of active K,V pairs + int newsz = sz; // First size estimate + + // Heuristic to determine new size. We expect plenty of dead-slots-with-keys + // and we need some decent padding to avoid endless reprobing. + if (sz >= (oldlen >> 2)) { // If we are >25% full of keys then... + newsz = oldlen << 1; // Double size + if (sz >= (oldlen >> 1)) // If we are >50% full of keys then... + newsz = oldlen << 2; // Double double size + } + // This heuristic in the next 2 lines leads to a much denser table + // with a higher reprobe rate + //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... + // newsz = oldlen<<1; // Double size + + // Last (re)size operation was very recent? Then double again; slows + // down resize operations for tables subject to a high key churn rate. + long tm = System.currentTimeMillis(); + long q = 0; + if (newsz <= oldlen && // New table would shrink or hold steady? 
+ tm <= topmap._last_resize_milli + 10000 && // Recent resize (less than 1 sec ago) + (q = _slots.estimate_get()) >= (sz << 1)) // 1/2 of keys are dead? + newsz = oldlen << 1; // Double the existing size + + // Do not shrink, ever + if (newsz < oldlen) newsz = oldlen; + + // Convert to power-of-2 + int log2; + for (log2 = MIN_SIZE_LOG; (1 << log2) < newsz; log2++) ; // Compute log2 of size + + // Now limit the number of threads actually allocating memory to a + // handful - lest we have 750 threads all trying to allocate a giant + // resized array. + long r = _resizers; + while (!_resizerUpdater.compareAndSet(this, r, r + 1)) + r = _resizers; + // Size calculation: 2 words (K+V) per table entry, plus a handful. We + // guess at 32-bit pointers; 64-bit pointers screws up the size calc by + // 2x but does not screw up the heuristic very much. + int megs = ((((1 << log2) << 1) + 4) << 3/*word to bytes*/) >> 20/*megs*/; + if (r >= 2 && megs > 0) { // Already 2 guys trying; wait and see + newkvs = _newkvs; // Between dorking around, another thread did it + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + // TODO - use a wait with timeout, so we'll wakeup as soon as the new table + // is ready, or after the timeout in any case. + //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup + // For now, sleep a tad and see if the 2 guys already trying to make + // the table actually get around to making it happen. + try { + Thread.sleep(8 * megs); + } catch (Exception e) { + } + } + // Last check, since the 'new' below is expensive and there is a chance + // that another thread slipped in a new thread while we ran the heuristic. 
+ newkvs = _newkvs; + if (newkvs != null) // See if resize is already in progress + return newkvs; // Use the new table already + + // Double size for K,V pairs, add 1 for CHM + newkvs = new Object[((1 << log2) << 1) + 2]; // This can get expensive for big arrays + newkvs[0] = new CHM(_size); // CHM in slot 0 + newkvs[1] = new int[1 << log2]; // hashes in slot 1 + + // Another check after the slow allocation + if (_newkvs != null) // See if resize is already in progress + return _newkvs; // Use the new table already + + // The new table must be CAS'd in so only 1 winner amongst duplicate + // racing resizing threads. Extra CHM's will be GC'd. + if (CAS_newkvs(newkvs)) { // NOW a resize-is-in-progress! + //notifyAll(); // Wake up any sleepers + //long nano = System.nanoTime(); + //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1< _copyIdxUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); + + // Work-done reporting. Used to efficiently signal when we can move to + // the new table. From 0 to len(oldkvs) refers to copying from the old + // table to the new. + volatile long _copyDone = 0; + static private final AtomicLongFieldUpdater _copyDoneUpdater = + AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); + + // --- help_copy_impl ---------------------------------------------------- + // Help along an existing resize operation. We hope its the top-level + // copy (it was when we started) but this CHM might have been promoted out + // of the top position. 
+ + private final void help_copy_impl(NonBlockingIdentityHashMap topmap, Object[] oldkvs, boolean copy_all) { + assert chm(oldkvs) == this; + Object[] newkvs = _newkvs; + assert newkvs != null; // Already checked by caller + int oldlen = len(oldkvs); // Total amount to copy + final int MIN_COPY_WORK = Math.min(oldlen, 1024); // Limit per-thread work + + // --- + int panic_start = -1; + int copyidx = -9999; // Fool javac to think it's initialized + while (_copyDone < oldlen) { // Still needing to copy? + // Carve out a chunk of work. The counter wraps around so every + // thread eventually tries to copy every slot repeatedly. + + // We "panic" if we have tried TWICE to copy every slot - and it still + // has not happened. i.e., twice some thread somewhere claimed they + // would copy 'slot X' (by bumping _copyIdx) but they never claimed to + // have finished (by bumping _copyDone). Our choices become limited: + // we can wait for the work-claimers to finish (and become a blocking + // algorithm) or do the copy work ourselves. Tiny tables with huge + // thread counts trying to copy the table often 'panic'. + if (panic_start == -1) { // No panic? + copyidx = (int) _copyIdx; + while (copyidx < (oldlen << 1) && // 'panic' check + !_copyIdxUpdater.compareAndSet(this, copyidx, copyidx + MIN_COPY_WORK)) + copyidx = (int) _copyIdx; // Re-read + if (!(copyidx < (oldlen << 1))) // Panic! + panic_start = copyidx; // Record where we started to panic-copy + } + + // We now know what to copy. Try to copy. + int workdone = 0; + for (int i = 0; i < MIN_COPY_WORK; i++) + if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs, newkvs)) // Made an oldtable slot go dead? + workdone++; // Yes! 
+ if (workdone > 0) // Report work-done occasionally + copy_check_and_promote(topmap, oldkvs, workdone);// See if we can promote + //for( int i=0; i 0) { + while (!_copyDoneUpdater.compareAndSet(this, copyDone, copyDone + workdone)) { + copyDone = _copyDone; // Reload, retry + assert (copyDone + workdone) <= oldlen; + } + //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) ) + //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); + } + + // Check for copy being ALL done, and promote. Note that we might have + // nested in-progress copies and manage to finish a nested copy before + // finishing the top-level copy. We only promote top-level copies. + if (copyDone + workdone == oldlen && // Ready to promote this table? + topmap._kvs == oldkvs && // Looking at the top-level table? + // Attempt to promote + topmap.CAS_kvs(oldkvs, _newkvs)) { + topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check + //long nano = System.nanoTime(); + //System.out.println(" "+nano+" Promote table to "+len(_newkvs)); + //if( System.out != null ) System.out.print("]"); + } + } + + // --- copy_slot --------------------------------------------------------- + // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can + // confirm that the new table guaranteed has a value for this old-table + // slot. We need an accurate confirmed-copy count so that we know when we + // can promote (if we promote the new table too soon, other threads may + // 'miss' on values not-yet-copied from the old table). We don't allow + // any direct updates on the new table, unless they first happened to the + // old table - so that any transition in the new table from null to + // not-null must have been from a copy_slot (or other old-table overwrite) + // and not from a thread directly writing in the new table. Thus we can + // count null-to-not-null transitions in the new table. 
+ + private boolean copy_slot(NonBlockingIdentityHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs) { + // Blindly set the key slot from null to TOMBSTONE, to eagerly stop + // fresh put's from inserting new values in the old table when the old + // table is mid-resize. We don't need to act on the results here, + // because our correctness stems from box'ing the Value field. Slamming + // the Key field is a minor speed optimization. + Object key; + while ((key = key(oldkvs, idx)) == null) + CAS_key(oldkvs, idx, null, TOMBSTONE); + + // --- + // Prevent new values from appearing in the old table. + // Box what we see in the old table, to prevent further updates. + Object oldval = val(oldkvs, idx); // Read OLD table + while (!(oldval instanceof Prime)) { + final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); + if (CAS_val(oldkvs, idx, oldval, box)) { // CAS down a box'd version of oldval + // If we made the Value slot hold a TOMBPRIME, then we both + // prevented further updates here but also the (absent) + // oldval is vaccuously available in the new table. We + // return with true here: any thread looking for a value for + // this key can correctly go straight to the new table and + // skip looking in the old table. + if (box == TOMBPRIME) + return true; + // Otherwise we boxed something, but it still needs to be + // copied into the new table. + oldval = box; // Record updated oldval + break; // Break loop; oldval is now boxed by us + } + oldval = val(oldkvs, idx); // Else try, try again + } + if (oldval == TOMBPRIME) return false; // Copy already complete here! + + // --- + // Copy the value into the new table, but only if we overwrite a null. + // If another value is already in the new table, then somebody else + // wrote something there and that write is happens-after any value that + // appears in the old table. 
If putIfMatch does not find a null in the + // new table - somebody else should have recorded the null-not_null + // transition in this copy. + Object old_unboxed = ((Prime) oldval)._V; + assert old_unboxed != TOMBSTONE; + boolean copied_into_new = (putIfMatch(topmap, newkvs, key, old_unboxed, null) == null); + + // --- + // Finally, now that any old value is exposed in the new table, we can + // forever hide the old-table value by slapping a TOMBPRIME down. This + // will stop other threads from uselessly attempting to copy this slot + // (i.e., it's a speed optimization not a correctness issue). + while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME)) + oldval = val(oldkvs, idx); + + return copied_into_new; + } // end copy_slot + } // End of CHM + + + // --- Snapshot ------------------------------------------------------------ + // The main class for iterating over the NBHM. It "snapshots" a clean + // view of the K/V array. + + private class SnapshotV implements Iterator, Enumeration { + final Object[] _sskvs; + + public SnapshotV() { + while (true) { // Verify no table-copy-in-progress + Object[] topkvs = _kvs; + CHM topchm = chm(topkvs); + if (topchm._newkvs == null) { // No table-copy-in-progress + // The "linearization point" for the iteration. Every key in this + // table will be visited, but keys added later might be skipped or + // even be added to a following table (also not iterated over). + _sskvs = topkvs; + break; + } + // Table copy in-progress - so we cannot get a clean iteration. We + // must help finish the table copy before we can start iterating. 
+ topchm.help_copy_impl(NonBlockingIdentityHashMap.this, topkvs, true); + } + // Warm-up the iterator + next(); + } + + int length() { + return len(_sskvs); + } + + Object key(int idx) { + return NonBlockingIdentityHashMap.key(_sskvs, idx); + } + + private int _idx; // Varies from 0-keys.length + private Object _nextK, _prevK; // Last 2 keys found + private TypeV _nextV, _prevV; // Last 2 values found + + public boolean hasNext() { + return _nextV != null; + } + + public TypeV next() { + // 'next' actually knows what the next value will be - it had to + // figure that out last go-around lest 'hasNext' report true and + // some other thread deleted the last value. Instead, 'next' + // spends all its effort finding the key that comes after the + // 'next' key. + if (_idx != 0 && _nextV == null) throw new NoSuchElementException(); + _prevK = _nextK; // This will become the previous key + _prevV = _nextV; // This will become the previous value + _nextV = null; // We have no more next-key + // Attempt to set <_nextK,_nextV> to the next K,V pair. + // _nextV is the trigger: stop searching when it is != null + while (_idx < length()) { // Scan array + _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) + if (_nextK != null && // Found something? + _nextK != TOMBSTONE && + (_nextV = get(_nextK)) != null) + break; // Got it! _nextK is a valid Key + } // Else keep scanning + return _prevV; // Return current value. + } + + public void remove() { + if (_prevV == null) throw new IllegalStateException(); + putIfMatch(NonBlockingIdentityHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV); + _prevV = null; + } + + public TypeV nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the values in this table. 
+ * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new SnapshotV(); + } + + // --- values -------------------------------------------------------------- + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are reflected + * in the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from this map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + @Override + public Collection values() { + return new AbstractCollection() { + @Override public void clear() { + NonBlockingIdentityHashMap.this.clear(); + } + + @Override public int size() { + return NonBlockingIdentityHashMap.this.size(); + } + + @Override public boolean contains(Object v) { + return NonBlockingIdentityHashMap.this.containsValue(v); + } + + @Override public Iterator iterator() { + return new SnapshotV(); + } + }; + } + + // --- keySet -------------------------------------------------------------- + + private class SnapshotK implements Iterator, Enumeration { + final SnapshotV _ss; + + public SnapshotK() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public TypeK next() { + _ss.next(); + return (TypeK) _ss._prevK; + } + + public boolean hasNext() { + return _ss.hasNext(); + } + + public TypeK nextElement() { + return next(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new SnapshotK(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. The set + * is backed by the map, so changes to the map are reflected in the set, + * and vice-versa. The set supports element removal, which removes the + * corresponding mapping from this map, via the Iterator.remove, + * Set.remove, removeAll, retainAll, and + * clear operations. It does not support the add or + * addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator that + * will never throw {@link ConcurrentModificationException}, and guarantees + * to traverse elements as they existed upon construction of the iterator, + * and may (but is not guaranteed to) reflect any modifications subsequent + * to construction. + */ + @Override + public Set keySet() { + return new AbstractSet() { + @Override public void clear() { + NonBlockingIdentityHashMap.this.clear(); + } + + @Override public int size() { + return NonBlockingIdentityHashMap.this.size(); + } + + @Override public boolean contains(Object k) { + return NonBlockingIdentityHashMap.this.containsKey(k); + } + + @Override public boolean remove(Object k) { + return NonBlockingIdentityHashMap.this.remove(k) != null; + } + + @Override public Iterator iterator() { + return new SnapshotK(); + } + }; + } + + + // --- entrySet ------------------------------------------------------------ + // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry. + + private class NBHMEntry extends AbstractEntry { + NBHMEntry(final TypeK k, final TypeV v) { + super(k, v); + } + + public TypeV setValue(final TypeV val) { + if (val == null) throw new NullPointerException(); + _val = val; + return put(_key, val); + } + } + + private class SnapshotE implements Iterator> { + final SnapshotV _ss; + + public SnapshotE() { + _ss = new SnapshotV(); + } + + public void remove() { + _ss.remove(); + } + + public Map.Entry next() { + _ss.next(); + return new NBHMEntry((TypeK) _ss._prevK, _ss._prevV); + } + + public boolean hasNext() { + return _ss.hasNext(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. The + * set is backed by the map, so changes to the map are reflected in the + * set, and vice-versa. The set supports element removal, which removes + * the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, removeAll, + * retainAll, and clear operations. 
It does not support + * the add or addAll operations. + *

    + *

    The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + *

    + *

    Warning: the iterator associated with this Set + * requires the creation of {@link java.util.Map.Entry} objects with each + * iteration. The {@link NonBlockingIdentityHashMap} does not normally create or + * using {@link java.util.Map.Entry} objects so they will be created soley + * to support this iteration. Iterating using {@link #keySet} or {@link + * #values} will be more efficient. + */ + @Override + public Set> entrySet() { + return new AbstractSet>() { + @Override public void clear() { + NonBlockingIdentityHashMap.this.clear(); + } + + @Override public int size() { + return NonBlockingIdentityHashMap.this.size(); + } + + @Override public boolean remove(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + return NonBlockingIdentityHashMap.this.remove(e.getKey(), e.getValue()); + } + + @Override public boolean contains(final Object o) { + if (!(o instanceof Map.Entry)) return false; + final Map.Entry e = (Map.Entry) o; + TypeV v = get(e.getKey()); + return v.equals(e.getValue()); + } + + @Override public Iterator> iterator() { + return new SnapshotE(); + } + }; + } + + // --- writeObject ------------------------------------------------------- + // Write a NBHM to a stream + + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); // Nothing to write + for (Object K : keySet()) { + final Object V = get(K); // Do an official 'get' + s.writeObject(K); // Write the pair + s.writeObject(V); + } + s.writeObject(null); // Sentinel to indicate end-of-data + s.writeObject(null); + } + + // --- readObject -------------------------------------------------------- + // Read a CHM from a stream + + private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { + s.defaultReadObject(); // Read nothing + initialize(MIN_SIZE); + for (; ;) { + final TypeK K = (TypeK) s.readObject(); + final TypeV V = (TypeV) s.readObject(); + if (K == null) 
break; + put(K, V); // Insert with an offical put + } + } + +} // End NonBlockingIdentityHashMap class diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingSetInt.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingSetInt.java new file mode 100644 index 00000000000..689ca4f84ed --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/highscalelib/NonBlockingSetInt.java @@ -0,0 +1,555 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Written by Cliff Click and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.elasticsearch.util.concurrent.highscalelib; + +import sun.misc.Unsafe; + +import java.io.IOException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A multi-threaded bit-vector set, implemented as an array of primitive + * {@code longs}. All operations are non-blocking and multi-threaded safe. 
+ * {@link #contains(int)} calls are roughly the same speed as a {load, mask} + * sequence. {@link #add(int)} and {@link #remove(int)} calls are a tad more + * expensive than a {load, mask, store} sequence because they must use a CAS. + * The bit-vector is auto-sizing. + *

    + *

    General note of caution: The Set API allows the use of {@link Integer} + * with silent autoboxing - which can be very expensive if many calls are + * being made. Since autoboxing is silent you may not be aware that this is + * going on. The built-in API takes lower-case {@code ints} and is much more + * efficient. + *

    + *

    Space: space is used in proportion to the largest element, as opposed to + * the number of elements (as is the case with hash-table based Set + * implementations). Space is approximately (largest_element/8 + 64) bytes. + *

    + * The implementation is a simple bit-vector using CAS for update. + * + * @author Cliff Click + * @since 1.5 + */ + +public class NonBlockingSetInt extends AbstractSet implements Serializable { + private static final long serialVersionUID = 1234123412341234123L; + private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); + + // --- Bits to allow atomic update of the NBSI + private static final long _nbsi_offset; + + static { // + Field f = null; + try { + f = NonBlockingSetInt.class.getDeclaredField("_nbsi"); + } catch (java.lang.NoSuchFieldException e) { + } + _nbsi_offset = _unsafe.objectFieldOffset(f); + } + + private final boolean CAS_nbsi(NBSI old, NBSI nnn) { + return _unsafe.compareAndSwapObject(this, _nbsi_offset, old, nnn); + } + + // The actual Set of Joy, which changes during a resize event. The + // Only Field for this class, so I can atomically change the entire + // set implementation with a single CAS. + private transient NBSI _nbsi; + + /** + * Create a new empty bit-vector + */ + public NonBlockingSetInt() { + _nbsi = new NBSI(63, new Counter(), this); // The initial 1-word set + } + + /** + * Add {@code i} to the set. Uppercase {@link Integer} version of add, + * requires auto-unboxing. When possible use the {@code int} version of + * {@link #add(int)} for efficiency. + * + * @return true if i was added to the set. + * @throws IllegalArgumentException if i is negative. + */ + public boolean add(final Integer i) { + return add(i.intValue()); + } + + /** + * Test if {@code o} is in the set. This is the uppercase {@link Integer} + * version of contains, requires a type-check and auto-unboxing. When + * possible use the {@code int} version of {@link #contains(int)} for + * efficiency. + * + * @return true if i was in the set. + */ + public boolean contains(final Object o) { + return o instanceof Integer ? contains(((Integer) o).intValue()) : false; + } + + /** + * Remove {@code o} from the set. 
This is the uppercase {@link Integer} + * version of remove, requires a type-check and auto-unboxing. When + * possible use the {@code int} version of {@link #remove(int)} for + * efficiency. + * + * @return true if i was removed to the set. + */ + public boolean remove(final Object o) { + return o instanceof Integer ? remove(((Integer) o).intValue()) : false; + } + + /** + * Add {@code i} to the set. This is the lower-case '{@code int}' version + * of {@link #add} - no autoboxing. Negative values throw + * IllegalArgumentException. + * + * @return true if i was added to the set. + * @throws IllegalArgumentException if i is negative. + */ + public boolean add(final int i) { + if (i < 0) throw new IllegalArgumentException("" + i); + return _nbsi.add(i); + } + + /** + * Test if {@code i} is in the set. This is the lower-case '{@code int}' + * version of {@link #contains} - no autoboxing. + * + * @return true if i was int the set. + */ + public boolean contains(final int i) { + return i < 0 ? false : _nbsi.contains(i); + } + + /** + * Remove {@code i} from the set. This is the fast lower-case '{@code int}' + * version of {@link #remove} - no autoboxing. + * + * @return true if i was added to the set. + */ + public boolean remove(final int i) { + return i < 0 ? false : _nbsi.remove(i); + } + + /** + * Current count of elements in the set. Due to concurrent racing updates, + * the size is only ever approximate. Updates due to the calling thread are + * immediately visible to calling thread. + * + * @return count of elements. + */ + public int size() { + return _nbsi.size(); + } + + /** + * Empty the bitvector. + */ + public void clear() { + NBSI cleared = new NBSI(63, new Counter(), this); // An empty initial NBSI + while (!CAS_nbsi(_nbsi, cleared)) // Spin until clear works + ; + } + + /** + * Verbose printout of internal structure for debugging. + */ + public void print() { + _nbsi.print(0); + } + + /** + * Standard Java {@link Iterator}. 
Not very efficient because it + * auto-boxes the returned values. + */ + public Iterator iterator() { + return new iter(); + } + + private class iter implements Iterator { + NBSI _nbsi2; + int _idx = -1; + int _prev = -1; + + iter() { + _nbsi2 = _nbsi; + advance(); + } + + public boolean hasNext() { + return _idx != -2; + } + + private void advance() { + while (true) { + _idx++; // Next index + while ((_idx >> 6) >= _nbsi2._bits.length) { // Index out of range? + if (_nbsi2._new == null) { // New table? + _idx = -2; // No, so must be all done + return; // + } + _nbsi2 = _nbsi2._new; // Carry on, in the new table + } + if (_nbsi2.contains(_idx)) return; + } + } + + public Integer next() { + if (_idx == -1) throw new NoSuchElementException(); + _prev = _idx; + advance(); + return _prev; + } + + public void remove() { + if (_prev == -1) throw new IllegalStateException(); + _nbsi2.remove(_prev); + _prev = -1; + } + } + + // --- writeObject ------------------------------------------------------- + // Write a NBSI to a stream + + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); // Nothing to write + final NBSI nbsi = _nbsi; // The One Field is transient + final int len = _nbsi._bits.length << 6; + s.writeInt(len); // Write max element + for (int i = 0; i < len; i++) + s.writeBoolean(_nbsi.contains(i)); + } + + // --- readObject -------------------------------------------------------- + // Read a CHM from a stream + + private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { + s.defaultReadObject(); // Read nothing + final int len = s.readInt(); // Read max element + _nbsi = new NBSI(len, new Counter(), this); + for (int i = 0; i < len; i++) // Read all bits + if (s.readBoolean()) + _nbsi.add(i); + } + + // --- NBSI ---------------------------------------------------------------- + + private static final class NBSI { + // Back pointer to the parent wrapper; sorta like make the 
class non-static + private transient final NonBlockingSetInt _non_blocking_set_int; + + // Used to count elements: a high-performance counter. + private transient final Counter _size; + + // The Bits + private final long _bits[]; + // --- Bits to allow Unsafe access to arrays + private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); + private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); + + private static long rawIndex(final long[] ary, final int idx) { + assert idx >= 0 && idx < ary.length; + return _Lbase + idx * _Lscale; + } + + private final boolean CAS(int idx, long old, long nnn) { + return _unsafe.compareAndSwapLong(_bits, rawIndex(_bits, idx), old, nnn); + } + + // --- Resize + // The New Table, only set once to non-zero during a resize. + // Must be atomically set. + private NBSI _new; + private static final long _new_offset; + + static { // + Field f = null; + try { + f = NBSI.class.getDeclaredField("_new"); + } catch (java.lang.NoSuchFieldException e) { + } + _new_offset = _unsafe.objectFieldOffset(f); + } + + private final boolean CAS_new(NBSI nnn) { + return _unsafe.compareAndSwapObject(this, _new_offset, null, nnn); + } + + private transient final AtomicInteger _copyIdx; // Used to count bits started copying + private transient final AtomicInteger _copyDone; // Used to count words copied in a resize operation + private transient final int _sum_bits_length; // Sum of all nested _bits.lengths + + private static final long mask(int i) { + return 1L << (i & 63); + } + + // I need 1 free bit out of 64 to allow for resize. I do this by stealing + // the high order bit - but then I need to do something with adding element + // number 63 (and friends). I could use a mod63 function but it's more + // efficient to handle the mod-64 case as an exception. + // + // Every 64th bit is put in it's own recursive bitvector. If the low 6 bits + // are all set, we shift them off and recursively operate on the _nbsi64 set. 
// The nested bit vector holding every 64th logical bit (see comment above);
// this keeps the sign bit of each word free for the resize marker.
private final NBSI _nbsi64;

private NBSI(int max_elem, Counter ctr, NonBlockingSetInt nonb) {
    super();
    _non_blocking_set_int = nonb;
    _size = ctr;   // ctr == null marks a nested (non-top-level) NBSI
    _copyIdx = ctr == null ? null : new AtomicInteger();
    _copyDone = ctr == null ? null : new AtomicInteger();
    // The main array of bits
    _bits = new long[(int) (((long) max_elem + 63) >>> 6)];
    // Every 64th bit is moved off to it's own subarray, so that the
    // sign-bit is free for other purposes
    _nbsi64 = ((max_elem + 1) >>> 6) == 0 ? null : new NBSI((max_elem + 1) >>> 6, null, null);
    _sum_bits_length = _bits.length + (_nbsi64 == null ? 0 : _nbsi64._sum_bits_length);
}

// Lower-case 'int' versions - no autoboxing, very fast.
// 'i' is known positive.

public boolean add(final int i) {
    // Check for out-of-range for the current size bit vector.
    // If so we need to grow the bit vector.
    if ((i >> 6) >= _bits.length)
        return install_larger_new_bits(i). // Install larger pile-o-bits (duh)
                help_copy().add(i);        // Finally, add to the new table

    // Handle every 64th bit via using a nested array
    NBSI nbsi = this;        // The bit array being added into
    int j = i;               // The bit index being added
    while ((j & 63) == 63) { // Bit 64? (low 6 bits are all set)
        nbsi = nbsi._nbsi64; // Recurse
        j = j >> 6;          // Strip off low 6 bits (all set)
    }

    final long mask = mask(j);
    long old;
    do {
        old = nbsi._bits[j >> 6];            // Read old bits
        if (old < 0)                         // Not mutable? (sign bit = resize marker)
            // Not mutable: finish copy of word, and retry on copied word
            return help_copy_impl(i).help_copy().add(i);
        if ((old & mask) != 0) return false; // Bit is already set?
    } while (!nbsi.CAS(j >> 6, old, old | mask));
    _size.add(1);
    return true;
}

public boolean remove(final int i) {
    if ((i >> 6) >= _bits.length) // Out of bounds? Not in this array!
        return _new == null ? false : help_copy().remove(i);

    // Handle every 64th bit via using a nested array
    NBSI nbsi = this;        // The bit array being removed from
    int j = i;               // The bit index being removed
    while ((j & 63) == 63) { // Bit 64? (low 6 bits are all set)
        nbsi = nbsi._nbsi64; // Recurse
        j = j >> 6;          // Strip off low 6 bits (all set)
    }

    final long mask = mask(j);
    long old;
    do {
        old = nbsi._bits[j >> 6];            // Read old bits
        if (old < 0)                         // Not mutable? (sign bit = resize marker)
            // Not mutable: finish copy of word, and retry on copied word
            return help_copy_impl(i).help_copy().remove(i);
        if ((old & mask) == 0) return false; // Bit is already clear?
    } while (!nbsi.CAS(j >> 6, old, old & ~mask));
    _size.add(-1);
    return true;
}

public boolean contains(final int i) {
    if ((i >> 6) >= _bits.length) // Out of bounds? Not in this array!
        return _new == null ? false : help_copy().contains(i);

    // Handle every 64th bit via using a nested array
    NBSI nbsi = this;        // The bit array being tested
    int j = i;               // The bit index being tested
    while ((j & 63) == 63) { // Bit 64? (low 6 bits are all set)
        nbsi = nbsi._nbsi64; // Recurse
        j = j >> 6;          // Strip off low 6 bits (all set)
    }

    final long mask = mask(j);
    long old = nbsi._bits[j >> 6]; // Read old bits
    if (old < 0)                   // Not mutable? (sign bit = resize marker)
        // Not mutable: finish copy of word, and retry on copied word
        return help_copy_impl(i).help_copy().contains(i);
    // Yes mutable: test & return bit
    return (old & mask) != 0;
}

public int size() {
    return (int) _size.get();
}

// Must grow the current array to hold an element of size i

private NBSI install_larger_new_bits(final int i) {
    if (_new == null) {
        // Grow by powers of 2, to avoid minor grow-by-1's.
        // Note: must grow by exact powers-of-2 or the by-64-bit trick doesn't work right
        int sz = (_bits.length << 6) << 1;
        // CAS to install a new larger size. Did it work? Did it fail? We
        // don't know and don't care. Only One can be installed, so if
        // another thread installed a too-small size, we can't help it - we
        // must simply install our new larger size as a nested-resize table.
        CAS_new(new NBSI(sz, _size, _non_blocking_set_int));
    }
    // Return self for 'fluid' programming style
    return this;
}

// Help any top-level NBSI to copy until completed.
// Always return the _new version of *this* NBSI, in case we're nested.

private NBSI help_copy() {
    // Pick some words to help with - but only help copy the top-level NBSI.
    // Nested NBSI waits until the top is done before we start helping.
    NBSI top_nbsi = _non_blocking_set_int._nbsi;
    final int HELP = 8; // Tuning number: how much copy pain are we willing to inflict?
    // We "help" by forcing individual bit indices to copy. However, bits
    // come in lumps of 64 per word, so we just advance the bit counter by 64's.
    int idx = top_nbsi._copyIdx.getAndAdd(64 * HELP);
    for (int i = 0; i < HELP; i++) {
        int j = idx + i * 64;
        j %= (top_nbsi._bits.length << 6); // Limit, wrap to array size; means we retry indices
        top_nbsi.help_copy_impl(j);
        top_nbsi.help_copy_impl(j + 63);   // Also force the nested-by-64 bit
    }

    // Top level guy ready to promote?
    // Note: WE may not be the top-level guy!
    if (top_nbsi._copyDone.get() == top_nbsi._sum_bits_length)
        // One shot CAS to promote - it may fail since we are racing; others
        // may promote as well
        if (_non_blocking_set_int.CAS_nbsi(top_nbsi, top_nbsi._new)) {
            //System.out.println("Promote at top level to size "+(_non_blocking_set_int._nbsi._bits.length<<6));
        }

    // Return the new bitvector for 'fluid' programming style
    return _new;
}

// Help copy this one word. State Machine.
// (1) If not "made immutable" in the old array, set the sign bit to make
//     it immutable.
// (2) If non-zero in old array & zero in new, CAS new from 0 to copy-of-old
// (3) If non-zero in old array & non-zero in new, CAS old to zero
// (4) Zero in old, new is valid
// At this point, old should be immutable-zero & new has a copy of bits

private NBSI help_copy_impl(int i) {
    // Handle every 64th bit via using a nested array
    NBSI old = this;             // The bit array being copied from
    NBSI nnn = _new;             // The bit array being copied to
    if (nnn == null) return this; // Promoted already
    int j = i;                   // The bit index being copied
    while ((j & 63) == 63) {     // Bit 64? (low 6 bits are all set)
        old = old._nbsi64;       // Recurse
        nnn = nnn._nbsi64;       // Recurse
        j = j >> 6;              // Strip off low 6 bits (all set)
    }

    // Transit from state 1: word is not immutable yet
    // Immutable is in bit 63, the sign bit.
    long bits = old._bits[j >> 6];
    while (bits >= 0) {     // Still in state (1)?
        long oldbits = bits;
        bits |= mask(63);   // Target state of bits: sign-bit means immutable
        if (old.CAS(j >> 6, oldbits, bits)) {
            // An all-zero word needs no state 2/3 work; count it done now.
            if (oldbits == 0) _copyDone.addAndGet(1);
            break;          // Success - old array word is now immutable
        }
        bits = old._bits[j >> 6]; // Retry if CAS failed
    }

    // Transit from state 2: non-zero in old and zero in new
    if (bits != mask(63)) { // Non-zero in old?
        long new_bits = nnn._bits[j >> 6];
        if (new_bits == 0) { // New array is still zero
            new_bits = bits & ~mask(63); // Desired new value: a mutable copy of bits
            // One-shot CAS attempt, no loop, from 0 to non-zero.
            // If it fails, somebody else did the copy for us
            if (!nnn.CAS(j >> 6, 0, new_bits))
                new_bits = nnn._bits[j >> 6]; // Since it failed, get the new value
            assert new_bits != 0;
        }

        // Transit from state 3: non-zero in old and non-zero in new
        // One-shot CAS attempt, no loop, from non-zero to 0 (but immutable)
        if (old.CAS(j >> 6, bits, mask(63)))
            _copyDone.addAndGet(1); // One more word finished copying
    }

    // Now in state 4: zero (and immutable) in old

    // Return the self bitvector for 'fluid' programming style
    return this;
}

// Debug helper: print msg indented d levels deep.
private void print(int d, String msg) {
    for (int i = 0; i < d; i++)
        System.out.print("  ");
    System.out.println(msg);
}

// Debug helper: dump this NBSI (and its nested-by-64 chain, and any
// in-progress resize target) in hex.
private void print(int d) {
    StringBuffer buf = new StringBuffer();
    buf.append("NBSI - _bits.len=");
    NBSI x = this;
    while (x != null) {
        buf.append(" " + x._bits.length);
        x = x._nbsi64;
    }
    print(d, buf.toString());

    x = this;
    while (x != null) {
        for (int i = 0; i < x._bits.length; i++)
            System.out.print(Long.toHexString(x._bits[i]) + " ");
        x = x._nbsi64;
        System.out.println();
    }

    if (_copyIdx.get() != 0 || _copyDone.get() != 0)
        print(d, "_copyIdx=" + _copyIdx.get() + " _copyDone=" + _copyDone.get() + " _words_to_cpy=" + _sum_bits_length);
    if (_new != null) {
        print(d, "__has_new - ");
        _new.print(d + 1);
    }
}
}
}
/**
 * Simple class to obtain access to the {@link Unsafe} object. {@link Unsafe}
 * is required to allow efficient CAS operations on arrays. Note that the
 * versions in {@link java.util.concurrent.atomic}, such as {@link
 * java.util.concurrent.atomic.AtomicLongArray}, require extra memory ordering
 * guarantees which are generally not needed in these algorithms and are also
 * expensive on most processors.
 */
class UtilUnsafe {

    /** Not instantiable - static access only. */
    private UtilUnsafe() {
    }

    /**
     * Fetch the Unsafe. Use With Caution.
     */
    public static Unsafe getUnsafe() {
        // Loaded by the bootstrap class loader (null loader)? Then we are
        // allowed to ask for the singleton directly.
        if (UtilUnsafe.class.getClassLoader() == null) {
            return Unsafe.getUnsafe();
        }
        // Otherwise pry the singleton out of its private static field.
        try {
            final Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
            theUnsafe.setAccessible(true);
            return (Unsafe) theUnsafe.get(UtilUnsafe.class);
        } catch (Exception e) {
            throw new RuntimeException("Could not obtain access to sun.misc.Unsafe", e);
        }
    }
}
+ */ +package org.elasticsearch.util.concurrent.highscalelib; \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/AcquirableResource.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/AcquirableResource.java new file mode 100644 index 00000000000..8f539f964b0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/AcquirableResource.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.util.lease.Releasable; + +/** + * A wrapper around a resource that can be released. Note, release should not be + * called directly on the resource itself. + *

    + *

    Yea, I now, the fact that the resouce itself is releasable basically means that + * users of this class should take care... . + * + * @author kimchy (Shay Banon) + */ +public interface AcquirableResource { + + T resource(); + + /** + * Acquires the resource, returning true if it was acquired. + */ + boolean acquire(); + + /** + * Releases the resource, will close it if there are no more acquirers and it is marked for close. + */ + void release(); + + /** + * Marks the resource to be closed. Will close it if there are no current + * acquires. + */ + void markForClose(); + + /** + * Forces the resource to be closed, regardless of the number of acquirers. + */ + void forceClose(); +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/AcquirableResourceFactory.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/AcquirableResourceFactory.java new file mode 100644 index 00000000000..ab11db283fa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/AcquirableResourceFactory.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.util.lease.Releasable; + +/** + * @author kimchy (Shay Banon) + */ +public final class AcquirableResourceFactory { + + public static AcquirableResource newAcquirableResource(T resource) { + return new BlockingAcquirableResource(resource); + } + + private AcquirableResourceFactory() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/BlockingAcquirableResource.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/BlockingAcquirableResource.java new file mode 100644 index 00000000000..0792bff3ca8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/BlockingAcquirableResource.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.util.lease.Releasable; + +/** + * A wrapper around a resource that can be released. Note, release should not be + * called directly on the resource itself. + *

    + *

    Yea, I now, the fact that the resouce itself is releasable basically means that + * users of this class should take care... . + * + * @author kimchy (Shay Banon) + */ +public class BlockingAcquirableResource implements AcquirableResource { + + private final T resource; + + private int count = 0; + + private boolean markForClose = false; + + private boolean closed; + + public BlockingAcquirableResource(T resource) { + this.resource = resource; + } + + @Override public T resource() { + return resource; + } + + /** + * Acquires the resource, returning true if it was acquired. + */ + @Override public synchronized boolean acquire() { + if (markForClose) { + return false; + } + count++; + return true; + } + + /** + * Releases the resource, will close it if there are no more acquirers. + */ + @Override public synchronized void release() { + count--; + checkIfCanClose(); + } + + /** + * Marks the resource to be closed. Will close it if there are no current + * acquires. + */ + @Override public synchronized void markForClose() { + markForClose = true; + checkIfCanClose(); + } + + @Override public void forceClose() { + count = 0; + markForClose(); + } + + private void checkIfCanClose() { + if (markForClose && count <= 0 && !closed) { + closed = true; + resource.release(); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/NonBlockingAcquirableResource.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/NonBlockingAcquirableResource.java new file mode 100644 index 00000000000..51c03a21bf5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/concurrent/resource/NonBlockingAcquirableResource.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.util.lease.Releasable; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicStampedReference; + +/** + * A wrapper around a resource that can be released. Note, release should not be + * called directly on the resource itself. + *

    + *

    Yea, I now, the fact that the resouce itself is releasable basically means that + * users of this class should take care... . + * + * @author kimchy (Shay Banon) + */ +public class NonBlockingAcquirableResource implements AcquirableResource { + + private final T resource; + + private AtomicStampedReference counter = new AtomicStampedReference(false, 0); + + private final AtomicBoolean closed = new AtomicBoolean(); + + public NonBlockingAcquirableResource(T resource) { + this.resource = resource; + } + + @Override public T resource() { + return resource; + } + + @Override public boolean acquire() { + while (true) { + int stamp = counter.getStamp(); + boolean result = counter.compareAndSet(false, false, stamp, stamp + 1); + if (result) { + return true; + } + if (counter.getReference()) { + return false; + } + } + } + + @Override public void release() { + while (true) { + boolean currentReference = counter.getReference(); + int stamp = counter.getStamp(); + boolean result = counter.compareAndSet(currentReference, currentReference, stamp, stamp - 1); + if (result) { + if (currentReference && (stamp <= 1)) { + close(); + } + return; + } + } + } + + @Override public void markForClose() { + while (true) { + int stamp = counter.getStamp(); + boolean result = counter.compareAndSet(false, true, stamp, stamp); + if (result) { + if (stamp <= 0) { + close(); + } + return; + } else if (counter.getReference()) { + return; + } + } + } + + @Override public void forceClose() { + close(); + } + + private void close() { + if (closed.compareAndSet(false, true)) { + resource.release(); + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/Decorators.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/Decorators.java new file mode 100644 index 00000000000..3f8d20d4891 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/Decorators.java @@ -0,0 +1,478 @@ +/* + 
* Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + + +import org.elasticsearch.util.gnu.trove.decorator.*; + +import java.util.Map; +import java.util.Set; + + +/** + * This is a static utility class that provides functions for simplifying creation of + * decorators. + * + * @author Robert D. Eden + * @since Trove 2.1 + */ +public class Decorators { + // Hide the constructor + + private Decorators() { + } + + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TDoubleDoubleHashMap map) { + return new TDoubleDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TDoubleFloatHashMap map) { + return new TDoubleFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TDoubleIntHashMap map) { + return new TDoubleIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. 
+ */ + public static Map wrap(TDoubleLongHashMap map) { + return new TDoubleLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TDoubleByteHashMap map) { + return new TDoubleByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TDoubleShortHashMap map) { + return new TDoubleShortHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatDoubleHashMap map) { + return new TFloatDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatFloatHashMap map) { + return new TFloatFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatIntHashMap map) { + return new TFloatIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatLongHashMap map) { + return new TFloatLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatByteHashMap map) { + return new TFloatByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatShortHashMap map) { + return new TFloatShortHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. 
+ */ + public static Map wrap(TIntDoubleHashMap map) { + return new TIntDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TIntFloatHashMap map) { + return new TIntFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TIntIntHashMap map) { + return new TIntIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TIntLongHashMap map) { + return new TIntLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TIntByteHashMap map) { + return new TIntByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TIntShortHashMap map) { + return new TIntShortHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TLongDoubleHashMap map) { + return new TLongDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TLongFloatHashMap map) { + return new TLongFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TLongIntHashMap map) { + return new TLongIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. 
+ */ + public static Map wrap(TLongLongHashMap map) { + return new TLongLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TLongByteHashMap map) { + return new TLongByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TLongShortHashMap map) { + return new TLongShortHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteDoubleHashMap map) { + return new TByteDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteFloatHashMap map) { + return new TByteFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteIntHashMap map) { + return new TByteIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteLongHashMap map) { + return new TByteLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteByteHashMap map) { + return new TByteByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteShortHashMap map) { + return new TByteShortHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. 
+ */ + public static Map wrap(TShortDoubleHashMap map) { + return new TShortDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TShortFloatHashMap map) { + return new TShortFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TShortIntHashMap map) { + return new TShortIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TShortLongHashMap map) { + return new TShortLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TShortByteHashMap map) { + return new TShortByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TShortShortHashMap map) { + return new TShortShortHashMapDecorator(map); + } + + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TObjectDoubleHashMap map) { + return new TObjectDoubleHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TObjectFloatHashMap map) { + return new TObjectFloatHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TObjectIntHashMap map) { + return new TObjectIntHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. 
+ */ + public static Map wrap(TObjectLongHashMap map) { + return new TObjectLongHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TObjectByteHashMap map) { + return new TObjectByteHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TObjectShortHashMap map) { + return new TObjectShortHashMapDecorator(map); + } + + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TDoubleObjectHashMap map) { + return new TDoubleObjectHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TFloatObjectHashMap map) { + return new TFloatObjectHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TIntObjectHashMap map) { + return new TIntObjectHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TLongObjectHashMap map) { + return new TLongObjectHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TByteObjectHashMap map) { + return new TByteObjectHashMapDecorator(map); + } + + /** + * Wrap the given map in a decorator that uses the standard {@link java.util.Map Map} + * interface. + */ + public static Map wrap(TShortObjectHashMap map) { + return new TShortObjectHashMapDecorator(map); + } + + + /** + * Wrap the given set in a decorator that uses the standard {@link java.util.Set Set} + * interface. 
+ */ + public static Set wrap(TDoubleHashSet set) { + return new TDoubleHashSetDecorator(set); + } + + /** + * Wrap the given set in a decorator that uses the standard {@link java.util.Set Set} + * interface. + */ + public static Set wrap(TFloatHashSet set) { + return new TFloatHashSetDecorator(set); + } + + /** + * Wrap the given set in a decorator that uses the standard {@link java.util.Set Set} + * interface. + */ + public static Set wrap(TIntHashSet set) { + return new TIntHashSetDecorator(set); + } + + /** + * Wrap the given set in a decorator that uses the standard {@link java.util.Set Set} + * interface. + */ + public static Set wrap(TLongHashSet set) { + return new TLongHashSetDecorator(set); + } + + /** + * Wrap the given set in a decorator that uses the standard {@link java.util.Set Set} + * interface. + */ + public static Set wrap(TByteHashSet set) { + return new TByteHashSetDecorator(set); + } + + /** + * Wrap the given set in a decorator that uses the standard {@link java.util.Set Set} + * interface. + */ + public static Set wrap(TShortHashSet set) { + return new TShortHashSetDecorator(set); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/HashFunctions.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/HashFunctions.java new file mode 100644 index 00000000000..08601a2281b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/HashFunctions.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +/** + * Provides various hash functions. + * + * @author wolfgang.hoschek@cern.ch + * @version 1.0, 09/24/99 + */ +public final class HashFunctions { + /** + * Returns a hashcode for the specified value. + * + * @return a hash code value for the specified value. + */ + public static int hash(double value) { + assert !Double.isNaN(value) : "Values of NaN are not supported."; + + long bits = Double.doubleToLongBits(value); + return (int) (bits ^ (bits >>> 32)); + //return (int) Double.doubleToLongBits(value*663608941.737); + //this avoids excessive hashCollisions in the case values are + //of the form (1.0, 2.0, 3.0, ...) + } + + /** + * Returns a hashcode for the specified value. + * + * @return a hash code value for the specified value. + */ + public static int hash(float value) { + assert !Float.isNaN(value) : "Values of NaN are not supported."; + + return Float.floatToIntBits(value * 663608941.737f); + // this avoids excessive hashCollisions in the case values are + // of the form (1.0, 2.0, 3.0, ...) + } + + /** + * Returns a hashcode for the specified value. + * + * @return a hash code value for the specified value. + */ + public static int hash(int value) { + // Multiply by prime to make sure hash can't be negative (see Knuth v3, p. 515-516) + return value * 31; + } + + /** + * Returns a hashcode for the specified value. + * + * @return a hash code value for the specified value. 
+ */ + public static int hash(long value) { + // Multiply by prime to make sure hash can't be negative (see Knuth v3, p. 515-516) + return ((int) (value ^ (value >>> 32))) * 31; + } + + /** + * Returns a hashcode for the specified object. + * + * @return a hash code value for the specified object. + */ + public static int hash(Object object) { + return object == null ? 0 : object.hashCode(); + } + + + /** + * In profiling, it has been found to be faster to have our own local implementation + * of "ceil" rather than to call to {@link Math#ceil(double)}. + */ + static int fastCeil(float v) { + int possible_result = (int) v; + if (v - possible_result > 0) possible_result++; + return possible_result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/PrimeFinder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/PrimeFinder.java new file mode 100644 index 00000000000..1c819e56f9b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/PrimeFinder.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.Arrays; + +/* + * Modified for Trove to use the java.util.Arrays sort/search + * algorithms instead of those provided with colt. + */ + +/** + * Used to keep hash table capacities prime numbers. + * Not of interest for users; only for implementors of hashtables. + *

    + *

    Choosing prime numbers as hash table capacities is a good idea + * to keep them working fast, particularly under hash table + * expansions. + *

    + *

    However, JDK 1.2, JGL 3.1 and many other toolkits do nothing to + * keep capacities prime. This class provides efficient means to + * choose prime capacities. + *

    + *

    Choosing a prime is O(log 300) (binary search in a list + * of 300 ints). Memory requirements: 1 KB static memory. + * + * @author wolfgang.hoschek@cern.ch + * @version 1.0, 09/24/99 + */ +public final class PrimeFinder { + /** + * The largest prime this class can generate; currently equal to + * Integer.MAX_VALUE. + */ + public static final int largestPrime = Integer.MAX_VALUE; //yes, it is prime. + + /** + * The prime number list consists of 11 chunks. + *

    + * Each chunk contains prime numbers. + *

    + * A chunk starts with a prime P1. The next element is a prime + * P2. P2 is the smallest prime for which holds: P2 >= 2*P1. + *

    + * The next element is P3, for which the same holds with respect + * to P2, and so on. + *

    + * Chunks are chosen such that for any desired capacity >= 1000 + * the list includes a prime number <= desired capacity * 1.11. + *

    + * Therefore, primes can be retrieved which are quite close to any + * desired capacity, which in turn avoids wasting memory. + *

    + * For example, the list includes + * 1039,1117,1201,1277,1361,1439,1523,1597,1759,1907,2081. + *

    + * So if you need a prime >= 1040, you will find a prime <= + * 1040*1.11=1154. + *

    + * Chunks are chosen such that they are optimized for a hashtable + * growthfactor of 2.0; + *

    + * If your hashtable has such a growthfactor then, after initially + * "rounding to a prime" upon hashtable construction, it will + * later expand to prime capacities such that there exist no + * better primes. + *

    + * In total these are about 32*10=320 numbers -> 1 KB of static + * memory needed. + *

    + * If you are stingy, then delete every second or fourth chunk. + */ + + private static final int[] primeCapacities = { + //chunk #0 + largestPrime, + + //chunk #1 + 5, 11, 23, 47, 97, 197, 397, 797, 1597, 3203, 6421, 12853, 25717, 51437, 102877, 205759, + 411527, 823117, 1646237, 3292489, 6584983, 13169977, 26339969, 52679969, 105359939, + 210719881, 421439783, 842879579, 1685759167, + + //chunk #2 + 433, 877, 1759, 3527, 7057, 14143, 28289, 56591, 113189, 226379, 452759, 905551, 1811107, + 3622219, 7244441, 14488931, 28977863, 57955739, 115911563, 231823147, 463646329, 927292699, + 1854585413, + + //chunk #3 + 953, 1907, 3821, 7643, 15287, 30577, 61169, 122347, 244703, 489407, 978821, 1957651, 3915341, + 7830701, 15661423, 31322867, 62645741, 125291483, 250582987, 501165979, 1002331963, + 2004663929, + + //chunk #4 + 1039, 2081, 4177, 8363, 16729, 33461, 66923, 133853, 267713, 535481, 1070981, 2141977, 4283963, + 8567929, 17135863, 34271747, 68543509, 137087021, 274174111, 548348231, 1096696463, + + //chunk #5 + 31, 67, 137, 277, 557, 1117, 2237, 4481, 8963, 17929, 35863, 71741, 143483, 286973, 573953, + 1147921, 2295859, 4591721, 9183457, 18366923, 36733847, 73467739, 146935499, 293871013, + 587742049, 1175484103, + + //chunk #6 + 599, 1201, 2411, 4831, 9677, 19373, 38747, 77509, 155027, 310081, 620171, 1240361, 2480729, + 4961459, 9922933, 19845871, 39691759, 79383533, 158767069, 317534141, 635068283, 1270136683, + + //chunk #7 + 311, 631, 1277, 2557, 5119, 10243, 20507, 41017, 82037, 164089, 328213, 656429, 1312867, + 2625761, 5251529, 10503061, 21006137, 42012281, 84024581, 168049163, 336098327, 672196673, + 1344393353, + + //chunk #8 + 3, 7, 17, 37, 79, 163, 331, 673, 1361, 2729, 5471, 10949, 21911, 43853, 87719, 175447, 350899, + 701819, 1403641, 2807303, 5614657, 11229331, 22458671, 44917381, 89834777, 179669557, + 359339171, 718678369, 1437356741, + + //chunk #9 + 43, 89, 179, 359, 719, 1439, 2879, 5779, 11579, 23159, 46327, 92657, 185323, 370661, 
741337, + 1482707, 2965421, 5930887, 11861791, 23723597, 47447201, 94894427, 189788857, 379577741, + 759155483, 1518310967, + + //chunk #10 + 379, 761, 1523, 3049, 6101, 12203, 24407, 48817, 97649, 195311, 390647, 781301, 1562611, + 3125257, 6250537, 12501169, 25002389, 50004791, 100009607, 200019221, 400038451, 800076929, + 1600153859 + }; + + static { //initializer + // The above prime numbers are formatted for human readability. + // To find numbers fast, we sort them once and for all. + + Arrays.sort(primeCapacities); + } + + /** + * Returns a prime number which is >= desiredCapacity + * and very close to desiredCapacity (within 11% if + * desiredCapacity >= 1000). + * + * @param desiredCapacity the capacity desired by the user. + * @return the capacity which should be used for a hashtable. + */ + public static final int nextPrime(int desiredCapacity) { + int i = Arrays.binarySearch(primeCapacities, desiredCapacity); + if (i < 0) { + // desired capacity not found, choose next prime greater + // than desired capacity + i = -i - 1; // remember the semantics of binarySearch... + } + return primeCapacities[i]; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/SerializationProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/SerializationProcedure.java new file mode 100644 index 00000000000..53c97cdd9bc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/SerializationProcedure.java @@ -0,0 +1,715 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.IOException; +import java.io.ObjectOutput; + + +/** + * Implementation of the variously typed procedure interfaces that supports + * writing the arguments to the procedure out on an ObjectOutputStream. + * In the case of two-argument procedures, the arguments are written out + * in the order received. + *

    + *

    + * Any IOException is trapped here so that it can be rethrown in a writeObject + * method. + *

    + *

    + * Created: Sun Jul 7 00:14:18 2002 + * + * @author Eric D. Friedman + * @version $Id: SerializationProcedure.java,v 1.5 2006/11/10 23:27:54 robeden Exp $ + */ + +class SerializationProcedure implements TDoubleDoubleProcedure, + TDoubleFloatProcedure, + TDoubleIntProcedure, + TDoubleLongProcedure, + TDoubleShortProcedure, + TDoubleByteProcedure, + TDoubleObjectProcedure, + TDoubleProcedure, + TFloatDoubleProcedure, + TFloatFloatProcedure, + TFloatIntProcedure, + TFloatLongProcedure, + TFloatShortProcedure, + TFloatByteProcedure, + TFloatObjectProcedure, + TFloatProcedure, + TIntDoubleProcedure, + TIntFloatProcedure, + TIntIntProcedure, + TIntLongProcedure, + TIntShortProcedure, + TIntByteProcedure, + TIntObjectProcedure, + TIntProcedure, + TLongDoubleProcedure, + TLongFloatProcedure, + TLongIntProcedure, + TLongLongProcedure, + TLongShortProcedure, + TLongByteProcedure, + TLongObjectProcedure, + TLongProcedure, + TShortDoubleProcedure, + TShortFloatProcedure, + TShortIntProcedure, + TShortLongProcedure, + TShortShortProcedure, + TShortByteProcedure, + TShortObjectProcedure, + TShortProcedure, + TByteDoubleProcedure, + TByteFloatProcedure, + TByteIntProcedure, + TByteLongProcedure, + TByteShortProcedure, + TByteByteProcedure, + TByteObjectProcedure, + TByteProcedure, + TObjectDoubleProcedure, + TObjectFloatProcedure, + TObjectIntProcedure, + TObjectLongProcedure, + TObjectShortProcedure, + TObjectByteProcedure, + TObjectObjectProcedure, + TObjectProcedure { + + private final ObjectOutput stream; + IOException exception; + + SerializationProcedure(ObjectOutput stream) { + this.stream = stream; + } + + public boolean execute(byte val) { + try { + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short val) { + try { + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int val) { + try { + 
stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double val) { + try { + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long val) { + try { + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float val) { + try { + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object val) { + try { + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object key, Object val) { + try { + stream.writeObject(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object key, byte val) { + try { + stream.writeObject(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object key, short val) { + try { + stream.writeObject(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object key, int val) { + try { + stream.writeObject(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object key, long val) { + try { + stream.writeObject(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(Object key, double val) { + try { + stream.writeObject(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean 
execute(Object key, float val) { + try { + stream.writeObject(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, byte val) { + try { + stream.writeInt(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, short val) { + try { + stream.writeInt(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, Object val) { + try { + stream.writeInt(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, int val) { + try { + stream.writeInt(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, long val) { + try { + stream.writeInt(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, double val) { + try { + stream.writeInt(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(int key, float val) { + try { + stream.writeInt(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, Object val) { + try { + stream.writeLong(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, byte val) { + try { + stream.writeLong(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, short val) { + 
try { + stream.writeLong(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, int val) { + try { + stream.writeLong(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, long val) { + try { + stream.writeLong(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, double val) { + try { + stream.writeLong(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(long key, float val) { + try { + stream.writeLong(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, Object val) { + try { + stream.writeDouble(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, byte val) { + try { + stream.writeDouble(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, short val) { + try { + stream.writeDouble(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, int val) { + try { + stream.writeDouble(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, long val) { + try { + stream.writeDouble(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, double val) { + try { 
+ stream.writeDouble(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(double key, float val) { + try { + stream.writeDouble(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, Object val) { + try { + stream.writeFloat(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, byte val) { + try { + stream.writeFloat(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, short val) { + try { + stream.writeFloat(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, int val) { + try { + stream.writeFloat(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, long val) { + try { + stream.writeFloat(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, double val) { + try { + stream.writeFloat(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(float key, float val) { + try { + stream.writeFloat(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, Object val) { + try { + stream.writeByte(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, byte val) { + try { + 
stream.writeByte(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, short val) { + try { + stream.writeByte(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, int val) { + try { + stream.writeByte(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, long val) { + try { + stream.writeByte(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, double val) { + try { + stream.writeByte(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(byte key, float val) { + try { + stream.writeByte(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, Object val) { + try { + stream.writeShort(key); + stream.writeObject(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, byte val) { + try { + stream.writeShort(key); + stream.writeByte(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, short val) { + try { + stream.writeShort(key); + stream.writeShort(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, int val) { + try { + stream.writeShort(key); + stream.writeInt(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, long val) { + try { + 
stream.writeShort(key); + stream.writeLong(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, double val) { + try { + stream.writeShort(key); + stream.writeDouble(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } + + public boolean execute(short key, float val) { + try { + stream.writeShort(key); + stream.writeFloat(val); + } catch (IOException e) { + this.exception = e; + return false; + } + return true; + } +}// SerializationProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteArrayList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteArrayList.java new file mode 100644 index 00000000000..964f49a281e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteArrayList.java @@ -0,0 +1,935 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; +import java.util.Random; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * A resizable, array-backed list of byte primitives. + *

    + * Created: Sat Dec 29 14:21:12 2001 + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TByteArrayList implements Externalizable, Cloneable { + static final long serialVersionUID = 1L; + + /** + * the data of the list + */ + protected byte[] _data; + + /** + * the index after the last entry in the list + */ + protected int _pos; + + /** + * the default capacity for new lists + */ + protected static final int DEFAULT_CAPACITY = 10; + + /** + * Creates a new TByteArrayList instance with the + * default capacity. + */ + public TByteArrayList() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TByteArrayList instance with the + * specified capacity. + * + * @param capacity an int value + */ + public TByteArrayList(int capacity) { + _data = new byte[capacity]; + _pos = 0; + } + + /** + * Creates a new TByteArrayList instance whose + * capacity is the greater of the length of values and + * DEFAULT_CAPACITY and whose initial contents are the specified + * values. + * + * @param values an byte[] value + */ + public TByteArrayList(byte[] values) { + this(Math.max(values.length, DEFAULT_CAPACITY)); + add(values); + } + + // sizing + + /** + * Grow the internal array as needed to accommodate the specified + * number of elements. The size of the array bytes on each + * resize unless capacity requires more than twice the + * current capacity. + * + * @param capacity an int value + */ + public void ensureCapacity(int capacity) { + if (capacity > _data.length) { + int newCap = Math.max(_data.length << 1, capacity); + byte[] tmp = new byte[newCap]; + System.arraycopy(_data, 0, tmp, 0, _data.length); + _data = tmp; + } + } + + /** + * Returns the number of values in the list. + * + * @return the number of values in the list. + */ + public int size() { + return _pos; + } + + /** + * Tests whether this list contains any values. + * + * @return true if the list is empty. 
+ */ + public boolean isEmpty() { + return _pos == 0; + } + + /** + * Sheds any excess capacity above and beyond the current size of + * the list. + */ + public void trimToSize() { + if (_data.length > size()) { + byte[] tmp = new byte[size()]; + toNativeArray(tmp, 0, tmp.length); + _data = tmp; + } + } + + // modifying + + /** + * Adds val to the end of the list, growing as needed. + * + * @param val an byte value + */ + public void add(byte val) { + ensureCapacity(_pos + 1); + _data[_pos++] = val; + } + + /** + * Adds the values in the array vals to the end of the + * list, in order. + * + * @param vals an byte[] value + */ + public void add(byte[] vals) { + add(vals, 0, vals.length); + } + + /** + * Adds a subset of the values in the array vals to the + * end of the list, in order. + * + * @param vals an byte[] value + * @param offset the offset at which to start copying + * @param length the number of values to copy. + */ + public void add(byte[] vals, int offset, int length) { + ensureCapacity(_pos + length); + System.arraycopy(vals, offset, _data, _pos, length); + _pos += length; + } + + /** + * Inserts value into the list at offset. All + * values including and to the right of offset are shifted + * to the right. + * + * @param offset an int value + * @param value an byte value + */ + public void insert(int offset, byte value) { + if (offset == _pos) { + add(value); + return; + } + ensureCapacity(_pos + 1); + // shift right + System.arraycopy(_data, offset, _data, offset + 1, _pos - offset); + // insert + _data[offset] = value; + _pos++; + } + + /** + * Inserts the array of values into the list at + * offset. All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an byte[] value + */ + public void insert(int offset, byte[] values) { + insert(offset, values, 0, values.length); + } + + /** + * Inserts a slice of the array of values into the list + * at offset. 
All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an byte[] value + * @param valOffset the offset in the values array at which to + * start copying. + * @param len the number of values to copy from the values array + */ + public void insert(int offset, byte[] values, int valOffset, int len) { + if (offset == _pos) { + add(values, valOffset, len); + return; + } + + ensureCapacity(_pos + len); + // shift right + System.arraycopy(_data, offset, _data, offset + len, _pos - offset); + // insert + System.arraycopy(values, valOffset, _data, offset, len); + _pos += len; + } + + /** + * Returns the value at the specified offset. + * + * @param offset an int value + * @return an byte value + */ + public byte get(int offset) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + return _data[offset]; + } + + /** + * Returns the value at the specified offset without doing any + * bounds checking. + * + * @param offset an int value + * @return an byte value + */ + public byte getQuick(int offset) { + return _data[offset]; + } + + /** + * Sets the value at the specified offset. + * + * @param offset an int value + * @param val an byte value + */ + public void set(int offset, byte val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + _data[offset] = val; + } + + /** + * Sets the value at the specified offset and returns the + * previously stored value. + * + * @param offset an int value + * @param val an byte value + * @return the value previously stored at offset. + */ + public byte getSet(int offset, byte val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + byte old = _data[offset]; + _data[offset] = val; + return old; + } + + /** + * Replace the values in the list starting at offset with + * the contents of the values array. 
+ * + * @param offset the first offset to replace + * @param values the source of the new values + */ + public void set(int offset, byte[] values) { + set(offset, values, 0, values.length); + } + + /** + * Replace the values in the list starting at offset with + * length values from the values array, starting + * at valOffset. + * + * @param offset the first offset to replace + * @param values the source of the new values + * @param valOffset the first value to copy from the values array + * @param length the number of values to copy + */ + public void set(int offset, byte[] values, int valOffset, int length) { + if (offset < 0 || offset + length > _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(values, valOffset, _data, offset, length); + } + + /** + * Sets the value at the specified offset without doing any bounds + * checking. + * + * @param offset an int value + * @param val an byte value + */ + public void setQuick(int offset, byte val) { + _data[offset] = val; + } + + /** + * Flushes the internal state of the list, resetting the capacity + * to the default. + */ + public void clear() { + clear(DEFAULT_CAPACITY); + } + + /** + * Flushes the internal state of the list, setting the capacity of + * the empty list to capacity. + * + * @param capacity an int value + */ + public void clear(int capacity) { + _data = new byte[capacity]; + _pos = 0; + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list without + * allocating new backing arrays. + * + * @see #clear + */ + public void reset() { + _pos = 0; + fill((byte) 0); + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list + * without allocating new backing arrays. 
This method differs + * from {@link #reset reset} in that it does not clear the old + * values in the backing array. Thus, it is possible for {@link + * #getQuick getQuick} to return stale data if this method is used + * and the caller is careless about bounds checking. + * + * @see #reset + * @see #clear + * @see #getQuick + */ + public void resetQuick() { + _pos = 0; + } + + /** + * Removes the value at offset from the list. + * + * @param offset an int value + * @return the value previously stored at offset. + */ + public byte remove(int offset) { + byte old = get(offset); + remove(offset, 1); + return old; + } + + /** + * Removes length values from the list, starting at + * offset + * + * @param offset an int value + * @param length an int value + */ + public void remove(int offset, int length) { + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + + if (offset == 0) { + // data at the front + System.arraycopy(_data, length, _data, 0, _pos - length); + } else if (_pos - length == offset) { + // no copy to make, decrementing pos "deletes" values at + // the end + } else { + // data in the middle + System.arraycopy(_data, offset + length, + _data, offset, _pos - (offset + length)); + } + _pos -= length; + // no need to clear old values beyond _pos, because this is a + // primitive collection and 0 takes as much room as any other + // value + } + + /** + * Transform each value in the list using the specified function. + * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + for (int i = _pos; i-- > 0;) { + _data[i] = function.execute(_data[i]); + } + } + + /** + * Reverse the order of the elements in the list. + */ + public void reverse() { + reverse(0, _pos); + } + + /** + * Reverse the order of the elements in the range of the list. 
+ * + * @param from the inclusive index at which to start reversing + * @param to the exclusive index at which to stop reversing + */ + public void reverse(int from, int to) { + if (from == to) { + return; // nothing to do + } + if (from > to) { + throw new IllegalArgumentException("from cannot be greater than to"); + } + for (int i = from, j = to - 1; i < j; i++, j--) { + swap(i, j); + } + } + + /** + * Shuffle the elements of the list using the specified random + * number generator. + * + * @param rand a Random value + */ + public void shuffle(Random rand) { + for (int i = _pos; i-- > 1;) { + swap(i, rand.nextInt(i)); + } + } + + /** + * Swap the values at offsets i and j. + * + * @param i an offset into the data array + * @param j an offset into the data array + */ + private final void swap(int i, int j) { + byte tmp = _data[i]; + _data[i] = _data[j]; + _data[j] = tmp; + } + + // copying + + /** + * Returns a clone of this list. Since this is a primitive + * collection, this will be a deep clone. + * + * @return a deep clone of the list. + */ + public Object clone() { + TByteArrayList list = null; + try { + list = (TByteArrayList) super.clone(); + list._data = toNativeArray(); + } catch (CloneNotSupportedException e) { + // it's supported + } // end of try-catch + return list; + } + + + /** + * Returns a sublist of this list. + * + * @param begin low endpoint (inclusive) of the subList. + * @param end high endpoint (exclusive) of the subList. + * @return sublist of this list from begin, inclusive to end, exclusive. 
+ * @throws IndexOutOfBoundsException - endpoint out of range + * @throws IllegalArgumentException - endpoints out of order (end > begin) + */ + public TByteArrayList subList(int begin, int end) { + if (end < begin) throw new IllegalArgumentException("end index " + end + " greater than begin index " + begin); + if (begin < 0) throw new IndexOutOfBoundsException("begin index can not be < 0"); + if (end > _data.length) throw new IndexOutOfBoundsException("end index < " + _data.length); + TByteArrayList list = new TByteArrayList(end - begin); + for (int i = begin; i < end; i++) { + list.add(_data[i]); + } + return list; + } + + + /** + * Copies the contents of the list into a native array. + * + * @return an byte[] value + */ + public byte[] toNativeArray() { + return toNativeArray(0, _pos); + } + + /** + * Copies a slice of the list into a native array. + * + * @param offset the offset at which to start copying + * @param len the number of values to copy. + * @return an byte[] value + */ + public byte[] toNativeArray(int offset, int len) { + byte[] rv = new byte[len]; + toNativeArray(rv, offset, len); + return rv; + } + + /** + * Copies a slice of the list into a native array. + * + * @param dest the array to copy into. + * @param offset the offset of the first value to copy + * @param len the number of values to copy. + */ + public void toNativeArray(byte[] dest, int offset, int len) { + if (len == 0) { + return; // nothing to copy + } + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(_data, offset, dest, 0, len); + } + + // comparing + + /** + * Compares this list to another list, value by value. + * + * @param other the object to compare against + * @return true if other is a TByteArrayList and has exactly the + * same values. 
+ */ + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TByteArrayList) { + TByteArrayList that = (TByteArrayList) other; + if (that.size() != this.size()) { + return false; + } else { + for (int i = _pos; i-- > 0;) { + if (this._data[i] != that._data[i]) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + public int hashCode() { + int h = 0; + for (int i = _pos; i-- > 0;) { + h = 37 * h + HashFunctions.hash(_data[i]); + } + return h; + } + + // procedures + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a TByteProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEach(TByteProcedure procedure) { + for (int i = 0; i < _pos; i++) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + /** + * Applies the procedure to each value in the list in descending + * (back to front) order. + * + * @param procedure a TByteProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEachDescending(TByteProcedure procedure) { + for (int i = _pos; i-- > 0;) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + // sorting + + /** + * Sort the values in the list (ascending) using the Sun quicksort + * implementation. + * + * @see java.util.Arrays#sort + */ + public void sort() { + Arrays.sort(_data, 0, _pos); + } + + /** + * Sort a slice of the list (ascending) using the Sun quicksort + * implementation. + * + * @param fromIndex the index at which to start sorting (inclusive) + * @param toIndex the index at which to stop sorting (exclusive) + * @see java.util.Arrays#sort + */ + public void sort(int fromIndex, int toIndex) { + Arrays.sort(_data, fromIndex, toIndex); + } + + // filling + + /** + * Fills every slot in the list with the specified value. 
+ * + * @param val the value to use when filling + */ + public void fill(byte val) { + Arrays.fill(_data, 0, _pos, val); + } + + /** + * Fills a range in the list with the specified value. + * + * @param fromIndex the offset at which to start filling (inclusive) + * @param toIndex the offset at which to stop filling (exclusive) + * @param val the value to use when filling + */ + public void fill(int fromIndex, int toIndex, byte val) { + if (toIndex > _pos) { + ensureCapacity(toIndex); + _pos = toIndex; + } + Arrays.fill(_data, fromIndex, toIndex, val); + } + + // searching + + /** + * Performs a binary search for value in the entire list. + * Note that you must @{link #sort sort} the list before + * doing a search. + * + * @param value the value to search for + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(byte value) { + return binarySearch(value, 0, _pos); + } + + /** + * Performs a binary search for value in the specified + * range. Note that you must @{link #sort sort} the list + * or the range before doing a search. + * + * @param value the value to search for + * @param fromIndex the lower boundary of the range (inclusive) + * @param toIndex the upper boundary of the range (exclusive) + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(byte value, int fromIndex, int toIndex) { + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); + } + if (toIndex > _pos) { + throw new ArrayIndexOutOfBoundsException(toIndex); + } + + int low = fromIndex; + int high = toIndex - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + byte midVal = _data[mid]; + + if (midVal < value) { + low = mid + 1; + } else if (midVal > value) { + high = mid - 1; + } else { + return mid; // value found + } + } + return -(low + 1); // value not found. 
+ } + + /** + * Searches the list front to back for the index of + * value. + * + * @param value an byte value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(byte value) { + return indexOf(0, value); + } + + /** + * Searches the list front to back for the index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (inclusive) + * @param value an byte value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int offset, byte value) { + for (int i = offset; i < _pos; i++) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list back to front for the last index of + * value. + * + * @param value an byte value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(byte value) { + return lastIndexOf(_pos, value); + } + + /** + * Searches the list back to front for the last index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (exclusive) + * @param value an byte value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int offset, byte value) { + for (int i = offset; i-- > 0;) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list for value + * + * @param value an byte value + * @return true if value is in the list. + */ + public boolean contains(byte value) { + return lastIndexOf(value) >= 0; + } + + /** + * Searches the list for values satisfying condition in + * the manner of the *nix grep utility. 
+ * + * @param condition a condition to apply to each element in the list + * @return a list of values which match the condition. + */ + public TByteArrayList grep(TByteProcedure condition) { + TByteArrayList list = new TByteArrayList(); + for (int i = 0; i < _pos; i++) { + if (condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Searches the list for values which do not satisfy + * condition. This is akin to *nix grep -v. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which do not match the condition. + */ + public TByteArrayList inverseGrep(TByteProcedure condition) { + TByteArrayList list = new TByteArrayList(); + for (int i = 0; i < _pos; i++) { + if (!condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Finds the maximum value in the list. + * + * @return the largest value in the list. + * @throws IllegalStateException if the list is empty + */ + public byte max() { + if (size() == 0) { + throw new IllegalStateException("cannot find maximum of an empty list"); + } + byte max = Byte.MIN_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] > max) { + max = _data[i]; + } + } + return max; + } + + /** + * Finds the minimum value in the list. + * + * @return the smallest value in the list. + * @throws IllegalStateException if the list is empty + */ + public byte min() { + if (size() == 0) { + throw new IllegalStateException("cannot find minimum of an empty list"); + } + byte min = Byte.MAX_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] < min) { + min = _data[i]; + } + } + return min; + } + + // stringification + + /** + * Returns a String representation of the list, front to back. 
+ * + * @return a String value + */ + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + for (int i = 0, end = _pos - 1; i < end; i++) { + buf.append(_data[i]); + buf.append(", "); + } + if (size() > 0) { + buf.append(_data[_pos - 1]); + } + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // POSITION + out.writeInt(_pos); + + // ENTRIES + int len = _pos; + out.writeInt(_pos); // Written twice for backwards compatability with + // version 0 + for (int i = 0; i < len; i++) { + out.writeByte(_data[i]); + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // POSITION + _pos = in.readInt(); + + // ENTRIES + int len = in.readInt(); + _data = new byte[len]; + for (int i = 0; i < len; i++) { + _data[i] = in.readByte(); + } + } +} // TByteArrayList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteHashMap.java new file mode 100644 index 00000000000..c445067c4e8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for byte keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TByteByteHashMap extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TByteByteProcedure PUT_ALL_PROC = new TByteByteProcedure() { + public boolean execute(byte key, byte value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TByteByteHashMap instance with the default + * capacity and load factor. + */ + public TByteByteHashMap() { + super(); + } + + /** + * Creates a new TByteByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteByteHashMap(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteByteHashMap(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteByteHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TByteByteHashMap m = (TByteByteHashMap) super.clone(); + m._values = (byte[]) this._values.clone(); + return m; + } + + /** + * @return a TByteByteIterator with access to this map's keys and values + */ + public TByteByteIterator iterator() { + return new TByteByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an byte value + * @param value an byte value + * @return the previous value associated with key, + * or (byte)0 if none was found. + */ + public byte put(byte key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an byte value + * @param value an byte value + * @return the previous value associated with key, + * or (byte)0 if none was found. 
+ */ + public byte putIfAbsent(byte key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(byte key, byte value, int index) { + byte previousState; + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TByteByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldKeys[] = _set; + byte oldVals[] = _values; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _values = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an byte value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public byte get(byte key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + byte[] keys = _set; + byte[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (byte) 0); + Arrays.fill(_values, 0, _values.length, (byte) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an byte value + * @return an byte value, or (byte)0 if no mapping for key exists + */ + public byte remove(byte key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteByteHashMap)) { + return false; + } + TByteByteHashMap that = (TByteByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key, byte value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TByteByteProcedure { + private final TByteByteHashMap _otherMap; + + EqProcedure(TByteByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(byte key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. 
        /**
         * Compares two bytes for equality.
         */
        private final boolean eq(byte v1, byte v2) {
            return v1 == v2;
        }

    }

    /**
     * Removes the mapping at <tt>index</tt> from the map: zeroes the value
     * slot, then lets the superclass clear the key/state and adjust size.
     *
     * @param index an <code>int</code> value
     */
    protected void removeAt(int index) {
        _values[index] = (byte) 0;
        super.removeAt(index); // clear key, state; adjust size
    }

    /**
     * Returns the values of the map (in no particular order).
     *
     * @return a <code>Collection</code> value
     */
    public byte[] getValues() {
        byte[] vals = new byte[size()];
        byte[] v = _values;
        byte[] states = _states;

        // walk backwards over the parallel arrays, copying only FULL slots
        for (int i = v.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                vals[j++] = v[i];
            }
        }
        return vals;
    }

    /**
     * Returns the keys of the map (in no particular order).
     *
     * @return a <code>Set</code> value
     */
    public byte[] keys() {
        byte[] keys = new byte[size()];
        byte[] k = _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                keys[j++] = k[i];
            }
        }
        return keys;
    }

    /**
     * Returns the keys of the map, storing them in <tt>a</tt> when it is
     * large enough; otherwise a new array of the same runtime component
     * type is allocated.
     *
     * @param a the array into which the elements of the list are to
     *          be stored, if it is big enough; otherwise, a new array of the
     *          same type is allocated for this purpose.
     * @return a <code>Set</code> value
     */
    public byte[] keys(byte[] a) {
        int size = size();
        if (a.length < size) {
            a = (byte[]) java.lang.reflect.Array.newInstance(
                    a.getClass().getComponentType(), size);
        }

        byte[] k = (byte[]) _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                a[j++] = k[i];
            }
        }
        return a;
    }

    /**
     * Checks for the presence of <tt>val</tt> in the values of the map.
     *
     * @param val an <code>byte</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsValue(byte val) {
        byte[] states = _states;
        byte[] vals = _values;

        for (int i = vals.length; i-- > 0;) {
            if (states[i] == FULL && val == vals[i]) {
                return true;
            }
        }
        return false;
    }


    /**
     * Checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key an <code>byte</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsKey(byte key) {
        return contains(key);
    }

    /**
     * Executes <tt>procedure</tt> for each key in the map.
     *
     * @param procedure a <code>TByteProcedure</code> value
     * @return false if the loop over the keys terminated because
     *         the procedure returned false for some key.
     */
    public boolean forEachKey(TByteProcedure procedure) {
        return forEach(procedure);
    }

    /**
     * Executes <tt>procedure</tt> for each value in the map.
     *
     * @param procedure a <code>TByteProcedure</code> value
     * @return false if the loop over the values terminated because
     *         the procedure returned false for some value.
     */
    public boolean forEachValue(TByteProcedure procedure) {
        byte[] states = _states;
        byte[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Executes <tt>procedure</tt> for each key/value entry in the map.
     *
     * @param procedure a <code>TByteByteProcedure</code> value
     * @return false if the loop over the entries terminated because
     *         the procedure returned false for some entry.
     */
    public boolean forEachEntry(TByteByteProcedure procedure) {
        byte[] states = _states;
        byte[] keys = _set;
        byte[] values = _values;
        for (int i = keys.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Retains only those entries in the map for which the procedure
     * returns a true value.
     *
     * @param procedure determines which entries to keep
     * @return true if the map was modified.
     */
    public boolean retainEntries(TByteByteProcedure procedure) {
        boolean modified = false;
        byte[] states = _states;
        byte[] keys = _set;
        byte[] values = _values;


        // Temporarily disable compaction. This is a fix for bug #1738760
        // (removeAt inside the loop must not trigger a rehash that would
        // invalidate the local array aliases being iterated).
        tempDisableAutoCompaction();
        try {
            for (int i = keys.length; i-- > 0;) {
                if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                    removeAt(i);
                    modified = true;
                }
            }
        }
        finally {
            reenableAutoCompaction(true);
        }

        return modified;
    }

    /**
     * Transforms the values in this map using <tt>function</tt>.
     *
     * @param function a <code>TByteFunction</code> value
     */
    public void transformValues(TByteFunction function) {
        byte[] states = _states;
        byte[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL) {
                values[i] = function.execute(values[i]);
            }
        }
    }

    /**
     * Increments the primitive value mapped to key by 1.
     *
     * @param key the key of the value to increment
     * @return true if a mapping was found and modified.
     */
    public boolean increment(byte key) {
        return adjustValue(key, (byte) 1);
    }

    /**
     * Adjusts the primitive value mapped to key by <tt>amount</tt>.
     *
     * @param key    the key of the value to increment
     * @param amount the amount to adjust the value by.
     * @return true if a mapping was found and modified.
     */
    public boolean adjustValue(byte key, byte amount) {
        int index = index(key);
        if (index < 0) {
            return false;
        } else {
            _values[index] += amount;
            return true;
        }
    }

    /**
     * Adjusts the primitive value mapped to the key if the key is present in
     * the map; otherwise, <tt>put_amount</tt> is put in the map.
     *
     * @param key           the key of the value to adjust
     * @param adjust_amount the amount to adjust the value by
     * @param put_amount    the value put into the map if the key is not initially present
     * @return the value present in the map after the adjustment or put operation
     * @since 2.0b1
     */
    public byte adjustOrPutValue(final byte key, final byte adjust_amount, final byte put_amount) {
        int index = insertionIndex(key);
        final boolean isNewMapping;
        final byte newValue;
        // negative insertionIndex means the key already exists at -index - 1
        if (index < 0) {
            index = -index - 1;
            newValue = (_values[index] += adjust_amount);
            isNewMapping = false;
        } else {
            newValue = (_values[index] = put_amount);
            isNewMapping = true;
        }

        byte previousState = _states[index];
        _set[index] = key;
        _states[index] = FULL;

        if (isNewMapping) {
            postInsertHook(previousState == FREE);
        }

        return newValue;
    }


    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // NUMBER OF ENTRIES
        out.writeInt(_size);

        // ENTRIES
        SerializationProcedure writeProcedure = new SerializationProcedure(out);
        if (!forEachEntry(writeProcedure)) {
            throw writeProcedure.exception;
        }
    }

    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // NUMBER OF ENTRIES
        int size = in.readInt();
        setUp(size);

        // ENTRIES
        while (size-- > 0) {
            byte key = in.readByte();
            byte val = in.readByte();
            put(key, val);
        }
    }

    public String toString() {
        final StringBuilder buf = new StringBuilder("{");
        forEachEntry(new TByteByteProcedure() {
            private boolean first = true;

            public boolean execute(byte key, byte value) {
                if (first) first = false;
                else buf.append(",");

                buf.append(key);
                buf.append("=");
                buf.append(value);
                return true;
            }
        });
        buf.append("}");
        return buf.toString();
    }
} // TByteByteHashMap
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteIterator.java new file mode 100644 index 00000000000..27d02b25175 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteByteIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteByteHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteByteIterator(TByteByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TByteByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteProcedure.java new file mode 100644 index 00000000000..214aefb1c00 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and byte. + *

package org.elasticsearch.util.gnu.trove;

//////////////////////////////////////////////////
// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! //
//////////////////////////////////////////////////


/**
 * Interface for procedures that take two parameters of type byte and byte,
 * used for iteration/filtering callbacks over byte/byte maps.
 * <p/>
 * Created: Mon Nov 5 22:03:30 2001
 *
 * @author Eric D. Friedman
 * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $
 */

public interface TByteByteProcedure {

    /**
     * Executes this procedure. A false return value indicates that
     * the application executing this procedure should not invoke this
     * procedure again.
     *
     * @param a a <code>byte</code> value
     * @param b a <code>byte</code> value
     * @return true if additional invocations of the procedure are
     *         allowed.
     */
    public boolean execute(byte a, byte b);
}// TByteByteProcedure
package org.elasticsearch.util.gnu.trove;

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Arrays;


//////////////////////////////////////////////////
// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! //
//////////////////////////////////////////////////


/**
 * An open addressed Map implementation for byte keys and double values.
 * <p/>
 * Created: Sun Nov 4 08:52:45 2001
 *
 * @author Eric D. Friedman
 */
public class TByteDoubleHashMap extends TByteHash implements Externalizable {
    static final long serialVersionUID = 1L;

    /** procedure used by {@link #putAll} to copy every entry into this map */
    private final TByteDoubleProcedure PUT_ALL_PROC = new TByteDoubleProcedure() {
        public boolean execute(byte key, double value) {
            put(key, value);
            return true;
        }
    };


    /**
     * the values of the map
     */
    protected transient double[] _values;

    /**
     * Creates a new <code>TByteDoubleHashMap</code> instance with the default
     * capacity and load factor.
     */
    public TByteDoubleHashMap() {
        super();
    }

    /**
     * Creates a new <code>TByteDoubleHashMap</code> instance with a prime
     * capacity equal to or greater than <tt>initialCapacity</tt> and
     * with the default load factor.
     *
     * @param initialCapacity an <code>int</code> value
     */
    public TByteDoubleHashMap(int initialCapacity) {
        super(initialCapacity);
    }

    /**
     * Creates a new <code>TByteDoubleHashMap</code> instance with a prime
     * capacity equal to or greater than <tt>initialCapacity</tt> and
     * with the specified load factor.
     *
     * @param initialCapacity an <code>int</code> value
     * @param loadFactor      a <code>float</code> value
     */
    public TByteDoubleHashMap(int initialCapacity, float loadFactor) {
        super(initialCapacity, loadFactor);
    }

    /**
     * Creates a new <code>TByteDoubleHashMap</code> instance with the default
     * capacity and load factor.
     *
     * @param strategy used to compute hash codes and to compare keys.
     */
    public TByteDoubleHashMap(TByteHashingStrategy strategy) {
        super(strategy);
    }

    /**
     * Creates a new <code>TByteDoubleHashMap</code> instance whose capacity
     * is the next highest prime above <tt>initialCapacity + 1</tt>
     * unless that value is already prime.
     *
     * @param initialCapacity an <code>int</code> value
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TByteDoubleHashMap(int initialCapacity, TByteHashingStrategy strategy) {
        super(initialCapacity, strategy);
    }

    /**
     * Creates a new <code>TByteDoubleHashMap</code> instance with a prime
     * value at or near the specified capacity and load factor.
     *
     * @param initialCapacity used to find a prime capacity for the table.
     * @param loadFactor      used to calculate the threshold over which
     *                        rehashing takes place.
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TByteDoubleHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) {
        super(initialCapacity, loadFactor, strategy);
    }

    /**
     * @return a deep clone of this collection
     */
    public Object clone() {
        TByteDoubleHashMap m = (TByteDoubleHashMap) super.clone();
        m._values = (double[]) this._values.clone();
        return m;
    }

    /**
     * @return a TByteDoubleIterator with access to this map's keys and values
     */
    public TByteDoubleIterator iterator() {
        return new TByteDoubleIterator(this);
    }

    /**
     * Initializes the hashtable to a prime capacity which is at least
     * <tt>initialCapacity + 1</tt>.
     *
     * @param initialCapacity an <code>int</code> value
     * @return the actual capacity chosen
     */
    protected int setUp(int initialCapacity) {
        int capacity;

        capacity = super.setUp(initialCapacity);
        _values = new double[capacity];
        return capacity;
    }

    /**
     * Inserts a key/value pair into the map.
     *
     * @param key   an <code>byte</code> value
     * @param value an <code>double</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or (byte)0 if none was found.
     */
    public double put(byte key, double value) {
        int index = insertionIndex(key);
        return doPut(key, value, index);
    }

    /**
     * Inserts a key/value pair into the map if the specified key is not already
     * associated with a value.
     *
     * @param key   an <code>byte</code> value
     * @param value an <code>double</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or (byte)0 if none was found.
     */
    public double putIfAbsent(byte key, double value) {
        int index = insertionIndex(key);
        if (index < 0)
            return _values[-index - 1];
        return doPut(key, value, index);
    }

    /**
     * Shared insertion logic for put/putIfAbsent. A negative <tt>index</tt>
     * means the key is already present at <tt>-index - 1</tt>.
     */
    private double doPut(byte key, double value, int index) {
        byte previousState;
        double previous = (double) 0;
        boolean isNewMapping = true;
        if (index < 0) {
            index = -index - 1;
            previous = _values[index];
            isNewMapping = false;
        }
        previousState = _states[index];
        _set[index] = key;
        _states[index] = FULL;
        _values[index] = value;
        if (isNewMapping) {
            postInsertHook(previousState == FREE);
        }

        return previous;
    }


    /**
     * Put all the entries from the given map into this map.
     *
     * @param map The map from which entries will be obtained to put into this map.
     */
    public void putAll(TByteDoubleHashMap map) {
        map.forEachEntry(PUT_ALL_PROC);
    }


    /**
     * Rehashes the map to the new capacity.
     *
     * @param newCapacity an <code>int</code> value
     */
    protected void rehash(int newCapacity) {
        int oldCapacity = _set.length;
        byte oldKeys[] = _set;
        double oldVals[] = _values;
        byte oldStates[] = _states;

        _set = new byte[newCapacity];
        _values = new double[newCapacity];
        _states = new byte[newCapacity];

        // reinsert every FULL entry into the freshly sized arrays
        for (int i = oldCapacity; i-- > 0;) {
            if (oldStates[i] == FULL) {
                byte o = oldKeys[i];
                int index = insertionIndex(o);
                _set[index] = o;
                _values[index] = oldVals[i];
                _states[index] = FULL;
            }
        }
    }

    /**
     * Retrieves the value for <tt>key</tt>.
     *
     * @param key an <code>byte</code> value
     * @return the value of <tt>key</tt> or (byte)0 if no such mapping exists.
     */
    public double get(byte key) {
        int index = index(key);
        return index < 0 ? (double) 0 : _values[index];
    }

    /**
     * Empties the map.
     */
    public void clear() {
        super.clear();
        // FIX: dropped three unused local aliases (keys/vals/states) that the
        // generated code declared but never read; the fills act on the fields.
        Arrays.fill(_set, 0, _set.length, (byte) 0);
        Arrays.fill(_values, 0, _values.length, (double) 0);
        Arrays.fill(_states, 0, _states.length, FREE);
    }

    /**
     * Deletes a key/value pair from the map.
     *
     * @param key an <code>byte</code> value
     * @return an <code>double</code> value, or (byte)0 if no mapping for key exists
     */
    public double remove(byte key) {
        double prev = (double) 0;
        int index = index(key);
        if (index >= 0) {
            prev = _values[index];
            removeAt(index); // clear key,state; adjust size
        }
        return prev;
    }

    /**
     * Compares this map with another map for equality of their stored
     * entries.
     *
     * @param other an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean equals(Object other) {
        if (!(other instanceof TByteDoubleHashMap)) {
            return false;
        }
        TByteDoubleHashMap that = (TByteDoubleHashMap) other;
        if (that.size() != this.size()) {
            return false;
        }
        return forEachEntry(new EqProcedure(that));
    }

    public int hashCode() {
        HashProcedure p = new HashProcedure();
        forEachEntry(p);
        return p.getHashCode();
    }

    /** accumulates an order-independent hash over all entries */
    private final class HashProcedure implements TByteDoubleProcedure {
        private int h = 0;

        public int getHashCode() {
            return h;
        }

        public final boolean execute(byte key, double value) {
            h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value));
            return true;
        }
    }

    /** entry-by-entry equality check against another map */
    private static final class EqProcedure implements TByteDoubleProcedure {
        private final TByteDoubleHashMap _otherMap;

        EqProcedure(TByteDoubleHashMap otherMap) {
            _otherMap = otherMap;
        }

        public final boolean execute(byte key, double value) {
            int index = _otherMap.index(key);
            if (index >= 0 && eq(value, _otherMap.get(key))) {
                return true;
            }
            return false;
        }

        /**
         * Compare two doubles for equality.
         */
        private final boolean eq(double v1, double v2) {
            return v1 == v2;
        }

    }

    /**
     * Removes the mapping at <tt>index</tt> from the map.
     *
     * @param index an <code>int</code> value
     */
    protected void removeAt(int index) {
        _values[index] = (double) 0;
        super.removeAt(index); // clear key, state; adjust size
    }

    /**
     * Returns the values of the map (in no particular order).
     *
     * @return a <code>Collection</code> value
     */
    public double[] getValues() {
        double[] vals = new double[size()];
        double[] v = _values;
        byte[] states = _states;

        for (int i = v.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                vals[j++] = v[i];
            }
        }
        return vals;
    }

    /**
     * Returns the keys of the map (in no particular order).
     *
     * @return a <code>Set</code> value
     */
    public byte[] keys() {
        byte[] keys = new byte[size()];
        byte[] k = _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                keys[j++] = k[i];
            }
        }
        return keys;
    }

    /**
     * Returns the keys of the map, storing them in <tt>a</tt> when it is
     * large enough; otherwise a new array of the same runtime component
     * type is allocated.
     *
     * @param a the array into which the elements of the list are to
     *          be stored, if it is big enough; otherwise, a new array of the
     *          same type is allocated for this purpose.
     * @return a <code>Set</code> value
     */
    public byte[] keys(byte[] a) {
        int size = size();
        if (a.length < size) {
            a = (byte[]) java.lang.reflect.Array.newInstance(
                    a.getClass().getComponentType(), size);
        }

        // FIX: removed redundant (byte[]) cast — _set is already byte[]
        byte[] k = _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                a[j++] = k[i];
            }
        }
        return a;
    }

    /**
     * Checks for the presence of <tt>val</tt> in the values of the map.
     *
     * @param val an <code>double</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsValue(double val) {
        byte[] states = _states;
        double[] vals = _values;

        for (int i = vals.length; i-- > 0;) {
            if (states[i] == FULL && val == vals[i]) {
                return true;
            }
        }
        return false;
    }


    /**
     * Checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key an <code>byte</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsKey(byte key) {
        return contains(key);
    }

    /**
     * Executes <tt>procedure</tt> for each key in the map.
     *
     * @param procedure a <code>TByteProcedure</code> value
     * @return false if the loop over the keys terminated because
     *         the procedure returned false for some key.
     */
    public boolean forEachKey(TByteProcedure procedure) {
        return forEach(procedure);
    }

    /**
     * Executes <tt>procedure</tt> for each value in the map.
     *
     * @param procedure a <code>TDoubleProcedure</code> value
     * @return false if the loop over the values terminated because
     *         the procedure returned false for some value.
     */
    public boolean forEachValue(TDoubleProcedure procedure) {
        byte[] states = _states;
        double[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Executes <tt>procedure</tt> for each key/value entry in the map.
     *
     * @param procedure a <code>TByteDoubleProcedure</code> value
     * @return false if the loop over the entries terminated because
     *         the procedure returned false for some entry.
     */
    public boolean forEachEntry(TByteDoubleProcedure procedure) {
        byte[] states = _states;
        byte[] keys = _set;
        double[] values = _values;
        for (int i = keys.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Retains only those entries in the map for which the procedure
     * returns a true value.
     *
     * @param procedure determines which entries to keep
     * @return true if the map was modified.
     */
    public boolean retainEntries(TByteDoubleProcedure procedure) {
        boolean modified = false;
        byte[] states = _states;
        byte[] keys = _set;
        double[] values = _values;


        // Temporarily disable compaction. This is a fix for bug #1738760
        tempDisableAutoCompaction();
        try {
            for (int i = keys.length; i-- > 0;) {
                if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                    removeAt(i);
                    modified = true;
                }
            }
        }
        finally {
            reenableAutoCompaction(true);
        }

        return modified;
    }

    /**
     * Transforms the values in this map using <tt>function</tt>.
     *
     * @param function a <code>TDoubleFunction</code> value
     */
    public void transformValues(TDoubleFunction function) {
        byte[] states = _states;
        double[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL) {
                values[i] = function.execute(values[i]);
            }
        }
    }

    /**
     * Increments the primitive value mapped to key by 1.
     *
     * @param key the key of the value to increment
     * @return true if a mapping was found and modified.
     */
    public boolean increment(byte key) {
        return adjustValue(key, (double) 1);
    }

    /**
     * Adjusts the primitive value mapped to key by <tt>amount</tt>.
     *
     * @param key    the key of the value to increment
     * @param amount the amount to adjust the value by.
     * @return true if a mapping was found and modified.
     */
    public boolean adjustValue(byte key, double amount) {
        int index = index(key);
        if (index < 0) {
            return false;
        } else {
            _values[index] += amount;
            return true;
        }
    }

    /**
     * Adjusts the primitive value mapped to the key if the key is present in
     * the map; otherwise, <tt>put_amount</tt> is put in the map.
     *
     * @param key           the key of the value to adjust
     * @param adjust_amount the amount to adjust the value by
     * @param put_amount    the value put into the map if the key is not initially present
     * @return the value present in the map after the adjustment or put operation
     * @since 2.0b1
     */
    public double adjustOrPutValue(final byte key, final double adjust_amount, final double put_amount) {
        int index = insertionIndex(key);
        final boolean isNewMapping;
        final double newValue;
        if (index < 0) {
            index = -index - 1;
            newValue = (_values[index] += adjust_amount);
            isNewMapping = false;
        } else {
            newValue = (_values[index] = put_amount);
            isNewMapping = true;
        }

        byte previousState = _states[index];
        _set[index] = key;
        _states[index] = FULL;

        if (isNewMapping) {
            postInsertHook(previousState == FREE);
        }

        return newValue;
    }


    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // NUMBER OF ENTRIES
        out.writeInt(_size);

        // ENTRIES
        SerializationProcedure writeProcedure = new SerializationProcedure(out);
        if (!forEachEntry(writeProcedure)) {
            throw writeProcedure.exception;
        }
    }

    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // NUMBER OF ENTRIES
        int size = in.readInt();
        setUp(size);

        // ENTRIES
        while (size-- > 0) {
            byte key = in.readByte();
            double val = in.readDouble();
            put(key, val);
        }
    }

    public String toString() {
        final StringBuilder buf = new StringBuilder("{");
        forEachEntry(new TByteDoubleProcedure() {
            private boolean first = true;

            public boolean execute(byte key, double value) {
                if (first) first = false;
                else buf.append(",");

                buf.append(key);
                buf.append("=");
                buf.append(value);
                return true;
            }
        });
        buf.append("}");
        return buf.toString();
    }
} // TByteDoubleHashMap
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteDoubleIterator.java new file mode 100644 index 00000000000..3753757272b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteDoubleIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteDoubleIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteDoubleHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteDoubleIterator(TByteDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TByteDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteDoubleProcedure.java new file mode 100644 index 00000000000..913aa8075df --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a byte value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte a, double b); +}// TByteDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatHashMap.java new file mode 100644 index 00000000000..aa8b2570efc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for byte keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TByteFloatHashMap extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TByteFloatProcedure PUT_ALL_PROC = new TByteFloatProcedure() { + public boolean execute(byte key, float value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TByteFloatHashMap instance with the default + * capacity and load factor. + */ + public TByteFloatHashMap() { + super(); + } + + /** + * Creates a new TByteFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteFloatHashMap(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteFloatHashMap(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteFloatHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TByteFloatHashMap m = (TByteFloatHashMap) super.clone(); + m._values = (float[]) this._values.clone(); + return m; + } + + /** + * @return a TByteFloatIterator with access to this map's keys and values + */ + public TByteFloatIterator iterator() { + return new TByteFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an byte value + * @param value an float value + * @return the previous value associated with key, + * or (byte)0 if none was found. + */ + public float put(byte key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an byte value + * @param value an float value + * @return the previous value associated with key, + * or (byte)0 if none was found. 
+ */ + public float putIfAbsent(byte key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(byte key, float value, int index) { + byte previousState; + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TByteFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldKeys[] = _set; + float oldVals[] = _values; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _values = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an byte value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public float get(byte key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + byte[] keys = _set; + float[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (byte) 0); + Arrays.fill(_values, 0, _values.length, (float) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an byte value + * @return an float value, or (byte)0 if no mapping for key exists + */ + public float remove(byte key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteFloatHashMap)) { + return false; + } + TByteFloatHashMap that = (TByteFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key, float value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TByteFloatProcedure { + private final TByteFloatHashMap _otherMap; + + EqProcedure(TByteFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(byte key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. 
+ */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (float) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public byte[] keys() { + byte[] keys = new byte[size()]; + byte[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public byte[] keys(byte[] a) { + int size = size(); + if (a.length < size) { + a = (byte[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + byte[] k = (byte[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + byte[] states = _states; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an byte value + * @return a boolean value + */ + public boolean containsKey(byte key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TByteProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOByteFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TByteFloatProcedure procedure) { + byte[] states = _states; + byte[] keys = _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TByteFloatProcedure procedure) { + boolean modified = false; + byte[] states = _states; + byte[] keys = _set; + float[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(byte key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(byte key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final byte key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + byte key = in.readByte(); + float val = in.readFloat(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TByteFloatProcedure() { + private boolean first = true; + + public boolean execute(byte key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TByteFloatHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatIterator.java new file mode 100644 index 00000000000..27b7b1214d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteFloatIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteFloatHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteFloatIterator(TByteFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TByteFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatProcedure.java new file mode 100644 index 00000000000..4b8201bc778 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a byte value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte a, float b); +}// TByteFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFunction.java new file mode 100644 index 00000000000..78fbc029628 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteFunction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for functions that accept and return one byte primitive. + *

    + * Created: Mon Nov 5 22:19:36 2001 + * + * @author Eric D. Friedman + * @version $Id: PFunction.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteFunction { + /** + * Execute this function with value + * + * @param value a byte input + * @return a byte result + */ + public byte execute(byte value); +}// TByteFunction diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHash.java new file mode 100644 index 00000000000..e24c0ede49d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHash.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed hashing implementation for byte primitives. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHash.template,v 1.2 2007/06/29 22:39:46 robeden Exp $ + */ + +abstract public class TByteHash extends TPrimitiveHash implements TByteHashingStrategy { + + /** + * the set of bytes + */ + protected transient byte[] _set; + + /** + * strategy used to hash values in this collection + */ + protected TByteHashingStrategy _hashingStrategy; + + /** + * Creates a new TByteHash instance with the default + * capacity and load factor. + */ + public TByteHash() { + super(); + this._hashingStrategy = this; + } + + /** + * Creates a new TByteHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + */ + public TByteHash(int initialCapacity) { + super(initialCapacity); + this._hashingStrategy = this; + } + + /** + * Creates a new TByteHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + */ + public TByteHash(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + this._hashingStrategy = this; + } + + /** + * Creates a new TByteHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteHash(TByteHashingStrategy strategy) { + super(); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TByteHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteHash(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TByteHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteHash(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor); + this._hashingStrategy = strategy; + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TByteHash h = (TByteHash) super.clone(); + h._set = (byte[]) this._set.clone(); + return h; + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _set = new byte[capacity]; + return capacity; + } + + /** + * Searches the set for val + * + * @param val an byte value + * @return a boolean value + */ + public boolean contains(byte val) { + return index(val) >= 0; + } + + /** + * Executes procedure for each element in the set. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the set terminated because + * the procedure returned false for some value. + */ + public boolean forEach(TByteProcedure procedure) { + byte[] states = _states; + byte[] set = _set; + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(set[i])) { + return false; + } + } + return true; + } + + /** + * Releases the element currently stored at index. 
+ * + * @param index an int value + */ + protected void removeAt(int index) { + _set[index] = (byte) 0; + super.removeAt(index); + } + + /** + * Locates the index of val. + * + * @param val an byte value + * @return the index of val or -1 if it isn't in the set. + */ + protected int index(byte val) { + int hash, probe, index, length; + + final byte[] states = _states; + final byte[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + // see Knuth, p. 529 + probe = 1 + (hash % (length - 2)); + + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)); + } + + return states[index] == FREE ? -1 : index; + } + + /** + * Locates the index at which val can be inserted. if + * there is already a value equal()ing val in the set, + * returns that value as a negative integer. + * + * @param val an byte value + * @return an int value + */ + protected int insertionIndex(byte val) { + int hash, probe, index, length; + + final byte[] states = _states; + final byte[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] == FREE) { + return index; // empty, all done + } else if (states[index] == FULL && set[index] == val) { + return -index - 1; // already stored + } else { // already FULL or REMOVED, must probe + // compute the double hash + probe = 1 + (hash % (length - 2)); + + // if the slot we landed on is FULL (but not removed), probe + // until we find an empty slot, a REMOVED slot, or an element + // equal to the one we are trying to insert. 
+ // finding an empty slot means that the value is not present + // and that we should use that slot as the insertion point; + // finding a REMOVED slot means that we need to keep searching, + // however we want to remember the offset of that REMOVED slot + // so we can reuse it in case a "new" insertion (i.e. not an update) + // is possible. + // finding a matching value means that we've found that our desired + // key is already in the table + + if (states[index] != REMOVED) { + // starting at the natural offset, probe until we find an + // offset that isn't full. + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] == FULL && set[index] != val); + } + + // if the index we found was removed: continue probing until we + // locate a free location or an element which equal()s the + // one we have. + if (states[index] == REMOVED) { + int firstRemoved = index; + while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + index -= probe; + if (index < 0) { + index += length; + } + } + return states[index] == FULL ? -index - 1 : firstRemoved; + } + // if it's full, the key is already stored + return states[index] == FULL ? -index - 1 : index; + } + } + + /** + * Default implementation of TByteHashingStrategy: + * delegates hashing to HashFunctions.hash(byte). + * + * @param val the value to hash + * @return the hashcode. + */ + public final int computeHashCode(byte val) { + return HashFunctions.hash(val); + } +} // TByteHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHashSet.java new file mode 100644 index 00000000000..02c874f7a40 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHashSet.java @@ -0,0 +1,373 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed set implementation for byte primitives. + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TByteHashSet extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + /** + * Creates a new TByteHashSet instance with the default + * capacity and load factor. + */ + public TByteHashSet() { + super(); + } + + /** + * Creates a new TByteHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteHashSet(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. 
+ * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteHashSet(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteHashSet instance containing the + * elements of array. + * + * @param array an array of byte primitives + */ + public TByteHashSet(byte[] array) { + this(array.length); + addAll(array); + } + + /** + * Creates a new TByteHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteHashSet(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteHashSet(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteHashSet(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new TByteHashSet instance containing the + * elements of array. + * + * @param array an array of byte primitives + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteHashSet(byte[] array, TByteHashingStrategy strategy) { + this(array.length, strategy); + addAll(array); + } + + /** + * @return a TByteIterator with access to the values in this set + */ + public TByteIterator iterator() { + return new TByteIterator(this); + } + + /** + * Inserts a value into the set. + * + * @param val an byte value + * @return true if the set was modified by the add operation + */ + public boolean add(byte val) { + int index = insertionIndex(val); + + if (index < 0) { + return false; // already present in set, nothing to add + } + + byte previousState = _states[index]; + _set[index] = val; + _states[index] = FULL; + postInsertHook(previousState == FREE); + + return true; // yes, we added something + } + + /** + * Expands the set to accommodate new values. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldSet[] = _set; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldSet[i]; + int index = insertionIndex(o); + _set[index] = o; + _states[index] = FULL; + } + } + } + + /** + * Returns a new array containing the values in the set. + * + * @return an byte[] value + */ + public byte[] toArray() { + byte[] result = new byte[size()]; + byte[] set = _set; + byte[] states = _states; + + for (int i = states.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + result[j++] = set[i]; + } + } + return result; + } + + /** + * Empties the set. + */ + public void clear() { + super.clear(); + byte[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + set[i] = (byte) 0; + states[i] = FREE; + } + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteHashSet)) { + return false; + } + final TByteHashSet that = (TByteHashSet) other; + if (that.size() != this.size()) { + return false; + } + return forEach(new TByteProcedure() { + public final boolean execute(byte value) { + return that.contains(value); + } + }); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEach(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key) { + h += _hashingStrategy.computeHashCode(key); + return true; + } + } + + /** + * Removes val from the set. + * + * @param val an byte value + * @return true if the set was modified by the remove operation. + */ + public boolean remove(byte val) { + int index = index(val); + if (index >= 0) { + removeAt(index); + return true; + } + return false; + } + + /** + * Tests the set to determine if all of the elements in + * array are present. + * + * @param array an array of byte primitives. + * @return true if all elements were present in the set. + */ + public boolean containsAll(byte[] array) { + for (int i = array.length; i-- > 0;) { + if (!contains(array[i])) { + return false; + } + } + return true; + } + + /** + * Adds all of the elements in array to the set. + * + * @param array an array of byte primitives. + * @return true if the set was modified by the add all operation. + */ + public boolean addAll(byte[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (add(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes all of the elements in array from the set. + * + * @param array an array of byte primitives. + * @return true if the set was modified by the remove all operation. 
+ */ + public boolean removeAll(byte[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (remove(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes any values in the set which are not contained in + * array. + * + * @param array an array of byte primitives. + * @return true if the set was modified by the retain all operation + */ + public boolean retainAll(byte[] array) { + boolean changed = false; + Arrays.sort(array); + byte[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && (Arrays.binarySearch(array, set[i]) < 0)) { + remove(set[i]); + changed = true; + } + } + return changed; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEach(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + + // ENTRIES + setUp(size); + while (size-- > 0) { + byte val = in.readByte(); + add(val); + } + } +} // TByteHashSet diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHashingStrategy.java new file mode 100644 index 00000000000..65a1128f3b4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteHashingStrategy.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface to support pluggable hashing strategies in maps and sets. + * Implementors can use this interface to make the trove hashing + * algorithms use an optimal strategy when computing hashcodes. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHashingStrategy.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteHashingStrategy extends Serializable { + /** + * Computes a hash code for the specified byte. Implementors + * can use the byte's own value or a custom scheme designed to + * minimize collisions for a known set of input. + * + * @param val byte for which the hashcode is to be computed + * @return the hashCode + */ + public int computeHashCode(byte val); +} // TByteHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntHashMap.java new file mode 100644 index 00000000000..4cf714fd38d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for byte keys and int values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TByteIntHashMap extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TByteIntProcedure PUT_ALL_PROC = new TByteIntProcedure() { + public boolean execute(byte key, int value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient int[] _values; + + /** + * Creates a new TByteIntHashMap instance with the default + * capacity and load factor. + */ + public TByteIntHashMap() { + super(); + } + + /** + * Creates a new TByteIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteIntHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteIntHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteIntHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteIntHashMap(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteIntHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteIntHashMap(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteIntHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteIntHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TByteIntHashMap m = (TByteIntHashMap) super.clone(); + m._values = (int[]) this._values.clone(); + return m; + } + + /** + * @return a TByteIntIterator with access to this map's keys and values + */ + public TByteIntIterator iterator() { + return new TByteIntIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new int[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an byte value + * @param value an int value + * @return the previous value associated with key, + * or (byte)0 if none was found. + */ + public int put(byte key, int value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an byte value + * @param value an int value + * @return the previous value associated with key, + * or (byte)0 if none was found. 
+ */ + public int putIfAbsent(byte key, int value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private int doPut(byte key, int value, int index) { + byte previousState; + int previous = (int) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TByteIntHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldKeys[] = _set; + int oldVals[] = _values; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _values = new int[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an byte value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public int get(byte key) { + int index = index(key); + return index < 0 ? (int) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + byte[] keys = _set; + int[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (byte) 0); + Arrays.fill(_values, 0, _values.length, (int) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an byte value + * @return an int value, or (byte)0 if no mapping for key exists + */ + public int remove(byte key) { + int prev = (int) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteIntHashMap)) { + return false; + } + TByteIntHashMap that = (TByteIntHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteIntProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key, int value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TByteIntProcedure { + private final TByteIntHashMap _otherMap; + + EqProcedure(TByteIntHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(byte key, int value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two ints for equality. 
+ */ + private final boolean eq(int v1, int v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (int) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public int[] getValues() { + int[] vals = new int[size()]; + int[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public byte[] keys() { + byte[] keys = new byte[size()]; + byte[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public byte[] keys(byte[] a) { + int size = size(); + if (a.length < size) { + a = (byte[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + byte[] k = (byte[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an int value + * @return a boolean value + */ + public boolean containsValue(int val) { + byte[] states = _states; + int[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an byte value + * @return a boolean value + */ + public boolean containsKey(byte key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TByteProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TIntProcedure procedure) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOByteIntProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TByteIntProcedure procedure) { + byte[] states = _states; + byte[] keys = _set; + int[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TByteIntProcedure procedure) { + boolean modified = false; + byte[] states = _states; + byte[] keys = _set; + int[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TIntFunction value + */ + public void transformValues(TIntFunction function) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(byte key) { + return adjustValue(key, (int) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(byte key, int amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public int adjustOrPutValue(final byte key, final int adjust_amount, final int put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final int newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + byte key = in.readByte(); + int val = in.readInt(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TByteIntProcedure() { + private boolean first = true; + + public boolean execute(byte key, int value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TByteIntHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntIterator.java new file mode 100644 index 00000000000..aea8aa1e7c8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteIntIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteIntHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteIntIterator(TByteIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TByteIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntProcedure.java new file mode 100644 index 00000000000..efd3d462b92 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a byte value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte a, int b); +}// TByteIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIterator.java new file mode 100644 index 00000000000..cd7a4885fb1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteIterator.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for byte collections. 
+ * + * @author Eric D. Friedman + * @version $Id: PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteIterator extends TPrimitiveIterator { + /** + * the collection on which the iterator operates + */ + private final TByteHash _hash; + + /** + * Creates a TByteIterator for the elements in the specified collection. + */ + public TByteIterator(TByteHash hash) { + super(hash); + this._hash = hash; + } + + /** + * Advances the iterator to the next element in the underlying collection + * and returns it. + * + * @return the next byte in the collection + * @throws NoSuchElementException if the iterator is already exhausted + */ + public byte next() { + moveToNextIndex(); + return _hash._set[_index]; + } +}// TByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongHashMap.java new file mode 100644 index 00000000000..2f971ba9b4b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for byte keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TByteLongHashMap extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TByteLongProcedure PUT_ALL_PROC = new TByteLongProcedure() { + public boolean execute(byte key, long value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TByteLongHashMap instance with the default + * capacity and load factor. + */ + public TByteLongHashMap() { + super(); + } + + /** + * Creates a new TByteLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteLongHashMap(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteLongHashMap(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteLongHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TByteLongHashMap m = (TByteLongHashMap) super.clone(); + m._values = (long[]) this._values.clone(); + return m; + } + + /** + * @return a TByteLongIterator with access to this map's keys and values + */ + public TByteLongIterator iterator() { + return new TByteLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an byte value + * @param value an long value + * @return the previous value associated with key, + * or (byte)0 if none was found. + */ + public long put(byte key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an byte value + * @param value an long value + * @return the previous value associated with key, + * or (byte)0 if none was found. 
+ */ + public long putIfAbsent(byte key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(byte key, long value, int index) { + byte previousState; + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TByteLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldKeys[] = _set; + long oldVals[] = _values; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _values = new long[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an byte value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public long get(byte key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + byte[] keys = _set; + long[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (byte) 0); + Arrays.fill(_values, 0, _values.length, (long) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an byte value + * @return an long value, or (byte)0 if no mapping for key exists + */ + public long remove(byte key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteLongHashMap)) { + return false; + } + TByteLongHashMap that = (TByteLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteLongProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key, long value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TByteLongProcedure { + private final TByteLongHashMap _otherMap; + + EqProcedure(TByteLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(byte key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. 
+ */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (long) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public byte[] keys() { + byte[] keys = new byte[size()]; + byte[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public byte[] keys(byte[] a) { + int size = size(); + if (a.length < size) { + a = (byte[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + byte[] k = (byte[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + byte[] states = _states; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an byte value + * @return a boolean value + */ + public boolean containsKey(byte key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TByteProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOByteLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TByteLongProcedure procedure) { + byte[] states = _states; + byte[] keys = _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TByteLongProcedure procedure) { + boolean modified = false; + byte[] states = _states; + byte[] keys = _set; + long[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(byte key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(byte key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final byte key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + byte key = in.readByte(); + long val = in.readLong(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TByteLongProcedure() { + private boolean first = true; + + public boolean execute(byte key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TByteLongHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongIterator.java new file mode 100644 index 00000000000..8df153f45eb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteLongIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteLongHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteLongIterator(TByteLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TByteLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongProcedure.java new file mode 100644 index 00000000000..a96b639da6c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a byte value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte a, long b); +}// TByteLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectHashMap.java new file mode 100644 index 00000000000..91f97d53e4a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectHashMap.java @@ -0,0 +1,632 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for byte keys and Object values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TByteObjectHashMap extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TByteObjectProcedure PUT_ALL_PROC = new TByteObjectProcedure() { + public boolean execute(byte key, V value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new TByteObjectHashMap instance with the default + * capacity and load factor. + */ + public TByteObjectHashMap() { + super(); + } + + /** + * Creates a new TByteObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteObjectHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteObjectHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteObjectHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteObjectHashMap(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteObjectHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteObjectHashMap(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteObjectHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteObjectHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public TByteObjectHashMap clone() { + TByteObjectHashMap m = (TByteObjectHashMap) super.clone(); + m._values = (V[]) this._values.clone(); + return m; + } + + /** + * @return a TByteObjectIterator with access to this map's keys and values + */ + public TByteObjectIterator iterator() { + return new TByteObjectIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an byte value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(byte key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an byte value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. 
+ */ + public V putIfAbsent(byte key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(byte key, V value, int index) { + byte previousState; + V previous = null; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TByteObjectHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldKeys[] = _set; + V oldVals[] = _values; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _values = (V[]) new Object[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an byte value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public V get(byte key) { + int index = index(key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + byte[] keys = _set; + Object[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (byte) 0); + Arrays.fill(_values, 0, _values.length, null); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an byte value + * @return an Object value or (byte)0 if no such mapping exists. + */ + public V remove(byte key) { + V prev = null; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteObjectHashMap)) { + return false; + } + TByteObjectHashMap that = (TByteObjectHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key, Object value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TByteObjectProcedure { + private final TByteObjectHashMap _otherMap; + + EqProcedure(TByteObjectHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(byte key, Object value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two objects for equality. 
+ */ + private final boolean eq(Object o1, Object o2) { + return o1 == o2 || ((o1 != null) && o1.equals(o2)); + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + * @see #getValues(Object[]) + */ + public Object[] getValues() { + Object[] vals = new Object[size()]; + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * Return the values of the map; the runtime type of the returned array is that of + * the specified array. + * + * @param a the array into which the elements of this collection are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing the elements of this collection + * @throws ArrayStoreException the runtime type of the specified array is + * not a supertype of the runtime type of every element in this + * collection. + * @throws NullPointerException if the specified array is null. + * @see #getValues() + */ + public T[] getValues(T[] a) { + if (a.length < _size) { + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + _size); + } + + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = (T) v[i]; + } + } + return a; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public byte[] keys() { + byte[] keys = new byte[size()]; + byte[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. 
+ * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public byte[] keys(byte[] a) { + int size = size(); + if (a.length < size) { + a = (byte[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + byte[] k = (byte[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(V val) { + byte[] states = _states; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an byte value + * @return a boolean value + */ + public boolean containsKey(byte key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TByteProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEachValue(TObjectProcedure procedure) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOByteObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TByteObjectProcedure procedure) { + byte[] states = _states; + byte[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TByteObjectProcedure procedure) { + boolean modified = false; + byte[] states = _states; + byte[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + byte key = in.readByte(); + V val = (V) in.readObject(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TByteObjectProcedure() { + private boolean first = true; + + public boolean execute(byte key, Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TByteObjectHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectIterator.java new file mode 100644 index 00000000000..64d100b79b7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and Object. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteObjectIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2OIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteObjectIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteObjectHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteObjectIterator(TByteObjectHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public V value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public V setValue(V val) { + V old = value(); + _map._values[_index] = val; + return old; + } +}// TByteObjectIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectProcedure.java new file mode 100644 index 00000000000..0659fc0f396 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteObjectProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and Object. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2OProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a byte value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte a, T b); +}// TByteObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteProcedure.java new file mode 100644 index 00000000000..174ed81f9ff --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteProcedure.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for procedures with one byte parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: PProcedure.template,v 1.2 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TByteProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value of type byte + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte value); +}// TByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortHashMap.java new file mode 100644 index 00000000000..5c52b883419 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for byte keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TByteShortHashMap extends TByteHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TByteShortProcedure PUT_ALL_PROC = new TByteShortProcedure() { + public boolean execute(byte key, short value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TByteShortHashMap instance with the default + * capacity and load factor. + */ + public TByteShortHashMap() { + super(); + } + + /** + * Creates a new TByteShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TByteShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TByteShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TByteShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TByteShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteShortHashMap(TByteHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TByteShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TByteShortHashMap(int initialCapacity, TByteHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TByteShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TByteShortHashMap(int initialCapacity, float loadFactor, TByteHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TByteShortHashMap m = (TByteShortHashMap) super.clone(); + m._values = (short[]) this._values.clone(); + return m; + } + + /** + * @return a TByteShortIterator with access to this map's keys and values + */ + public TByteShortIterator iterator() { + return new TByteShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an byte value + * @param value an short value + * @return the previous value associated with key, + * or (byte)0 if none was found. + */ + public short put(byte key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an byte value + * @param value an short value + * @return the previous value associated with key, + * or (byte)0 if none was found. 
+ */ + public short putIfAbsent(byte key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(byte key, short value, int index) { + byte previousState; + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TByteShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + byte oldKeys[] = _set; + short oldVals[] = _values; + byte oldStates[] = _states; + + _set = new byte[newCapacity]; + _values = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + byte o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an byte value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public short get(byte key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + byte[] keys = _set; + short[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (byte) 0); + Arrays.fill(_values, 0, _values.length, (short) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an byte value + * @return an short value, or (byte)0 if no mapping for key exists + */ + public short remove(byte key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TByteShortHashMap)) { + return false; + } + TByteShortHashMap that = (TByteShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TByteShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(byte key, short value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TByteShortProcedure { + private final TByteShortHashMap _otherMap; + + EqProcedure(TByteShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(byte key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. 
+ */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (short) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public byte[] keys() { + byte[] keys = new byte[size()]; + byte[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public byte[] keys(byte[] a) { + int size = size(); + if (a.length < size) { + a = (byte[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + byte[] k = (byte[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + byte[] states = _states; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an byte value + * @return a boolean value + */ + public boolean containsKey(byte key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TByteProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOByteShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TByteShortProcedure procedure) { + byte[] states = _states; + byte[] keys = _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TByteShortProcedure procedure) { + boolean modified = false; + byte[] states = _states; + byte[] keys = _set; + short[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(byte key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(byte key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final byte key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + byte key = in.readByte(); + short val = in.readShort(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TByteShortProcedure() { + private boolean first = true; + + public boolean execute(byte key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TByteShortHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortIterator.java new file mode 100644 index 00000000000..193bbee5946 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type byte and short. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TByteShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TByteShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TByteShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TByteShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TByteShortIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TByteShortHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TByteShortIterator(TByteShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public byte key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TByteShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortProcedure.java new file mode 100644 index 00000000000..2941284fa76 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type byte and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TByteShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a byte value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(byte a, short b); +}// TByteShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteStack.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteStack.java new file mode 100644 index 00000000000..578f415885b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TByteStack.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * A stack of byte primitives, backed by a TByteArrayList. + * + * @author Eric D. 
Friedman, Rob Eden + * @version $Id: PStack.template,v 1.2 2007/02/28 23:03:57 robeden Exp $ + */ + +public class TByteStack { + + /** + * the list used to hold the stack values. + */ + protected TByteArrayList _list; + + public static final int DEFAULT_CAPACITY = TByteArrayList.DEFAULT_CAPACITY; + + /** + * Creates a new TByteStack instance with the default + * capacity. + */ + public TByteStack() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TByteStack instance with the + * specified capacity. + * + * @param capacity the initial depth of the stack + */ + public TByteStack(int capacity) { + _list = new TByteArrayList(capacity); + } + + /** + * Pushes the value onto the top of the stack. + * + * @param val an byte value + */ + public void push(byte val) { + _list.add(val); + } + + /** + * Removes and returns the value at the top of the stack. + * + * @return an byte value + */ + public byte pop() { + return _list.remove(_list.size() - 1); + } + + /** + * Returns the value at the top of the stack. + * + * @return an byte value + */ + public byte peek() { + return _list.get(_list.size() - 1); + } + + /** + * Returns the current depth of the stack. + */ + public int size() { + return _list.size(); + } + + /** + * Clears the stack, reseting its capacity to the default. + */ + public void clear() { + _list.clear(DEFAULT_CAPACITY); + } + + /** + * Clears the stack without releasing its internal capacity allocation. + */ + public void reset() { + _list.reset(); + } + + /** + * Copies the contents of the stack into a native array. Note that this will NOT + * pop them out of the stack. + * + * @return an byte[] value + */ + public byte[] toNativeArray() { + return _list.toNativeArray(); + } + + /** + * Copies a slice of the list into a native array. Note that this will NOT + * pop them out of the stack. + * + * @param dest the array to copy into. 
+ */ + public void toNativeArray(byte[] dest) { + _list.toNativeArray(dest, 0, size()); + } +} // TByteStack diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleArrayList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleArrayList.java new file mode 100644 index 00000000000..35780f46674 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleArrayList.java @@ -0,0 +1,935 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; +import java.util.Random; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * A resizable, array-backed list of double primitives. + *

    + * Created: Sat Dec 29 14:21:12 2001 + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TDoubleArrayList implements Externalizable, Cloneable { + static final long serialVersionUID = 1L; + + /** + * the data of the list + */ + protected double[] _data; + + /** + * the index after the last entry in the list + */ + protected int _pos; + + /** + * the default capacity for new lists + */ + protected static final int DEFAULT_CAPACITY = 10; + + /** + * Creates a new TDoubleArrayList instance with the + * default capacity. + */ + public TDoubleArrayList() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TDoubleArrayList instance with the + * specified capacity. + * + * @param capacity an int value + */ + public TDoubleArrayList(int capacity) { + _data = new double[capacity]; + _pos = 0; + } + + /** + * Creates a new TDoubleArrayList instance whose + * capacity is the greater of the length of values and + * DEFAULT_CAPACITY and whose initial contents are the specified + * values. + * + * @param values an double[] value + */ + public TDoubleArrayList(double[] values) { + this(Math.max(values.length, DEFAULT_CAPACITY)); + add(values); + } + + // sizing + + /** + * Grow the internal array as needed to accommodate the specified + * number of elements. The size of the array doubles on each + * resize unless capacity requires more than twice the + * current capacity. + * + * @param capacity an int value + */ + public void ensureCapacity(int capacity) { + if (capacity > _data.length) { + int newCap = Math.max(_data.length << 1, capacity); + double[] tmp = new double[newCap]; + System.arraycopy(_data, 0, tmp, 0, _data.length); + _data = tmp; + } + } + + /** + * Returns the number of values in the list. + * + * @return the number of values in the list. + */ + public int size() { + return _pos; + } + + /** + * Tests whether this list contains any values. + * + * @return true if the list is empty. 
+ */ + public boolean isEmpty() { + return _pos == 0; + } + + /** + * Sheds any excess capacity above and beyond the current size of + * the list. + */ + public void trimToSize() { + if (_data.length > size()) { + double[] tmp = new double[size()]; + toNativeArray(tmp, 0, tmp.length); + _data = tmp; + } + } + + // modifying + + /** + * Adds val to the end of the list, growing as needed. + * + * @param val an double value + */ + public void add(double val) { + ensureCapacity(_pos + 1); + _data[_pos++] = val; + } + + /** + * Adds the values in the array vals to the end of the + * list, in order. + * + * @param vals an double[] value + */ + public void add(double[] vals) { + add(vals, 0, vals.length); + } + + /** + * Adds a subset of the values in the array vals to the + * end of the list, in order. + * + * @param vals an double[] value + * @param offset the offset at which to start copying + * @param length the number of values to copy. + */ + public void add(double[] vals, int offset, int length) { + ensureCapacity(_pos + length); + System.arraycopy(vals, offset, _data, _pos, length); + _pos += length; + } + + /** + * Inserts value into the list at offset. All + * values including and to the right of offset are shifted + * to the right. + * + * @param offset an int value + * @param value an double value + */ + public void insert(int offset, double value) { + if (offset == _pos) { + add(value); + return; + } + ensureCapacity(_pos + 1); + // shift right + System.arraycopy(_data, offset, _data, offset + 1, _pos - offset); + // insert + _data[offset] = value; + _pos++; + } + + /** + * Inserts the array of values into the list at + * offset. All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an double[] value + */ + public void insert(int offset, double[] values) { + insert(offset, values, 0, values.length); + } + + /** + * Inserts a slice of the array of values into the list + * at offset. 
All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an double[] value + * @param valOffset the offset in the values array at which to + * start copying. + * @param len the number of values to copy from the values array + */ + public void insert(int offset, double[] values, int valOffset, int len) { + if (offset == _pos) { + add(values, valOffset, len); + return; + } + + ensureCapacity(_pos + len); + // shift right + System.arraycopy(_data, offset, _data, offset + len, _pos - offset); + // insert + System.arraycopy(values, valOffset, _data, offset, len); + _pos += len; + } + + /** + * Returns the value at the specified offset. + * + * @param offset an int value + * @return an double value + */ + public double get(int offset) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + return _data[offset]; + } + + /** + * Returns the value at the specified offset without doing any + * bounds checking. + * + * @param offset an int value + * @return an double value + */ + public double getQuick(int offset) { + return _data[offset]; + } + + /** + * Sets the value at the specified offset. + * + * @param offset an int value + * @param val an double value + */ + public void set(int offset, double val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + _data[offset] = val; + } + + /** + * Sets the value at the specified offset and returns the + * previously stored value. + * + * @param offset an int value + * @param val an double value + * @return the value previously stored at offset. + */ + public double getSet(int offset, double val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + double old = _data[offset]; + _data[offset] = val; + return old; + } + + /** + * Replace the values in the list starting at offset with + * the contents of the values array. 
+ * + * @param offset the first offset to replace + * @param values the source of the new values + */ + public void set(int offset, double[] values) { + set(offset, values, 0, values.length); + } + + /** + * Replace the values in the list starting at offset with + * length values from the values array, starting + * at valOffset. + * + * @param offset the first offset to replace + * @param values the source of the new values + * @param valOffset the first value to copy from the values array + * @param length the number of values to copy + */ + public void set(int offset, double[] values, int valOffset, int length) { + if (offset < 0 || offset + length > _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(values, valOffset, _data, offset, length); + } + + /** + * Sets the value at the specified offset without doing any bounds + * checking. + * + * @param offset an int value + * @param val an double value + */ + public void setQuick(int offset, double val) { + _data[offset] = val; + } + + /** + * Flushes the internal state of the list, resetting the capacity + * to the default. + */ + public void clear() { + clear(DEFAULT_CAPACITY); + } + + /** + * Flushes the internal state of the list, setting the capacity of + * the empty list to capacity. + * + * @param capacity an int value + */ + public void clear(int capacity) { + _data = new double[capacity]; + _pos = 0; + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list without + * allocating new backing arrays. + * + * @see #clear + */ + public void reset() { + _pos = 0; + fill((double) 0); + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list + * without allocating new backing arrays. 
This method differs + * from {@link #reset reset} in that it does not clear the old + * values in the backing array. Thus, it is possible for {@link + * #getQuick getQuick} to return stale data if this method is used + * and the caller is careless about bounds checking. + * + * @see #reset + * @see #clear + * @see #getQuick + */ + public void resetQuick() { + _pos = 0; + } + + /** + * Removes the value at offset from the list. + * + * @param offset an int value + * @return the value previously stored at offset. + */ + public double remove(int offset) { + double old = get(offset); + remove(offset, 1); + return old; + } + + /** + * Removes length values from the list, starting at + * offset + * + * @param offset an int value + * @param length an int value + */ + public void remove(int offset, int length) { + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + + if (offset == 0) { + // data at the front + System.arraycopy(_data, length, _data, 0, _pos - length); + } else if (_pos - length == offset) { + // no copy to make, decrementing pos "deletes" values at + // the end + } else { + // data in the middle + System.arraycopy(_data, offset + length, + _data, offset, _pos - (offset + length)); + } + _pos -= length; + // no need to clear old values beyond _pos, because this is a + // primitive collection and 0 takes as much room as any other + // value + } + + /** + * Transform each value in the list using the specified function. + * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + for (int i = _pos; i-- > 0;) { + _data[i] = function.execute(_data[i]); + } + } + + /** + * Reverse the order of the elements in the list. + */ + public void reverse() { + reverse(0, _pos); + } + + /** + * Reverse the order of the elements in the range of the list. 
+ * + * @param from the inclusive index at which to start reversing + * @param to the exclusive index at which to stop reversing + */ + public void reverse(int from, int to) { + if (from == to) { + return; // nothing to do + } + if (from > to) { + throw new IllegalArgumentException("from cannot be greater than to"); + } + for (int i = from, j = to - 1; i < j; i++, j--) { + swap(i, j); + } + } + + /** + * Shuffle the elements of the list using the specified random + * number generator. + * + * @param rand a Random value + */ + public void shuffle(Random rand) { + for (int i = _pos; i-- > 1;) { + swap(i, rand.nextInt(i)); + } + } + + /** + * Swap the values at offsets i and j. + * + * @param i an offset into the data array + * @param j an offset into the data array + */ + private final void swap(int i, int j) { + double tmp = _data[i]; + _data[i] = _data[j]; + _data[j] = tmp; + } + + // copying + + /** + * Returns a clone of this list. Since this is a primitive + * collection, this will be a deep clone. + * + * @return a deep clone of the list. + */ + public Object clone() { + TDoubleArrayList list = null; + try { + list = (TDoubleArrayList) super.clone(); + list._data = toNativeArray(); + } catch (CloneNotSupportedException e) { + // it's supported + } // end of try-catch + return list; + } + + + /** + * Returns a sublist of this list. + * + * @param begin low endpoint (inclusive) of the subList. + * @param end high endpoint (exclusive) of the subList. + * @return sublist of this list from begin, inclusive to end, exclusive. 
+ * @throws IndexOutOfBoundsException - endpoint out of range + * @throws IllegalArgumentException - endpoints out of order (end > begin) + */ + public TDoubleArrayList subList(int begin, int end) { + if (end < begin) throw new IllegalArgumentException("end index " + end + " greater than begin index " + begin); + if (begin < 0) throw new IndexOutOfBoundsException("begin index can not be < 0"); + if (end > _data.length) throw new IndexOutOfBoundsException("end index < " + _data.length); + TDoubleArrayList list = new TDoubleArrayList(end - begin); + for (int i = begin; i < end; i++) { + list.add(_data[i]); + } + return list; + } + + + /** + * Copies the contents of the list into a native array. + * + * @return an double[] value + */ + public double[] toNativeArray() { + return toNativeArray(0, _pos); + } + + /** + * Copies a slice of the list into a native array. + * + * @param offset the offset at which to start copying + * @param len the number of values to copy. + * @return an double[] value + */ + public double[] toNativeArray(int offset, int len) { + double[] rv = new double[len]; + toNativeArray(rv, offset, len); + return rv; + } + + /** + * Copies a slice of the list into a native array. + * + * @param dest the array to copy into. + * @param offset the offset of the first value to copy + * @param len the number of values to copy. + */ + public void toNativeArray(double[] dest, int offset, int len) { + if (len == 0) { + return; // nothing to copy + } + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(_data, offset, dest, 0, len); + } + + // comparing + + /** + * Compares this list to another list, value by value. + * + * @param other the object to compare against + * @return true if other is a TDoubleArrayList and has exactly the + * same values. 
+ */ + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TDoubleArrayList) { + TDoubleArrayList that = (TDoubleArrayList) other; + if (that.size() != this.size()) { + return false; + } else { + for (int i = _pos; i-- > 0;) { + if (this._data[i] != that._data[i]) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + public int hashCode() { + int h = 0; + for (int i = _pos; i-- > 0;) { + h = 37 * h + HashFunctions.hash(_data[i]); + } + return h; + } + + // procedures + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a TDoubleProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEach(TDoubleProcedure procedure) { + for (int i = 0; i < _pos; i++) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + /** + * Applies the procedure to each value in the list in descending + * (back to front) order. + * + * @param procedure a TDoubleProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEachDescending(TDoubleProcedure procedure) { + for (int i = _pos; i-- > 0;) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + // sorting + + /** + * Sort the values in the list (ascending) using the Sun quicksort + * implementation. + * + * @see java.util.Arrays#sort + */ + public void sort() { + Arrays.sort(_data, 0, _pos); + } + + /** + * Sort a slice of the list (ascending) using the Sun quicksort + * implementation. 
+ * + * @param fromIndex the index at which to start sorting (inclusive) + * @param toIndex the index at which to stop sorting (exclusive) + * @see java.util.Arrays#sort + */ + public void sort(int fromIndex, int toIndex) { + Arrays.sort(_data, fromIndex, toIndex); + } + + // filling + + /** + * Fills every slot in the list with the specified value. + * + * @param val the value to use when filling + */ + public void fill(double val) { + Arrays.fill(_data, 0, _pos, val); + } + + /** + * Fills a range in the list with the specified value. + * + * @param fromIndex the offset at which to start filling (inclusive) + * @param toIndex the offset at which to stop filling (exclusive) + * @param val the value to use when filling + */ + public void fill(int fromIndex, int toIndex, double val) { + if (toIndex > _pos) { + ensureCapacity(toIndex); + _pos = toIndex; + } + Arrays.fill(_data, fromIndex, toIndex, val); + } + + // searching + + /** + * Performs a binary search for value in the entire list. + * Note that you must @{link #sort sort} the list before + * doing a search. + * + * @param value the value to search for + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(double value) { + return binarySearch(value, 0, _pos); + } + + /** + * Performs a binary search for value in the specified + * range. Note that you must @{link #sort sort} the list + * or the range before doing a search. + * + * @param value the value to search for + * @param fromIndex the lower boundary of the range (inclusive) + * @param toIndex the upper boundary of the range (exclusive) + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. 
+ */ + public int binarySearch(double value, int fromIndex, int toIndex) { + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); + } + if (toIndex > _pos) { + throw new ArrayIndexOutOfBoundsException(toIndex); + } + + int low = fromIndex; + int high = toIndex - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + double midVal = _data[mid]; + + if (midVal < value) { + low = mid + 1; + } else if (midVal > value) { + high = mid - 1; + } else { + return mid; // value found + } + } + return -(low + 1); // value not found. + } + + /** + * Searches the list front to back for the index of + * value. + * + * @param value an double value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(double value) { + return indexOf(0, value); + } + + /** + * Searches the list front to back for the index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (inclusive) + * @param value an double value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int offset, double value) { + for (int i = offset; i < _pos; i++) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list back to front for the last index of + * value. + * + * @param value an double value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(double value) { + return lastIndexOf(_pos, value); + } + + /** + * Searches the list back to front for the last index of + * value, starting at offset. 
+ * + * @param offset the offset at which to start the linear search + * (exclusive) + * @param value an double value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int offset, double value) { + for (int i = offset; i-- > 0;) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list for value + * + * @param value an double value + * @return true if value is in the list. + */ + public boolean contains(double value) { + return lastIndexOf(value) >= 0; + } + + /** + * Searches the list for values satisfying condition in + * the manner of the *nix grep utility. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which match the condition. + */ + public TDoubleArrayList grep(TDoubleProcedure condition) { + TDoubleArrayList list = new TDoubleArrayList(); + for (int i = 0; i < _pos; i++) { + if (condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Searches the list for values which do not satisfy + * condition. This is akin to *nix grep -v. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which do not match the condition. + */ + public TDoubleArrayList inverseGrep(TDoubleProcedure condition) { + TDoubleArrayList list = new TDoubleArrayList(); + for (int i = 0; i < _pos; i++) { + if (!condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Finds the maximum value in the list. + * + * @return the largest value in the list. 
+ * @throws IllegalStateException if the list is empty + */ + public double max() { + if (size() == 0) { + throw new IllegalStateException("cannot find maximum of an empty list"); + } + double max = Double.NEGATIVE_INFINITY; + for (int i = 0; i < _pos; i++) { + if (_data[i] > max) { + max = _data[i]; + } + } + return max; + } + + /** + * Finds the minimum value in the list. + * + * @return the smallest value in the list. + * @throws IllegalStateException if the list is empty + */ + public double min() { + if (size() == 0) { + throw new IllegalStateException("cannot find minimum of an empty list"); + } + double min = Double.POSITIVE_INFINITY; + for (int i = 0; i < _pos; i++) { + if (_data[i] < min) { + min = _data[i]; + } + } + return min; + } + + // stringification + + /** + * Returns a String representation of the list, front to back. + * + * @return a String value + */ + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + for (int i = 0, end = _pos - 1; i < end; i++) { + buf.append(_data[i]); + buf.append(", "); + } + if (size() > 0) { + buf.append(_data[_pos - 1]); + } + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // POSITION + out.writeInt(_pos); + + // ENTRIES + int len = _pos; + out.writeInt(_pos); // Written twice for backwards compatability with + // version 0 + for (int i = 0; i < len; i++) { + out.writeDouble(_data[i]); + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // POSITION + _pos = in.readInt(); + + // ENTRIES + int len = in.readInt(); + _data = new double[len]; + for (int i = 0; i < len; i++) { + _data[i] = in.readDouble(); + } + } +} // TDoubleArrayList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteHashMap.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteHashMap.java new file mode 100644 index 00000000000..33538828fe7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleByteHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleByteProcedure PUT_ALL_PROC = new TDoubleByteProcedure() { + public boolean execute(double key, byte value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TDoubleByteHashMap instance with the default + * capacity and load factor. + */ + public TDoubleByteHashMap() { + super(); + } + + /** + * Creates a new TDoubleByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleByteHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleByteHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleByteHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleByteHashMap m = (TDoubleByteHashMap) super.clone(); + m._values = (byte[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleByteIterator with access to this map's keys and values + */ + public TDoubleByteIterator iterator() { + return new TDoubleByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an byte value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public byte put(double key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an double value + * @param value an byte value + * @return the previous value associated with key, + * or (double)0 if none was found. 
+ */ + public byte putIfAbsent(double key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(double key, byte value, int index) { + byte previousState; + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + byte oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public byte get(double key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + byte[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, (byte) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an byte value, or (double)0 if no mapping for key exists + */ + public byte remove(double key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleByteHashMap)) { + return false; + } + TDoubleByteHashMap that = (TDoubleByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, byte value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleByteProcedure { + private final TDoubleByteHashMap _otherMap; + + EqProcedure(TDoubleByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. 
+ */ + private final boolean eq(byte v1, byte v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (byte) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public byte[] getValues() { + byte[] vals = new byte[size()]; + byte[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an byte value + * @return a boolean value + */ + public boolean containsValue(byte val) { + byte[] states = _states; + byte[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TByteProcedure procedure) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleByteProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleByteProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + byte[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleByteProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + byte[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(double key) { + return adjustValue(key, (byte) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(double key, byte amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public byte adjustOrPutValue(final double key, final byte adjust_amount, final byte put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final byte newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + byte val = in.readByte(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleByteProcedure() { + private boolean first = true; + + public boolean execute(double key, byte value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleByteHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteIterator.java new file mode 100644 index 00000000000..ffb82a59776 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleByteIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleByteHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleByteIterator(TDoubleByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteProcedure.java new file mode 100644 index 00000000000..93337159081 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and byte. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleByteProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b a byte value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, byte b); +}// TDoubleByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleHashMap.java new file mode 100644 index 00000000000..82bb05f9714 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and double values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleDoubleHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleDoubleProcedure PUT_ALL_PROC = new TDoubleDoubleProcedure() { + public boolean execute(double key, double value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient double[] _values; + + /** + * Creates a new TDoubleDoubleHashMap instance with the default + * capacity and load factor. + */ + public TDoubleDoubleHashMap() { + super(); + } + + /** + * Creates a new TDoubleDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleDoubleHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleDoubleHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleDoubleHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleDoubleHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleDoubleHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleDoubleHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleDoubleHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleDoubleHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleDoubleHashMap m = (TDoubleDoubleHashMap) super.clone(); + m._values = (double[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleDoubleIterator with access to this map's keys and values + */ + public TDoubleDoubleIterator iterator() { + return new TDoubleDoubleIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new double[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an double value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public double put(double key, double value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. 
+ * + * @param key an double value + * @param value an double value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public double putIfAbsent(double key, double value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private double doPut(double key, double value, int index) { + byte previousState; + double previous = (double) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleDoubleHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + double oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = new double[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public double get(double key) { + int index = index(key); + return index < 0 ? (double) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + double[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, (double) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an double value, or (double)0 if no mapping for key exists + */ + public double remove(double key) { + double prev = (double) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleDoubleHashMap)) { + return false; + } + TDoubleDoubleHashMap that = (TDoubleDoubleHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleDoubleProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, double value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleDoubleProcedure { + private final TDoubleDoubleHashMap _otherMap; + + EqProcedure(TDoubleDoubleHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, double value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two doubles for equality. 
+ */ + private final boolean eq(double v1, double v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (double) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public double[] getValues() { + double[] vals = new double[size()]; + double[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an double value + * @return a boolean value + */ + public boolean containsValue(double val) { + byte[] states = _states; + double[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TDoubleProcedure procedure) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleDoubleProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleDoubleProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + double[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleDoubleProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + double[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(double key) { + return adjustValue(key, (double) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(double key, double amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public double adjustOrPutValue(final double key, final double adjust_amount, final double put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final double newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + double val = in.readDouble(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleDoubleProcedure() { + private boolean first = true; + + public boolean execute(double key, double value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleDoubleHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleIterator.java new file mode 100644 index 00000000000..74dce7adb5a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleDoubleIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleDoubleHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleDoubleIterator(TDoubleDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleProcedure.java new file mode 100644 index 00000000000..b0f7033fb8e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, double b); +}// TDoubleDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatHashMap.java new file mode 100644 index 00000000000..f9572b02a12 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleFloatHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleFloatProcedure PUT_ALL_PROC = new TDoubleFloatProcedure() { + public boolean execute(double key, float value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TDoubleFloatHashMap instance with the default + * capacity and load factor. + */ + public TDoubleFloatHashMap() { + super(); + } + + /** + * Creates a new TDoubleFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleFloatHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleFloatHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleFloatHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleFloatHashMap m = (TDoubleFloatHashMap) super.clone(); + m._values = (float[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleFloatIterator with access to this map's keys and values + */ + public TDoubleFloatIterator iterator() { + return new TDoubleFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an float value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public float put(double key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an double value + * @param value an float value + * @return the previous value associated with key, + * or (double)0 if none was found. 
+ */ + public float putIfAbsent(double key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(double key, float value, int index) { + byte previousState; + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + float oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public float get(double key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + float[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, (float) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an float value, or (double)0 if no mapping for key exists + */ + public float remove(double key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleFloatHashMap)) { + return false; + } + TDoubleFloatHashMap that = (TDoubleFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, float value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleFloatProcedure { + private final TDoubleFloatHashMap _otherMap; + + EqProcedure(TDoubleFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. 
+ */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (float) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + byte[] states = _states; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleFloatProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleFloatProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + float[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(double key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(double key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final double key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + float val = in.readFloat(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleFloatProcedure() { + private boolean first = true; + + public boolean execute(double key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleFloatHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatIterator.java new file mode 100644 index 00000000000..8b88f88b6e8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleFloatIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleFloatHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleFloatIterator(TDoubleFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatProcedure.java new file mode 100644 index 00000000000..f917ff7ea5c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, float b); +}// TDoubleFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFunction.java new file mode 100644 index 00000000000..68e16c5060b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleFunction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for functions that accept and return one double primitive. + *

    + * Created: Mon Nov 5 22:19:36 2001 + * + * @author Eric D. Friedman + * @version $Id: PFunction.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleFunction { + /** + * Execute this function with value + * + * @param value a double input + * @return a double result + */ + public double execute(double value); +}// TDoubleFunction diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHash.java new file mode 100644 index 00000000000..8375e316eef --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHash.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed hashing implementation for double primitives. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHash.template,v 1.2 2007/06/29 22:39:46 robeden Exp $ + */ + +abstract public class TDoubleHash extends TPrimitiveHash implements TDoubleHashingStrategy { + + /** + * the set of doubles + */ + protected transient double[] _set; + + /** + * strategy used to hash values in this collection + */ + protected TDoubleHashingStrategy _hashingStrategy; + + /** + * Creates a new TDoubleHash instance with the default + * capacity and load factor. + */ + public TDoubleHash() { + super(); + this._hashingStrategy = this; + } + + /** + * Creates a new TDoubleHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + */ + public TDoubleHash(int initialCapacity) { + super(initialCapacity); + this._hashingStrategy = this; + } + + /** + * Creates a new TDoubleHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + */ + public TDoubleHash(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + this._hashingStrategy = this; + } + + /** + * Creates a new TDoubleHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleHash(TDoubleHashingStrategy strategy) { + super(); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TDoubleHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleHash(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TDoubleHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleHash(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor); + this._hashingStrategy = strategy; + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleHash h = (TDoubleHash) super.clone(); + h._set = (double[]) this._set.clone(); + return h; + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _set = new double[capacity]; + return capacity; + } + + /** + * Searches the set for val + * + * @param val an double value + * @return a boolean value + */ + public boolean contains(double val) { + return index(val) >= 0; + } + + /** + * Executes procedure for each element in the set. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the set terminated because + * the procedure returned false for some value. + */ + public boolean forEach(TDoubleProcedure procedure) { + byte[] states = _states; + double[] set = _set; + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(set[i])) { + return false; + } + } + return true; + } + + /** + * Releases the element currently stored at index. 
+ * + * @param index an int value + */ + protected void removeAt(int index) { + _set[index] = (double) 0; + super.removeAt(index); + } + + /** + * Locates the index of val. + * + * @param val an double value + * @return the index of val or -1 if it isn't in the set. + */ + protected int index(double val) { + int hash, probe, index, length; + + final byte[] states = _states; + final double[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + // see Knuth, p. 529 + probe = 1 + (hash % (length - 2)); + + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)); + } + + return states[index] == FREE ? -1 : index; + } + + /** + * Locates the index at which val can be inserted. if + * there is already a value equal()ing val in the set, + * returns that value as a negative integer. + * + * @param val an double value + * @return an int value + */ + protected int insertionIndex(double val) { + int hash, probe, index, length; + + final byte[] states = _states; + final double[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] == FREE) { + return index; // empty, all done + } else if (states[index] == FULL && set[index] == val) { + return -index - 1; // already stored + } else { // already FULL or REMOVED, must probe + // compute the double hash + probe = 1 + (hash % (length - 2)); + + // if the slot we landed on is FULL (but not removed), probe + // until we find an empty slot, a REMOVED slot, or an element + // equal to the one we are trying to insert. 
+ // finding an empty slot means that the value is not present + // and that we should use that slot as the insertion point; + // finding a REMOVED slot means that we need to keep searching, + // however we want to remember the offset of that REMOVED slot + // so we can reuse it in case a "new" insertion (i.e. not an update) + // is possible. + // finding a matching value means that we've found that our desired + // key is already in the table + + if (states[index] != REMOVED) { + // starting at the natural offset, probe until we find an + // offset that isn't full. + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] == FULL && set[index] != val); + } + + // if the index we found was removed: continue probing until we + // locate a free location or an element which equal()s the + // one we have. + if (states[index] == REMOVED) { + int firstRemoved = index; + while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + index -= probe; + if (index < 0) { + index += length; + } + } + return states[index] == FULL ? -index - 1 : firstRemoved; + } + // if it's full, the key is already stored + return states[index] == FULL ? -index - 1 : index; + } + } + + /** + * Default implementation of TDoubleHashingStrategy: + * delegates hashing to HashFunctions.hash(double). + * + * @param val the value to hash + * @return the hashcode. + */ + public final int computeHashCode(double val) { + return HashFunctions.hash(val); + } +} // TDoubleHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHashSet.java new file mode 100644 index 00000000000..372149bb5c2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHashSet.java @@ -0,0 +1,373 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed set implementation for double primitives. + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TDoubleHashSet extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + /** + * Creates a new TDoubleHashSet instance with the default + * capacity and load factor. + */ + public TDoubleHashSet() { + super(); + } + + /** + * Creates a new TDoubleHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleHashSet(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. 
+ * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleHashSet(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleHashSet instance containing the + * elements of array. + * + * @param array an array of double primitives + */ + public TDoubleHashSet(double[] array) { + this(array.length); + addAll(array); + } + + /** + * Creates a new TDoubleHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleHashSet(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleHashSet(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleHashSet(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new TDoubleHashSet instance containing the + * elements of array. + * + * @param array an array of double primitives + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleHashSet(double[] array, TDoubleHashingStrategy strategy) { + this(array.length, strategy); + addAll(array); + } + + /** + * @return a TDoubleIterator with access to the values in this set + */ + public TDoubleIterator iterator() { + return new TDoubleIterator(this); + } + + /** + * Inserts a value into the set. + * + * @param val an double value + * @return true if the set was modified by the add operation + */ + public boolean add(double val) { + int index = insertionIndex(val); + + if (index < 0) { + return false; // already present in set, nothing to add + } + + byte previousState = _states[index]; + _set[index] = val; + _states[index] = FULL; + postInsertHook(previousState == FREE); + + return true; // yes, we added something + } + + /** + * Expands the set to accommodate new values. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldSet[] = _set; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldSet[i]; + int index = insertionIndex(o); + _set[index] = o; + _states[index] = FULL; + } + } + } + + /** + * Returns a new array containing the values in the set. + * + * @return an double[] value + */ + public double[] toArray() { + double[] result = new double[size()]; + double[] set = _set; + byte[] states = _states; + + for (int i = states.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + result[j++] = set[i]; + } + } + return result; + } + + /** + * Empties the set. + */ + public void clear() { + super.clear(); + double[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + set[i] = (double) 0; + states[i] = FREE; + } + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleHashSet)) { + return false; + } + final TDoubleHashSet that = (TDoubleHashSet) other; + if (that.size() != this.size()) { + return false; + } + return forEach(new TDoubleProcedure() { + public final boolean execute(double value) { + return that.contains(value); + } + }); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEach(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key) { + h += _hashingStrategy.computeHashCode(key); + return true; + } + } + + /** + * Removes val from the set. + * + * @param val an double value + * @return true if the set was modified by the remove operation. + */ + public boolean remove(double val) { + int index = index(val); + if (index >= 0) { + removeAt(index); + return true; + } + return false; + } + + /** + * Tests the set to determine if all of the elements in + * array are present. + * + * @param array an array of double primitives. + * @return true if all elements were present in the set. + */ + public boolean containsAll(double[] array) { + for (int i = array.length; i-- > 0;) { + if (!contains(array[i])) { + return false; + } + } + return true; + } + + /** + * Adds all of the elements in array to the set. + * + * @param array an array of double primitives. + * @return true if the set was modified by the add all operation. + */ + public boolean addAll(double[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (add(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes all of the elements in array from the set. + * + * @param array an array of double primitives. + * @return true if the set was modified by the remove all operation. 
+ */ + public boolean removeAll(double[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (remove(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes any values in the set which are not contained in + * array. + * + * @param array an array of double primitives. + * @return true if the set was modified by the retain all operation + */ + public boolean retainAll(double[] array) { + boolean changed = false; + Arrays.sort(array); + double[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && (Arrays.binarySearch(array, set[i]) < 0)) { + remove(set[i]); + changed = true; + } + } + return changed; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEach(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + + // ENTRIES + setUp(size); + while (size-- > 0) { + double val = in.readDouble(); + add(val); + } + } +} // TDoubleHashSet diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHashingStrategy.java new file mode 100644 index 00000000000..b891383205c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleHashingStrategy.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface to support pluggable hashing strategies in maps and sets. + * Implementors can use this interface to make the trove hashing + * algorithms use an optimal strategy when computing hashcodes. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHashingStrategy.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleHashingStrategy extends Serializable { + /** + * Computes a hash code for the specified double. Implementors + * can use the double's own value or a custom scheme designed to + * minimize collisions for a known set of input. + * + * @param val double for which the hashcode is to be computed + * @return the hashCode + */ + public int computeHashCode(double val); +} // TDoubleHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntHashMap.java new file mode 100644 index 00000000000..53086ef3d16 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and int values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleIntHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleIntProcedure PUT_ALL_PROC = new TDoubleIntProcedure() { + public boolean execute(double key, int value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient int[] _values; + + /** + * Creates a new TDoubleIntHashMap instance with the default + * capacity and load factor. + */ + public TDoubleIntHashMap() { + super(); + } + + /** + * Creates a new TDoubleIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleIntHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleIntHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleIntHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleIntHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleIntHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleIntHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleIntHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleIntHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleIntHashMap m = (TDoubleIntHashMap) super.clone(); + m._values = (int[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleIntIterator with access to this map's keys and values + */ + public TDoubleIntIterator iterator() { + return new TDoubleIntIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new int[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an int value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public int put(double key, int value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an double value + * @param value an int value + * @return the previous value associated with key, + * or (double)0 if none was found. 
+ */ + public int putIfAbsent(double key, int value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private int doPut(double key, int value, int index) { + byte previousState; + int previous = (int) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleIntHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + int oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = new int[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public int get(double key) { + int index = index(key); + return index < 0 ? (int) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + int[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, (int) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an int value, or (double)0 if no mapping for key exists + */ + public int remove(double key) { + int prev = (int) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleIntHashMap)) { + return false; + } + TDoubleIntHashMap that = (TDoubleIntHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleIntProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, int value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleIntProcedure { + private final TDoubleIntHashMap _otherMap; + + EqProcedure(TDoubleIntHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, int value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two ints for equality. 
+ */ + private final boolean eq(int v1, int v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (int) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public int[] getValues() { + int[] vals = new int[size()]; + int[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an int value + * @return a boolean value + */ + public boolean containsValue(int val) { + byte[] states = _states; + int[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TIntProcedure procedure) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleIntProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleIntProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + int[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleIntProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + int[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TIntFunction value + */ + public void transformValues(TIntFunction function) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(double key) { + return adjustValue(key, (int) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(double key, int amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public int adjustOrPutValue(final double key, final int adjust_amount, final int put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final int newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + int val = in.readInt(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleIntProcedure() { + private boolean first = true; + + public boolean execute(double key, int value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleIntHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntIterator.java new file mode 100644 index 00000000000..f1ba0bd1d54 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleIntIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleIntHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleIntIterator(TDoubleIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntProcedure.java new file mode 100644 index 00000000000..b05f3b9c312 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, int b); +}// TDoubleIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIterator.java new file mode 100644 index 00000000000..0bcaf8b3f0a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleIterator.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Iterator for double collections. + * + * @author Eric D. Friedman + * @version $Id: PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleIterator extends TPrimitiveIterator { + /** + * the collection on which the iterator operates + */ + private final TDoubleHash _hash; + + /** + * Creates a TDoubleIterator for the elements in the specified collection. + */ + public TDoubleIterator(TDoubleHash hash) { + super(hash); + this._hash = hash; + } + + /** + * Advances the iterator to the next element in the underlying collection + * and returns it. + * + * @return the next double in the collection + * @throws NoSuchElementException if the iterator is already exhausted + */ + public double next() { + moveToNextIndex(); + return _hash._set[_index]; + } +}// TDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongHashMap.java new file mode 100644 index 00000000000..a94edd2df97 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleLongHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleLongProcedure PUT_ALL_PROC = new TDoubleLongProcedure() { + public boolean execute(double key, long value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TDoubleLongHashMap instance with the default + * capacity and load factor. + */ + public TDoubleLongHashMap() { + super(); + } + + /** + * Creates a new TDoubleLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleLongHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleLongHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleLongHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleLongHashMap m = (TDoubleLongHashMap) super.clone(); + m._values = (long[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleLongIterator with access to this map's keys and values + */ + public TDoubleLongIterator iterator() { + return new TDoubleLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an long value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public long put(double key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an double value + * @param value an long value + * @return the previous value associated with key, + * or (double)0 if none was found. 
+ */ + public long putIfAbsent(double key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(double key, long value, int index) { + byte previousState; + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + long oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = new long[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public long get(double key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + long[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, (long) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an long value, or (double)0 if no mapping for key exists + */ + public long remove(double key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleLongHashMap)) { + return false; + } + TDoubleLongHashMap that = (TDoubleLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleLongProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, long value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleLongProcedure { + private final TDoubleLongHashMap _otherMap; + + EqProcedure(TDoubleLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. 
+ */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (long) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + byte[] states = _states; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleLongProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleLongProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + long[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(double key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(double key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final double key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + long val = in.readLong(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleLongProcedure() { + private boolean first = true; + + public boolean execute(double key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleLongHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongIterator.java new file mode 100644 index 00000000000..1381215d528 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleLongIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleLongHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleLongIterator(TDoubleLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongProcedure.java new file mode 100644 index 00000000000..4f45c6422be --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, long b); +}// TDoubleLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectHashMap.java new file mode 100644 index 00000000000..07b317eca3f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectHashMap.java @@ -0,0 +1,632 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and Object values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleObjectHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleObjectProcedure PUT_ALL_PROC = new TDoubleObjectProcedure() { + public boolean execute(double key, V value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new TDoubleObjectHashMap instance with the default + * capacity and load factor. + */ + public TDoubleObjectHashMap() { + super(); + } + + /** + * Creates a new TDoubleObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleObjectHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleObjectHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleObjectHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleObjectHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleObjectHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleObjectHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleObjectHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleObjectHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public TDoubleObjectHashMap clone() { + TDoubleObjectHashMap m = (TDoubleObjectHashMap) super.clone(); + m._values = (V[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleObjectIterator with access to this map's keys and values + */ + public TDoubleObjectIterator iterator() { + return new TDoubleObjectIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(double key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. 
+ * + * @param key an double value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V putIfAbsent(double key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(double key, V value, int index) { + byte previousState; + V previous = null; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleObjectHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + V oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = (V[]) new Object[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public V get(double key) { + int index = index(key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + Object[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, null); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an Object value or (double)0 if no such mapping exists. + */ + public V remove(double key) { + V prev = null; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleObjectHashMap)) { + return false; + } + TDoubleObjectHashMap that = (TDoubleObjectHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, Object value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleObjectProcedure { + private final TDoubleObjectHashMap _otherMap; + + EqProcedure(TDoubleObjectHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, Object value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two objects for equality. 
+ */ + private final boolean eq(Object o1, Object o2) { + return o1 == o2 || ((o1 != null) && o1.equals(o2)); + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + * @see #getValues(Object[]) + */ + public Object[] getValues() { + Object[] vals = new Object[size()]; + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * Return the values of the map; the runtime type of the returned array is that of + * the specified array. + * + * @param a the array into which the elements of this collection are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing the elements of this collection + * @throws ArrayStoreException the runtime type of the specified array is + * not a supertype of the runtime type of every element in this + * collection. + * @throws NullPointerException if the specified array is null. + * @see #getValues() + */ + public T[] getValues(T[] a) { + if (a.length < _size) { + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + _size); + } + + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = (T) v[i]; + } + } + return a; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. 
+ * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(V val) { + byte[] states = _states; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEachValue(TObjectProcedure procedure) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleObjectProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleObjectProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + V val = (V) in.readObject(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleObjectProcedure() { + private boolean first = true; + + public boolean execute(double key, Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleObjectHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectIterator.java new file mode 100644 index 00000000000..8c57c01d45f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and Object. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleObjectIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2OIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleObjectIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleObjectHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleObjectIterator(TDoubleObjectHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public V value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public V setValue(V val) { + V old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleObjectIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectProcedure.java new file mode 100644 index 00000000000..89354ac38c3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleObjectProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and Object. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2OProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, T b); +}// TDoubleObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleProcedure.java new file mode 100644 index 00000000000..bf4d31bd19f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleProcedure.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for procedures with one double parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: PProcedure.template,v 1.2 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TDoubleProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value of type double + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double value); +}// TDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortHashMap.java new file mode 100644 index 00000000000..87fdfeef035 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for double keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TDoubleShortHashMap extends TDoubleHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TDoubleShortProcedure PUT_ALL_PROC = new TDoubleShortProcedure() { + public boolean execute(double key, short value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TDoubleShortHashMap instance with the default + * capacity and load factor. + */ + public TDoubleShortHashMap() { + super(); + } + + /** + * Creates a new TDoubleShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TDoubleShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TDoubleShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TDoubleShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TDoubleShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleShortHashMap(TDoubleHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TDoubleShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TDoubleShortHashMap(int initialCapacity, TDoubleHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TDoubleShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TDoubleShortHashMap(int initialCapacity, float loadFactor, TDoubleHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TDoubleShortHashMap m = (TDoubleShortHashMap) super.clone(); + m._values = (short[]) this._values.clone(); + return m; + } + + /** + * @return a TDoubleShortIterator with access to this map's keys and values + */ + public TDoubleShortIterator iterator() { + return new TDoubleShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an double value + * @param value an short value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public short put(double key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an double value + * @param value an short value + * @return the previous value associated with key, + * or (double)0 if none was found. 
+ */ + public short putIfAbsent(double key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(double key, short value, int index) { + byte previousState; + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TDoubleShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + double oldKeys[] = _set; + short oldVals[] = _values; + byte oldStates[] = _states; + + _set = new double[newCapacity]; + _values = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + double o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an double value + * @return the value of key or (double)0 if no such mapping exists. + */ + public short get(double key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + double[] keys = _set; + short[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (double) 0); + Arrays.fill(_values, 0, _values.length, (short) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an double value + * @return an short value, or (double)0 if no mapping for key exists + */ + public short remove(double key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TDoubleShortHashMap)) { + return false; + } + TDoubleShortHashMap that = (TDoubleShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TDoubleShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(double key, short value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TDoubleShortProcedure { + private final TDoubleShortHashMap _otherMap; + + EqProcedure(TDoubleShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(double key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. 
+ */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (short) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public double[] keys() { + double[] keys = new double[size()]; + double[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public double[] keys(double[] a) { + int size = size(); + if (a.length < size) { + a = (double[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + double[] k = (double[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + byte[] states = _states; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an double value + * @return a boolean value + */ + public boolean containsKey(double key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TDoubleProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TODoubleShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TDoubleShortProcedure procedure) { + byte[] states = _states; + double[] keys = _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TDoubleShortProcedure procedure) { + boolean modified = false; + byte[] states = _states; + double[] keys = _set; + short[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(double key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(double key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final double key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + double key = in.readDouble(); + short val = in.readShort(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TDoubleShortProcedure() { + private boolean first = true; + + public boolean execute(double key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TDoubleShortHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortIterator.java new file mode 100644 index 00000000000..66c2f6e3805 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type double and short. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TDoubleShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TDoubleShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TDoubleShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TDoubleShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TDoubleShortIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TDoubleShortHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TDoubleShortIterator(TDoubleShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public double key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TDoubleShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortProcedure.java new file mode 100644 index 00000000000..d597d7cd0fb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type double and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TDoubleShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a double value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(double a, short b); +}// TDoubleShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleStack.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleStack.java new file mode 100644 index 00000000000..a44b130f7b3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TDoubleStack.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * A stack of double primitives, backed by a TDoubleArrayList. + * + * @author Eric D. 
Friedman, Rob Eden + * @version $Id: PStack.template,v 1.2 2007/02/28 23:03:57 robeden Exp $ + */ + +public class TDoubleStack { + + /** + * the list used to hold the stack values. + */ + protected TDoubleArrayList _list; + + public static final int DEFAULT_CAPACITY = TDoubleArrayList.DEFAULT_CAPACITY; + + /** + * Creates a new TDoubleStack instance with the default + * capacity. + */ + public TDoubleStack() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TDoubleStack instance with the + * specified capacity. + * + * @param capacity the initial depth of the stack + */ + public TDoubleStack(int capacity) { + _list = new TDoubleArrayList(capacity); + } + + /** + * Pushes the value onto the top of the stack. + * + * @param val an double value + */ + public void push(double val) { + _list.add(val); + } + + /** + * Removes and returns the value at the top of the stack. + * + * @return an double value + */ + public double pop() { + return _list.remove(_list.size() - 1); + } + + /** + * Returns the value at the top of the stack. + * + * @return an double value + */ + public double peek() { + return _list.get(_list.size() - 1); + } + + /** + * Returns the current depth of the stack. + */ + public int size() { + return _list.size(); + } + + /** + * Clears the stack, reseting its capacity to the default. + */ + public void clear() { + _list.clear(DEFAULT_CAPACITY); + } + + /** + * Clears the stack without releasing its internal capacity allocation. + */ + public void reset() { + _list.reset(); + } + + /** + * Copies the contents of the stack into a native array. Note that this will NOT + * pop them out of the stack. + * + * @return an double[] value + */ + public double[] toNativeArray() { + return _list.toNativeArray(); + } + + /** + * Copies a slice of the list into a native array. Note that this will NOT + * pop them out of the stack. + * + * @param dest the array to copy into. 
+ */ + public void toNativeArray(double[] dest) { + _list.toNativeArray(dest, 0, size()); + } +} // TDoubleStack diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatArrayList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatArrayList.java new file mode 100644 index 00000000000..bdd6e7d6481 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatArrayList.java @@ -0,0 +1,935 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; +import java.util.Random; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * A resizable, array-backed list of float primitives. + *

    + * Created: Sat Dec 29 14:21:12 2001 + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TFloatArrayList implements Externalizable, Cloneable { + static final long serialVersionUID = 1L; + + /** + * the data of the list + */ + protected float[] _data; + + /** + * the index after the last entry in the list + */ + protected int _pos; + + /** + * the default capacity for new lists + */ + protected static final int DEFAULT_CAPACITY = 10; + + /** + * Creates a new TFloatArrayList instance with the + * default capacity. + */ + public TFloatArrayList() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TFloatArrayList instance with the + * specified capacity. + * + * @param capacity an int value + */ + public TFloatArrayList(int capacity) { + _data = new float[capacity]; + _pos = 0; + } + + /** + * Creates a new TFloatArrayList instance whose + * capacity is the greater of the length of values and + * DEFAULT_CAPACITY and whose initial contents are the specified + * values. + * + * @param values an float[] value + */ + public TFloatArrayList(float[] values) { + this(Math.max(values.length, DEFAULT_CAPACITY)); + add(values); + } + + // sizing + + /** + * Grow the internal array as needed to accommodate the specified + * number of elements. The size of the array floats on each + * resize unless capacity requires more than twice the + * current capacity. + * + * @param capacity an int value + */ + public void ensureCapacity(int capacity) { + if (capacity > _data.length) { + int newCap = Math.max(_data.length << 1, capacity); + float[] tmp = new float[newCap]; + System.arraycopy(_data, 0, tmp, 0, _data.length); + _data = tmp; + } + } + + /** + * Returns the number of values in the list. + * + * @return the number of values in the list. + */ + public int size() { + return _pos; + } + + /** + * Tests whether this list contains any values. + * + * @return true if the list is empty. 
+ */ + public boolean isEmpty() { + return _pos == 0; + } + + /** + * Sheds any excess capacity above and beyond the current size of + * the list. + */ + public void trimToSize() { + if (_data.length > size()) { + float[] tmp = new float[size()]; + toNativeArray(tmp, 0, tmp.length); + _data = tmp; + } + } + + // modifying + + /** + * Adds val to the end of the list, growing as needed. + * + * @param val an float value + */ + public void add(float val) { + ensureCapacity(_pos + 1); + _data[_pos++] = val; + } + + /** + * Adds the values in the array vals to the end of the + * list, in order. + * + * @param vals an float[] value + */ + public void add(float[] vals) { + add(vals, 0, vals.length); + } + + /** + * Adds a subset of the values in the array vals to the + * end of the list, in order. + * + * @param vals an float[] value + * @param offset the offset at which to start copying + * @param length the number of values to copy. + */ + public void add(float[] vals, int offset, int length) { + ensureCapacity(_pos + length); + System.arraycopy(vals, offset, _data, _pos, length); + _pos += length; + } + + /** + * Inserts value into the list at offset. All + * values including and to the right of offset are shifted + * to the right. + * + * @param offset an int value + * @param value an float value + */ + public void insert(int offset, float value) { + if (offset == _pos) { + add(value); + return; + } + ensureCapacity(_pos + 1); + // shift right + System.arraycopy(_data, offset, _data, offset + 1, _pos - offset); + // insert + _data[offset] = value; + _pos++; + } + + /** + * Inserts the array of values into the list at + * offset. All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an float[] value + */ + public void insert(int offset, float[] values) { + insert(offset, values, 0, values.length); + } + + /** + * Inserts a slice of the array of values into the list + * at offset. 
All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an float[] value + * @param valOffset the offset in the values array at which to + * start copying. + * @param len the number of values to copy from the values array + */ + public void insert(int offset, float[] values, int valOffset, int len) { + if (offset == _pos) { + add(values, valOffset, len); + return; + } + + ensureCapacity(_pos + len); + // shift right + System.arraycopy(_data, offset, _data, offset + len, _pos - offset); + // insert + System.arraycopy(values, valOffset, _data, offset, len); + _pos += len; + } + + /** + * Returns the value at the specified offset. + * + * @param offset an int value + * @return an float value + */ + public float get(int offset) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + return _data[offset]; + } + + /** + * Returns the value at the specified offset without doing any + * bounds checking. + * + * @param offset an int value + * @return an float value + */ + public float getQuick(int offset) { + return _data[offset]; + } + + /** + * Sets the value at the specified offset. + * + * @param offset an int value + * @param val an float value + */ + public void set(int offset, float val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + _data[offset] = val; + } + + /** + * Sets the value at the specified offset and returns the + * previously stored value. + * + * @param offset an int value + * @param val an float value + * @return the value previously stored at offset. + */ + public float getSet(int offset, float val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + float old = _data[offset]; + _data[offset] = val; + return old; + } + + /** + * Replace the values in the list starting at offset with + * the contents of the values array. 
+ * + * @param offset the first offset to replace + * @param values the source of the new values + */ + public void set(int offset, float[] values) { + set(offset, values, 0, values.length); + } + + /** + * Replace the values in the list starting at offset with + * length values from the values array, starting + * at valOffset. + * + * @param offset the first offset to replace + * @param values the source of the new values + * @param valOffset the first value to copy from the values array + * @param length the number of values to copy + */ + public void set(int offset, float[] values, int valOffset, int length) { + if (offset < 0 || offset + length > _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(values, valOffset, _data, offset, length); + } + + /** + * Sets the value at the specified offset without doing any bounds + * checking. + * + * @param offset an int value + * @param val an float value + */ + public void setQuick(int offset, float val) { + _data[offset] = val; + } + + /** + * Flushes the internal state of the list, resetting the capacity + * to the default. + */ + public void clear() { + clear(DEFAULT_CAPACITY); + } + + /** + * Flushes the internal state of the list, setting the capacity of + * the empty list to capacity. + * + * @param capacity an int value + */ + public void clear(int capacity) { + _data = new float[capacity]; + _pos = 0; + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list without + * allocating new backing arrays. + * + * @see #clear + */ + public void reset() { + _pos = 0; + fill((float) 0); + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list + * without allocating new backing arrays. 
This method differs + * from {@link #reset reset} in that it does not clear the old + * values in the backing array. Thus, it is possible for {@link + * #getQuick getQuick} to return stale data if this method is used + * and the caller is careless about bounds checking. + * + * @see #reset + * @see #clear + * @see #getQuick + */ + public void resetQuick() { + _pos = 0; + } + + /** + * Removes the value at offset from the list. + * + * @param offset an int value + * @return the value previously stored at offset. + */ + public float remove(int offset) { + float old = get(offset); + remove(offset, 1); + return old; + } + + /** + * Removes length values from the list, starting at + * offset + * + * @param offset an int value + * @param length an int value + */ + public void remove(int offset, int length) { + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + + if (offset == 0) { + // data at the front + System.arraycopy(_data, length, _data, 0, _pos - length); + } else if (_pos - length == offset) { + // no copy to make, decrementing pos "deletes" values at + // the end + } else { + // data in the middle + System.arraycopy(_data, offset + length, + _data, offset, _pos - (offset + length)); + } + _pos -= length; + // no need to clear old values beyond _pos, because this is a + // primitive collection and 0 takes as much room as any other + // value + } + + /** + * Transform each value in the list using the specified function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + for (int i = _pos; i-- > 0;) { + _data[i] = function.execute(_data[i]); + } + } + + /** + * Reverse the order of the elements in the list. + */ + public void reverse() { + reverse(0, _pos); + } + + /** + * Reverse the order of the elements in the range of the list. 
+ * + * @param from the inclusive index at which to start reversing + * @param to the exclusive index at which to stop reversing + */ + public void reverse(int from, int to) { + if (from == to) { + return; // nothing to do + } + if (from > to) { + throw new IllegalArgumentException("from cannot be greater than to"); + } + for (int i = from, j = to - 1; i < j; i++, j--) { + swap(i, j); + } + } + + /** + * Shuffle the elements of the list using the specified random + * number generator. + * + * @param rand a Random value + */ + public void shuffle(Random rand) { + for (int i = _pos; i-- > 1;) { + swap(i, rand.nextInt(i)); + } + } + + /** + * Swap the values at offsets i and j. + * + * @param i an offset into the data array + * @param j an offset into the data array + */ + private final void swap(int i, int j) { + float tmp = _data[i]; + _data[i] = _data[j]; + _data[j] = tmp; + } + + // copying + + /** + * Returns a clone of this list. Since this is a primitive + * collection, this will be a deep clone. + * + * @return a deep clone of the list. + */ + public Object clone() { + TFloatArrayList list = null; + try { + list = (TFloatArrayList) super.clone(); + list._data = toNativeArray(); + } catch (CloneNotSupportedException e) { + // it's supported + } // end of try-catch + return list; + } + + + /** + * Returns a sublist of this list. + * + * @param begin low endpoint (inclusive) of the subList. + * @param end high endpoint (exclusive) of the subList. + * @return sublist of this list from begin, inclusive to end, exclusive. 
+ * @throws IndexOutOfBoundsException - endpoint out of range + * @throws IllegalArgumentException - endpoints out of order (end > begin) + */ + public TFloatArrayList subList(int begin, int end) { + if (end < begin) throw new IllegalArgumentException("end index " + end + " greater than begin index " + begin); + if (begin < 0) throw new IndexOutOfBoundsException("begin index can not be < 0"); + if (end > _data.length) throw new IndexOutOfBoundsException("end index < " + _data.length); + TFloatArrayList list = new TFloatArrayList(end - begin); + for (int i = begin; i < end; i++) { + list.add(_data[i]); + } + return list; + } + + + /** + * Copies the contents of the list into a native array. + * + * @return an float[] value + */ + public float[] toNativeArray() { + return toNativeArray(0, _pos); + } + + /** + * Copies a slice of the list into a native array. + * + * @param offset the offset at which to start copying + * @param len the number of values to copy. + * @return an float[] value + */ + public float[] toNativeArray(int offset, int len) { + float[] rv = new float[len]; + toNativeArray(rv, offset, len); + return rv; + } + + /** + * Copies a slice of the list into a native array. + * + * @param dest the array to copy into. + * @param offset the offset of the first value to copy + * @param len the number of values to copy. + */ + public void toNativeArray(float[] dest, int offset, int len) { + if (len == 0) { + return; // nothing to copy + } + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(_data, offset, dest, 0, len); + } + + // comparing + + /** + * Compares this list to another list, value by value. + * + * @param other the object to compare against + * @return true if other is a TFloatArrayList and has exactly the + * same values. 
+ */ + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TFloatArrayList) { + TFloatArrayList that = (TFloatArrayList) other; + if (that.size() != this.size()) { + return false; + } else { + for (int i = _pos; i-- > 0;) { + if (this._data[i] != that._data[i]) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + public int hashCode() { + int h = 0; + for (int i = _pos; i-- > 0;) { + h = 37 * h + HashFunctions.hash(_data[i]); + } + return h; + } + + // procedures + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a TFloatProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEach(TFloatProcedure procedure) { + for (int i = 0; i < _pos; i++) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + /** + * Applies the procedure to each value in the list in descending + * (back to front) order. + * + * @param procedure a TFloatProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEachDescending(TFloatProcedure procedure) { + for (int i = _pos; i-- > 0;) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + // sorting + + /** + * Sort the values in the list (ascending) using the Sun quicksort + * implementation. + * + * @see java.util.Arrays#sort + */ + public void sort() { + Arrays.sort(_data, 0, _pos); + } + + /** + * Sort a slice of the list (ascending) using the Sun quicksort + * implementation. + * + * @param fromIndex the index at which to start sorting (inclusive) + * @param toIndex the index at which to stop sorting (exclusive) + * @see java.util.Arrays#sort + */ + public void sort(int fromIndex, int toIndex) { + Arrays.sort(_data, fromIndex, toIndex); + } + + // filling + + /** + * Fills every slot in the list with the specified value. 
+ * + * @param val the value to use when filling + */ + public void fill(float val) { + Arrays.fill(_data, 0, _pos, val); + } + + /** + * Fills a range in the list with the specified value. + * + * @param fromIndex the offset at which to start filling (inclusive) + * @param toIndex the offset at which to stop filling (exclusive) + * @param val the value to use when filling + */ + public void fill(int fromIndex, int toIndex, float val) { + if (toIndex > _pos) { + ensureCapacity(toIndex); + _pos = toIndex; + } + Arrays.fill(_data, fromIndex, toIndex, val); + } + + // searching + + /** + * Performs a binary search for value in the entire list. + * Note that you must @{link #sort sort} the list before + * doing a search. + * + * @param value the value to search for + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(float value) { + return binarySearch(value, 0, _pos); + } + + /** + * Performs a binary search for value in the specified + * range. Note that you must @{link #sort sort} the list + * or the range before doing a search. + * + * @param value the value to search for + * @param fromIndex the lower boundary of the range (inclusive) + * @param toIndex the upper boundary of the range (exclusive) + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(float value, int fromIndex, int toIndex) { + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); + } + if (toIndex > _pos) { + throw new ArrayIndexOutOfBoundsException(toIndex); + } + + int low = fromIndex; + int high = toIndex - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + float midVal = _data[mid]; + + if (midVal < value) { + low = mid + 1; + } else if (midVal > value) { + high = mid - 1; + } else { + return mid; // value found + } + } + return -(low + 1); // value not found. 
+ } + + /** + * Searches the list front to back for the index of + * value. + * + * @param value an float value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(float value) { + return indexOf(0, value); + } + + /** + * Searches the list front to back for the index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (inclusive) + * @param value an float value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int offset, float value) { + for (int i = offset; i < _pos; i++) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list back to front for the last index of + * value. + * + * @param value an float value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(float value) { + return lastIndexOf(_pos, value); + } + + /** + * Searches the list back to front for the last index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (exclusive) + * @param value an float value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int offset, float value) { + for (int i = offset; i-- > 0;) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list for value + * + * @param value an float value + * @return true if value is in the list. + */ + public boolean contains(float value) { + return lastIndexOf(value) >= 0; + } + + /** + * Searches the list for values satisfying condition in + * the manner of the *nix grep utility. 
+ * + * @param condition a condition to apply to each element in the list + * @return a list of values which match the condition. + */ + public TFloatArrayList grep(TFloatProcedure condition) { + TFloatArrayList list = new TFloatArrayList(); + for (int i = 0; i < _pos; i++) { + if (condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Searches the list for values which do not satisfy + * condition. This is akin to *nix grep -v. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which do not match the condition. + */ + public TFloatArrayList inverseGrep(TFloatProcedure condition) { + TFloatArrayList list = new TFloatArrayList(); + for (int i = 0; i < _pos; i++) { + if (!condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Finds the maximum value in the list. + * + * @return the largest value in the list. + * @throws IllegalStateException if the list is empty + */ + public float max() { + if (size() == 0) { + throw new IllegalStateException("cannot find maximum of an empty list"); + } + float max = Float.NEGATIVE_INFINITY; + for (int i = 0; i < _pos; i++) { + if (_data[i] > max) { + max = _data[i]; + } + } + return max; + } + + /** + * Finds the minimum value in the list. + * + * @return the smallest value in the list. + * @throws IllegalStateException if the list is empty + */ + public float min() { + if (size() == 0) { + throw new IllegalStateException("cannot find minimum of an empty list"); + } + float min = Float.POSITIVE_INFINITY; + for (int i = 0; i < _pos; i++) { + if (_data[i] < min) { + min = _data[i]; + } + } + return min; + } + + // stringification + + /** + * Returns a String representation of the list, front to back. 
+ * + * @return a String value + */ + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + for (int i = 0, end = _pos - 1; i < end; i++) { + buf.append(_data[i]); + buf.append(", "); + } + if (size() > 0) { + buf.append(_data[_pos - 1]); + } + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // POSITION + out.writeInt(_pos); + + // ENTRIES + int len = _pos; + out.writeInt(_pos); // Written twice for backwards compatability with + // version 0 + for (int i = 0; i < len; i++) { + out.writeFloat(_data[i]); + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // POSITION + _pos = in.readInt(); + + // ENTRIES + int len = in.readInt(); + _data = new float[len]; + for (int i = 0; i < len; i++) { + _data[i] = in.readFloat(); + } + } +} // TFloatArrayList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteHashMap.java new file mode 100644 index 00000000000..f4d6621d9a2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatByteHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatByteProcedure PUT_ALL_PROC = new TFloatByteProcedure() { + public boolean execute(float key, byte value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TFloatByteHashMap instance with the default + * capacity and load factor. + */ + public TFloatByteHashMap() { + super(); + } + + /** + * Creates a new TFloatByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatByteHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatByteHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatByteHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatByteHashMap m = (TFloatByteHashMap) super.clone(); + m._values = (byte[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatByteIterator with access to this map's keys and values + */ + public TFloatByteIterator iterator() { + return new TFloatByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an byte value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public byte put(float key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an float value + * @param value an byte value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public byte putIfAbsent(float key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(float key, byte value, int index) { + byte previousState; + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + byte oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public byte get(float key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + byte[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, (byte) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an byte value, or (float)0 if no mapping for key exists + */ + public byte remove(float key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatByteHashMap)) { + return false; + } + TFloatByteHashMap that = (TFloatByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, byte value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatByteProcedure { + private final TFloatByteHashMap _otherMap; + + EqProcedure(TFloatByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. 
+ */ + private final boolean eq(byte v1, byte v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (byte) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public byte[] getValues() { + byte[] vals = new byte[size()]; + byte[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public float[] keys() { + float[] keys = new float[size()]; + float[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public float[] keys(float[] a) { + int size = size(); + if (a.length < size) { + a = (float[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + float[] k = (float[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an byte value + * @return a boolean value + */ + public boolean containsValue(byte val) { + byte[] states = _states; + byte[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an float value + * @return a boolean value + */ + public boolean containsKey(float key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TFloatProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TByteProcedure procedure) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOFloatByteProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TFloatByteProcedure procedure) { + byte[] states = _states; + float[] keys = _set; + byte[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TFloatByteProcedure procedure) { + boolean modified = false; + byte[] states = _states; + float[] keys = _set; + byte[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(float key) { + return adjustValue(key, (byte) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(float key, byte amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public byte adjustOrPutValue(final float key, final byte adjust_amount, final byte put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final byte newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + float key = in.readFloat(); + byte val = in.readByte(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TFloatByteProcedure() { + private boolean first = true; + + public boolean execute(float key, byte value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TFloatByteHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteIterator.java new file mode 100644 index 00000000000..afaeb042d51 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatByteIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatByteHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatByteIterator(TFloatByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteProcedure.java new file mode 100644 index 00000000000..f9fd012c1bc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and byte. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatByteProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b a byte value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, byte b); +}// TFloatByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleHashMap.java new file mode 100644 index 00000000000..74ab9c87a76 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and double values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatDoubleHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatDoubleProcedure PUT_ALL_PROC = new TFloatDoubleProcedure() { + public boolean execute(float key, double value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient double[] _values; + + /** + * Creates a new TFloatDoubleHashMap instance with the default + * capacity and load factor. + */ + public TFloatDoubleHashMap() { + super(); + } + + /** + * Creates a new TFloatDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatDoubleHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatDoubleHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatDoubleHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatDoubleHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatDoubleHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatDoubleHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatDoubleHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatDoubleHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatDoubleHashMap m = (TFloatDoubleHashMap) super.clone(); + m._values = (double[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatDoubleIterator with access to this map's keys and values + */ + public TFloatDoubleIterator iterator() { + return new TFloatDoubleIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new double[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an double value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public double put(float key, double value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an float value + * @param value an double value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public double putIfAbsent(float key, double value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private double doPut(float key, double value, int index) { + byte previousState; + double previous = (double) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatDoubleHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + double oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = new double[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public double get(float key) { + int index = index(key); + return index < 0 ? (double) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + double[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, (double) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an double value, or (float)0 if no mapping for key exists + */ + public double remove(float key) { + double prev = (double) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatDoubleHashMap)) { + return false; + } + TFloatDoubleHashMap that = (TFloatDoubleHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatDoubleProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, double value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatDoubleProcedure { + private final TFloatDoubleHashMap _otherMap; + + EqProcedure(TFloatDoubleHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, double value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two doubles for equality. 
+ */ + private final boolean eq(double v1, double v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (double) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public double[] getValues() { + double[] vals = new double[size()]; + double[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public float[] keys() { + float[] keys = new float[size()]; + float[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public float[] keys(float[] a) { + int size = size(); + if (a.length < size) { + a = (float[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + float[] k = (float[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an double value + * @return a boolean value + */ + public boolean containsValue(double val) { + byte[] states = _states; + double[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an float value + * @return a boolean value + */ + public boolean containsKey(float key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TFloatProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TDoubleProcedure procedure) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOFloatDoubleProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TFloatDoubleProcedure procedure) { + byte[] states = _states; + float[] keys = _set; + double[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TFloatDoubleProcedure procedure) { + boolean modified = false; + byte[] states = _states; + float[] keys = _set; + double[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(float key) { + return adjustValue(key, (double) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(float key, double amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public double adjustOrPutValue(final float key, final double adjust_amount, final double put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final double newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + float key = in.readFloat(); + double val = in.readDouble(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TFloatDoubleProcedure() { + private boolean first = true; + + public boolean execute(float key, double value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TFloatDoubleHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleIterator.java new file mode 100644 index 00000000000..53de0f0e55a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatDoubleIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatDoubleHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatDoubleIterator(TFloatDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleProcedure.java new file mode 100644 index 00000000000..ff18ac7621f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, double b); +}// TFloatDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatHashMap.java new file mode 100644 index 00000000000..f2fbda538b9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatFloatHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatFloatProcedure PUT_ALL_PROC = new TFloatFloatProcedure() { + public boolean execute(float key, float value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TFloatFloatHashMap instance with the default + * capacity and load factor. + */ + public TFloatFloatHashMap() { + super(); + } + + /** + * Creates a new TFloatFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatFloatHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatFloatHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatFloatHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatFloatHashMap m = (TFloatFloatHashMap) super.clone(); + m._values = (float[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatFloatIterator with access to this map's keys and values + */ + public TFloatFloatIterator iterator() { + return new TFloatFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an float value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public float put(float key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an float value + * @param value an float value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public float putIfAbsent(float key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(float key, float value, int index) { + byte previousState; + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + float oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public float get(float key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + float[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, (float) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an float value, or (float)0 if no mapping for key exists + */ + public float remove(float key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatFloatHashMap)) { + return false; + } + TFloatFloatHashMap that = (TFloatFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, float value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatFloatProcedure { + private final TFloatFloatHashMap _otherMap; + + EqProcedure(TFloatFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. 
+ */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (float) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public float[] keys() { + float[] keys = new float[size()]; + float[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public float[] keys(float[] a) { + int size = size(); + if (a.length < size) { + a = (float[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + float[] k = (float[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + byte[] states = _states; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an float value + * @return a boolean value + */ + public boolean containsKey(float key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TFloatProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOFloatFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TFloatFloatProcedure procedure) { + byte[] states = _states; + float[] keys = _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TFloatFloatProcedure procedure) { + boolean modified = false; + byte[] states = _states; + float[] keys = _set; + float[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(float key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(float key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final float key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + float key = in.readFloat(); + float val = in.readFloat(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TFloatFloatProcedure() { + private boolean first = true; + + public boolean execute(float key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TFloatFloatHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatIterator.java new file mode 100644 index 00000000000..e4942ab80e3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatFloatIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatFloatHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatFloatIterator(TFloatFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatProcedure.java new file mode 100644 index 00000000000..97a717778c5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, float b); +}// TFloatFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFunction.java new file mode 100644 index 00000000000..39370f238d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatFunction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for functions that accept and return one float primitive. + *

    + * Created: Mon Nov 5 22:19:36 2001 + * + * @author Eric D. Friedman + * @version $Id: PFunction.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatFunction { + /** + * Execute this function with value + * + * @param value a float input + * @return a float result + */ + public float execute(float value); +}// TFloatFunction diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHash.java new file mode 100644 index 00000000000..c59e65b370a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHash.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed hashing implementation for float primitives. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHash.template,v 1.2 2007/06/29 22:39:46 robeden Exp $ + */ + +abstract public class TFloatHash extends TPrimitiveHash implements TFloatHashingStrategy { + + /** + * the set of floats + */ + protected transient float[] _set; + + /** + * strategy used to hash values in this collection + */ + protected TFloatHashingStrategy _hashingStrategy; + + /** + * Creates a new TFloatHash instance with the default + * capacity and load factor. + */ + public TFloatHash() { + super(); + this._hashingStrategy = this; + } + + /** + * Creates a new TFloatHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + */ + public TFloatHash(int initialCapacity) { + super(initialCapacity); + this._hashingStrategy = this; + } + + /** + * Creates a new TFloatHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + */ + public TFloatHash(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + this._hashingStrategy = this; + } + + /** + * Creates a new TFloatHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatHash(TFloatHashingStrategy strategy) { + super(); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TFloatHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatHash(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TFloatHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatHash(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor); + this._hashingStrategy = strategy; + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatHash h = (TFloatHash) super.clone(); + h._set = (float[]) this._set.clone(); + return h; + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _set = new float[capacity]; + return capacity; + } + + /** + * Searches the set for val + * + * @param val an float value + * @return a boolean value + */ + public boolean contains(float val) { + return index(val) >= 0; + } + + /** + * Executes procedure for each element in the set. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the set terminated because + * the procedure returned false for some value. + */ + public boolean forEach(TFloatProcedure procedure) { + byte[] states = _states; + float[] set = _set; + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(set[i])) { + return false; + } + } + return true; + } + + /** + * Releases the element currently stored at index. 
+ * + * @param index an int value + */ + protected void removeAt(int index) { + _set[index] = (float) 0; + super.removeAt(index); + } + + /** + * Locates the index of val. + * + * @param val an float value + * @return the index of val or -1 if it isn't in the set. + */ + protected int index(float val) { + int hash, probe, index, length; + + final byte[] states = _states; + final float[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + // see Knuth, p. 529 + probe = 1 + (hash % (length - 2)); + + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)); + } + + return states[index] == FREE ? -1 : index; + } + + /** + * Locates the index at which val can be inserted. if + * there is already a value equal()ing val in the set, + * returns that value as a negative integer. + * + * @param val an float value + * @return an int value + */ + protected int insertionIndex(float val) { + int hash, probe, index, length; + + final byte[] states = _states; + final float[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] == FREE) { + return index; // empty, all done + } else if (states[index] == FULL && set[index] == val) { + return -index - 1; // already stored + } else { // already FULL or REMOVED, must probe + // compute the double hash + probe = 1 + (hash % (length - 2)); + + // if the slot we landed on is FULL (but not removed), probe + // until we find an empty slot, a REMOVED slot, or an element + // equal to the one we are trying to insert. 
+ // finding an empty slot means that the value is not present + // and that we should use that slot as the insertion point; + // finding a REMOVED slot means that we need to keep searching, + // however we want to remember the offset of that REMOVED slot + // so we can reuse it in case a "new" insertion (i.e. not an update) + // is possible. + // finding a matching value means that we've found that our desired + // key is already in the table + + if (states[index] != REMOVED) { + // starting at the natural offset, probe until we find an + // offset that isn't full. + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] == FULL && set[index] != val); + } + + // if the index we found was removed: continue probing until we + // locate a free location or an element which equal()s the + // one we have. + if (states[index] == REMOVED) { + int firstRemoved = index; + while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + index -= probe; + if (index < 0) { + index += length; + } + } + return states[index] == FULL ? -index - 1 : firstRemoved; + } + // if it's full, the key is already stored + return states[index] == FULL ? -index - 1 : index; + } + } + + /** + * Default implementation of TFloatHashingStrategy: + * delegates hashing to HashFunctions.hash(float). + * + * @param val the value to hash + * @return the hashcode. + */ + public final int computeHashCode(float val) { + return HashFunctions.hash(val); + } +} // TFloatHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHashSet.java new file mode 100644 index 00000000000..77166396982 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHashSet.java @@ -0,0 +1,373 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed set implementation for float primitives. + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TFloatHashSet extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + /** + * Creates a new TFloatHashSet instance with the default + * capacity and load factor. + */ + public TFloatHashSet() { + super(); + } + + /** + * Creates a new TFloatHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatHashSet(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. 
+ * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatHashSet(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatHashSet instance containing the + * elements of array. + * + * @param array an array of float primitives + */ + public TFloatHashSet(float[] array) { + this(array.length); + addAll(array); + } + + /** + * Creates a new TFloatHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatHashSet(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatHashSet(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatHashSet(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new TFloatHashSet instance containing the + * elements of array. + * + * @param array an array of float primitives + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatHashSet(float[] array, TFloatHashingStrategy strategy) { + this(array.length, strategy); + addAll(array); + } + + /** + * @return a TFloatIterator with access to the values in this set + */ + public TFloatIterator iterator() { + return new TFloatIterator(this); + } + + /** + * Inserts a value into the set. + * + * @param val an float value + * @return true if the set was modified by the add operation + */ + public boolean add(float val) { + int index = insertionIndex(val); + + if (index < 0) { + return false; // already present in set, nothing to add + } + + byte previousState = _states[index]; + _set[index] = val; + _states[index] = FULL; + postInsertHook(previousState == FREE); + + return true; // yes, we added something + } + + /** + * Expands the set to accommodate new values. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldSet[] = _set; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldSet[i]; + int index = insertionIndex(o); + _set[index] = o; + _states[index] = FULL; + } + } + } + + /** + * Returns a new array containing the values in the set. + * + * @return an float[] value + */ + public float[] toArray() { + float[] result = new float[size()]; + float[] set = _set; + byte[] states = _states; + + for (int i = states.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + result[j++] = set[i]; + } + } + return result; + } + + /** + * Empties the set. + */ + public void clear() { + super.clear(); + float[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + set[i] = (float) 0; + states[i] = FREE; + } + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatHashSet)) { + return false; + } + final TFloatHashSet that = (TFloatHashSet) other; + if (that.size() != this.size()) { + return false; + } + return forEach(new TFloatProcedure() { + public final boolean execute(float value) { + return that.contains(value); + } + }); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEach(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key) { + h += _hashingStrategy.computeHashCode(key); + return true; + } + } + + /** + * Removes val from the set. + * + * @param val an float value + * @return true if the set was modified by the remove operation. + */ + public boolean remove(float val) { + int index = index(val); + if (index >= 0) { + removeAt(index); + return true; + } + return false; + } + + /** + * Tests the set to determine if all of the elements in + * array are present. + * + * @param array an array of float primitives. + * @return true if all elements were present in the set. + */ + public boolean containsAll(float[] array) { + for (int i = array.length; i-- > 0;) { + if (!contains(array[i])) { + return false; + } + } + return true; + } + + /** + * Adds all of the elements in array to the set. + * + * @param array an array of float primitives. + * @return true if the set was modified by the add all operation. + */ + public boolean addAll(float[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (add(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes all of the elements in array from the set. + * + * @param array an array of float primitives. + * @return true if the set was modified by the remove all operation. 
+ */ + public boolean removeAll(float[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (remove(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes any values in the set which are not contained in + * array. + * + * @param array an array of float primitives. + * @return true if the set was modified by the retain all operation + */ + public boolean retainAll(float[] array) { + boolean changed = false; + Arrays.sort(array); + float[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && (Arrays.binarySearch(array, set[i]) < 0)) { + remove(set[i]); + changed = true; + } + } + return changed; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEach(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + + // ENTRIES + setUp(size); + while (size-- > 0) { + float val = in.readFloat(); + add(val); + } + } +} // TFloatHashSet diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHashingStrategy.java new file mode 100644 index 00000000000..06886eb365a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatHashingStrategy.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface to support pluggable hashing strategies in maps and sets. + * Implementors can use this interface to make the trove hashing + * algorithms use an optimal strategy when computing hashcodes. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHashingStrategy.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatHashingStrategy extends Serializable { + /** + * Computes a hash code for the specified float. Implementors + * can use the float's own value or a custom scheme designed to + * minimize collisions for a known set of input. + * + * @param val float for which the hashcode is to be computed + * @return the hashCode + */ + public int computeHashCode(float val); +} // TFloatHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntHashMap.java new file mode 100644 index 00000000000..f042f3f169e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and int values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatIntHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatIntProcedure PUT_ALL_PROC = new TFloatIntProcedure() { + public boolean execute(float key, int value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient int[] _values; + + /** + * Creates a new TFloatIntHashMap instance with the default + * capacity and load factor. + */ + public TFloatIntHashMap() { + super(); + } + + /** + * Creates a new TFloatIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatIntHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatIntHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatIntHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatIntHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatIntHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatIntHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatIntHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatIntHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatIntHashMap m = (TFloatIntHashMap) super.clone(); + m._values = (int[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatIntIterator with access to this map's keys and values + */ + public TFloatIntIterator iterator() { + return new TFloatIntIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new int[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an int value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public int put(float key, int value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an float value + * @param value an int value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public int putIfAbsent(float key, int value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private int doPut(float key, int value, int index) { + byte previousState; + int previous = (int) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatIntHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + int oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = new int[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public int get(float key) { + int index = index(key); + return index < 0 ? (int) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + int[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, (int) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an int value, or (float)0 if no mapping for key exists + */ + public int remove(float key) { + int prev = (int) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatIntHashMap)) { + return false; + } + TFloatIntHashMap that = (TFloatIntHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatIntProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, int value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatIntProcedure { + private final TFloatIntHashMap _otherMap; + + EqProcedure(TFloatIntHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, int value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two ints for equality. 
         */
        private final boolean eq(int v1, int v2) {
            return v1 == v2;
        }

    }

    /**
     * Removes the mapping at <tt>index</tt> from the map.
     *
     * @param index an int value
     */
    protected void removeAt(int index) {
        _values[index] = (int) 0;   // reset the value slot to the default
        super.removeAt(index);      // clear key, state; adjust size
    }

    /**
     * Returns the values of the map as a freshly allocated array
     * (iteration order is unspecified).
     *
     * @return a Collection value
     */
    public int[] getValues() {
        int[] vals = new int[size()];
        int[] v = _values;
        byte[] states = _states;

        for (int i = v.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                vals[j++] = v[i];
            }
        }
        return vals;
    }

    /**
     * Returns the keys of the map as a freshly allocated array
     * (iteration order is unspecified).
     *
     * @return a Set value
     */
    public float[] keys() {
        float[] keys = new float[size()];
        float[] k = _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                keys[j++] = k[i];
            }
        }
        return keys;
    }

    /**
     * Returns the keys of the map.
     *
     * @param a the array into which the elements of the list are to
     *          be stored, if it is big enough; otherwise, a new array of the
     *          same runtime component type is allocated for this purpose.
     * @return a Set value
     */
    public float[] keys(float[] a) {
        int size = size();
        if (a.length < size) {
            // Preserve the caller's array type, as Collection.toArray(T[]) does.
            a = (float[]) java.lang.reflect.Array.newInstance(
                    a.getClass().getComponentType(), size);
        }

        float[] k = (float[]) _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                a[j++] = k[i];
            }
        }
        return a;
    }

    /**
     * Checks for the presence of <tt>val</tt> in the values of the map.
     * Linear scan over the value table.
     *
     * @param val an int value
     * @return a boolean value
     */
    public boolean containsValue(int val) {
        byte[] states = _states;
        int[] vals = _values;

        for (int i = vals.length; i-- > 0;) {
            if (states[i] == FULL && val == vals[i]) {
                return true;
            }
        }
        return false;
    }


    /**
     * Checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key a float value
     * @return a boolean value
     */
    public boolean containsKey(float key) {
        return contains(key);
    }

    /**
     * Executes <tt>procedure</tt> for each key in the map.
     *
     * @param procedure a TFloatProcedure value
     * @return false if the loop over the keys terminated because
     *         the procedure returned false for some key.
     */
    public boolean forEachKey(TFloatProcedure procedure) {
        return forEach(procedure);
    }

    /**
     * Executes <tt>procedure</tt> for each value in the map.
     *
     * @param procedure a TIntProcedure value
     * @return false if the loop over the values terminated because
     *         the procedure returned false for some value.
     */
    public boolean forEachValue(TIntProcedure procedure) {
        byte[] states = _states;
        int[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Executes <tt>procedure</tt> for each key/value entry in the map.
     *
     * @param procedure a TOFloatIntProcedure value
     * @return false if the loop over the entries terminated because
     *         the procedure returned false for some entry.
     */
    public boolean forEachEntry(TFloatIntProcedure procedure) {
        byte[] states = _states;
        float[] keys = _set;
        int[] values = _values;
        for (int i = keys.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Retains only those entries in the map for which the procedure
     * returns a true value.
     *
     * @param procedure determines which entries to keep
     * @return true if the map was modified.
     */
    public boolean retainEntries(TFloatIntProcedure procedure) {
        boolean modified = false;
        byte[] states = _states;
        float[] keys = _set;
        int[] values = _values;


        // Temporarily disable compaction so removeAt() cannot rehash and
        // invalidate the index we are iterating over.
        // This is a fix for bug #1738760
        tempDisableAutoCompaction();
        try {
            for (int i = keys.length; i-- > 0;) {
                if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                    removeAt(i);
                    modified = true;
                }
            }
        }
        finally {
            reenableAutoCompaction(true);
        }

        return modified;
    }

    /**
     * Transforms the values in this map in place using <tt>function</tt>.
     *
     * @param function a TIntFunction value
     */
    public void transformValues(TIntFunction function) {
        byte[] states = _states;
        int[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL) {
                values[i] = function.execute(values[i]);
            }
        }
    }

    /**
     * Increments the primitive value mapped to key by 1.
     *
     * @param key the key of the value to increment
     * @return true if a mapping was found and modified.
     */
    public boolean increment(float key) {
        return adjustValue(key, (int) 1);
    }

    /**
     * Adjusts the primitive value mapped to key by <tt>amount</tt>.
     *
     * @param key    the key of the value to increment
     * @param amount the amount to adjust the value by.
     * @return true if a mapping was found and modified.
     */
    public boolean adjustValue(float key, int amount) {
        int index = index(key);
        if (index < 0) {
            return false;
        } else {
            _values[index] += amount;
            return true;
        }
    }

    /**
     * Adjusts the primitive value mapped to the key if the key is present in the map.
     * Otherwise, the <tt>put_amount</tt> is put in the map.
     *
     * @param key           the key of the value to increment
     * @param adjust_amount the amount to adjust the value by
     * @param put_amount    the value put into the map if the key is not initially present
     * @return the value present in the map after the adjustment or put operation
     * @since 2.0b1
     */
    public int adjustOrPutValue(final float key, final int adjust_amount, final int put_amount) {
        int index = insertionIndex(key);
        final boolean isNewMapping;
        final int newValue;
        if (index < 0) {
            // negative insertion index means the key already exists
            index = -index - 1;
            newValue = (_values[index] += adjust_amount);
            isNewMapping = false;
        } else {
            newValue = (_values[index] = put_amount);
            isNewMapping = true;
        }

        // Read the slot state before marking it FULL so postInsertHook can
        // tell whether we consumed a FREE slot or reused a REMOVED one.
        byte previousState = _states[index];
        _set[index] = key;
        _states[index] = FULL;

        if (isNewMapping) {
            postInsertHook(previousState == FREE);
        }

        return newValue;
    }


    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // NUMBER OF ENTRIES
        out.writeInt(_size);

        // ENTRIES
        SerializationProcedure writeProcedure = new SerializationProcedure(out);
        if (!forEachEntry(writeProcedure)) {
            // the procedure stashes the IOException; rethrow it here
            throw writeProcedure.exception;
        }
    }

    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // NUMBER OF ENTRIES
        int size = in.readInt();
        setUp(size);

        // ENTRIES
        while (size-- > 0) {
            float key = in.readFloat();
            int val = in.readInt();
            put(key, val);
        }
    }

    public String toString() {
        final StringBuilder buf = new StringBuilder("{");
        forEachEntry(new TFloatIntProcedure() {
            private boolean first = true;

            public boolean execute(float key, int value) {
                if (first) first = false;
                else buf.append(",");

                buf.append(key);
                buf.append("=");
                buf.append(value);
                return true;
            }
        });
        buf.append("}");
        return buf.toString();
    }
} // TFloatIntHashMap
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntIterator.java new file mode 100644 index 00000000000..a713f775342 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatIntIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatIntHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatIntIterator(TFloatIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntProcedure.java new file mode 100644 index 00000000000..ece899ea6d6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, int b); +}// TFloatIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIterator.java new file mode 100644 index 00000000000..3ca08a167eb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatIterator.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Iterator for float collections. + * + * @author Eric D. Friedman + * @version $Id: PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatIterator extends TPrimitiveIterator { + /** + * the collection on which the iterator operates + */ + private final TFloatHash _hash; + + /** + * Creates a TFloatIterator for the elements in the specified collection. + */ + public TFloatIterator(TFloatHash hash) { + super(hash); + this._hash = hash; + } + + /** + * Advances the iterator to the next element in the underlying collection + * and returns it. + * + * @return the next float in the collection + * @throws NoSuchElementException if the iterator is already exhausted + */ + public float next() { + moveToNextIndex(); + return _hash._set[_index]; + } +}// TFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongHashMap.java new file mode 100644 index 00000000000..792932cc84d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatLongHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatLongProcedure PUT_ALL_PROC = new TFloatLongProcedure() { + public boolean execute(float key, long value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TFloatLongHashMap instance with the default + * capacity and load factor. + */ + public TFloatLongHashMap() { + super(); + } + + /** + * Creates a new TFloatLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatLongHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatLongHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatLongHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatLongHashMap m = (TFloatLongHashMap) super.clone(); + m._values = (long[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatLongIterator with access to this map's keys and values + */ + public TFloatLongIterator iterator() { + return new TFloatLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an long value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public long put(float key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an float value + * @param value an long value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public long putIfAbsent(float key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(float key, long value, int index) { + byte previousState; + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + long oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = new long[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public long get(float key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + long[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, (long) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an long value, or (float)0 if no mapping for key exists + */ + public long remove(float key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatLongHashMap)) { + return false; + } + TFloatLongHashMap that = (TFloatLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatLongProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, long value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatLongProcedure { + private final TFloatLongHashMap _otherMap; + + EqProcedure(TFloatLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. 
+ */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (long) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public float[] keys() { + float[] keys = new float[size()]; + float[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public float[] keys(float[] a) { + int size = size(); + if (a.length < size) { + a = (float[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + float[] k = (float[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + byte[] states = _states; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an float value + * @return a boolean value + */ + public boolean containsKey(float key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TFloatProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOFloatLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TFloatLongProcedure procedure) { + byte[] states = _states; + float[] keys = _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TFloatLongProcedure procedure) { + boolean modified = false; + byte[] states = _states; + float[] keys = _set; + long[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(float key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(float key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final float key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + float key = in.readFloat(); + long val = in.readLong(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TFloatLongProcedure() { + private boolean first = true; + + public boolean execute(float key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TFloatLongHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongIterator.java new file mode 100644 index 00000000000..cd853082e04 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatLongIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatLongHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatLongIterator(TFloatLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongProcedure.java new file mode 100644 index 00000000000..f89d5cbb71b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, long b); +}// TFloatLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectHashMap.java new file mode 100644 index 00000000000..c0d99216ecd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectHashMap.java @@ -0,0 +1,632 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and Object values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatObjectHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatObjectProcedure PUT_ALL_PROC = new TFloatObjectProcedure() { + public boolean execute(float key, V value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new TFloatObjectHashMap instance with the default + * capacity and load factor. + */ + public TFloatObjectHashMap() { + super(); + } + + /** + * Creates a new TFloatObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatObjectHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatObjectHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatObjectHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatObjectHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatObjectHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatObjectHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatObjectHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatObjectHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public TFloatObjectHashMap clone() { + TFloatObjectHashMap m = (TFloatObjectHashMap) super.clone(); + m._values = (V[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatObjectIterator with access to this map's keys and values + */ + public TFloatObjectIterator iterator() { + return new TFloatObjectIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(float key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. 
+ * + * @param key an float value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V putIfAbsent(float key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(float key, V value, int index) { + byte previousState; + V previous = null; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatObjectHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + V oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = (V[]) new Object[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public V get(float key) { + int index = index(key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + Object[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, null); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an Object value or (float)0 if no such mapping exists. + */ + public V remove(float key) { + V prev = null; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatObjectHashMap)) { + return false; + } + TFloatObjectHashMap that = (TFloatObjectHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, Object value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatObjectProcedure { + private final TFloatObjectHashMap _otherMap; + + EqProcedure(TFloatObjectHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, Object value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two objects for equality. 
+ */ + private final boolean eq(Object o1, Object o2) { + return o1 == o2 || ((o1 != null) && o1.equals(o2)); + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + * @see #getValues(Object[]) + */ + public Object[] getValues() { + Object[] vals = new Object[size()]; + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * Return the values of the map; the runtime type of the returned array is that of + * the specified array. + * + * @param a the array into which the elements of this collection are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing the elements of this collection + * @throws ArrayStoreException the runtime type of the specified array is + * not a supertype of the runtime type of every element in this + * collection. + * @throws NullPointerException if the specified array is null. + * @see #getValues() + */ + public T[] getValues(T[] a) { + if (a.length < _size) { + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + _size); + } + + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = (T) v[i]; + } + } + return a; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public float[] keys() { + float[] keys = new float[size()]; + float[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. 
+ * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public float[] keys(float[] a) { + int size = size(); + if (a.length < size) { + a = (float[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + float[] k = (float[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(V val) { + byte[] states = _states; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an float value + * @return a boolean value + */ + public boolean containsKey(float key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TFloatProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEachValue(TObjectProcedure procedure) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOFloatObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TFloatObjectProcedure procedure) { + byte[] states = _states; + float[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TFloatObjectProcedure procedure) { + boolean modified = false; + byte[] states = _states; + float[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + float key = in.readFloat(); + V val = (V) in.readObject(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TFloatObjectProcedure() { + private boolean first = true; + + public boolean execute(float key, Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TFloatObjectHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectIterator.java new file mode 100644 index 00000000000..63c454c1279 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and Object. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatObjectIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2OIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatObjectIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatObjectHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatObjectIterator(TFloatObjectHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public V value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public V setValue(V val) { + V old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatObjectIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectProcedure.java new file mode 100644 index 00000000000..2bc79123441 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatObjectProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and Object. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2OProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, T b); +}// TFloatObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatProcedure.java new file mode 100644 index 00000000000..938e053e8e1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatProcedure.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for procedures with one float parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: PProcedure.template,v 1.2 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TFloatProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value of type float + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float value); +}// TFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortHashMap.java new file mode 100644 index 00000000000..46d57905478 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for float keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TFloatShortHashMap extends TFloatHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TFloatShortProcedure PUT_ALL_PROC = new TFloatShortProcedure() { + public boolean execute(float key, short value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TFloatShortHashMap instance with the default + * capacity and load factor. + */ + public TFloatShortHashMap() { + super(); + } + + /** + * Creates a new TFloatShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TFloatShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TFloatShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TFloatShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TFloatShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatShortHashMap(TFloatHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TFloatShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TFloatShortHashMap(int initialCapacity, TFloatHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TFloatShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TFloatShortHashMap(int initialCapacity, float loadFactor, TFloatHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TFloatShortHashMap m = (TFloatShortHashMap) super.clone(); + m._values = (short[]) this._values.clone(); + return m; + } + + /** + * @return a TFloatShortIterator with access to this map's keys and values + */ + public TFloatShortIterator iterator() { + return new TFloatShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an float value + * @param value an short value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public short put(float key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an float value + * @param value an short value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public short putIfAbsent(float key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(float key, short value, int index) { + byte previousState; + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TFloatShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + float oldKeys[] = _set; + short oldVals[] = _values; + byte oldStates[] = _states; + + _set = new float[newCapacity]; + _values = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + float o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an float value + * @return the value of key or (float)0 if no such mapping exists. + */ + public short get(float key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + float[] keys = _set; + short[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (float) 0); + Arrays.fill(_values, 0, _values.length, (short) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an float value + * @return an short value, or (float)0 if no mapping for key exists + */ + public short remove(float key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TFloatShortHashMap)) { + return false; + } + TFloatShortHashMap that = (TFloatShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TFloatShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(float key, short value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TFloatShortProcedure { + private final TFloatShortHashMap _otherMap; + + EqProcedure(TFloatShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(float key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. 
+ */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (short) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public float[] keys() { + float[] keys = new float[size()]; + float[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public float[] keys(float[] a) { + int size = size(); + if (a.length < size) { + a = (float[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + float[] k = (float[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + byte[] states = _states; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an float value + * @return a boolean value + */ + public boolean containsKey(float key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TFloatProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOFloatShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TFloatShortProcedure procedure) { + byte[] states = _states; + float[] keys = _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TFloatShortProcedure procedure) { + boolean modified = false; + byte[] states = _states; + float[] keys = _set; + short[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(float key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(float key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final float key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + float key = in.readFloat(); + short val = in.readShort(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TFloatShortProcedure() { + private boolean first = true; + + public boolean execute(float key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TFloatShortHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortIterator.java new file mode 100644 index 00000000000..33835a693c7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type float and short. + *

    + *

    The iterator semantics for Trove's primitive maps are slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TFloatShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TFloatShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TFloatShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TFloatShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TFloatShortIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TFloatShortHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TFloatShortIterator(TFloatShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public float key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TFloatShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortProcedure.java new file mode 100644 index 00000000000..3d06316051d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type float and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TFloatShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a float value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(float a, short b); +}// TFloatShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatStack.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatStack.java new file mode 100644 index 00000000000..ee1be7741c2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TFloatStack.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * A stack of float primitives, backed by a TFloatArrayList. + * + * @author Eric D. 
Friedman, Rob Eden + * @version $Id: PStack.template,v 1.2 2007/02/28 23:03:57 robeden Exp $ + */ + +public class TFloatStack { + + /** + * the list used to hold the stack values. + */ + protected TFloatArrayList _list; + + public static final int DEFAULT_CAPACITY = TFloatArrayList.DEFAULT_CAPACITY; + + /** + * Creates a new TFloatStack instance with the default + * capacity. + */ + public TFloatStack() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TFloatStack instance with the + * specified capacity. + * + * @param capacity the initial depth of the stack + */ + public TFloatStack(int capacity) { + _list = new TFloatArrayList(capacity); + } + + /** + * Pushes the value onto the top of the stack. + * + * @param val an float value + */ + public void push(float val) { + _list.add(val); + } + + /** + * Removes and returns the value at the top of the stack. + * + * @return an float value + */ + public float pop() { + return _list.remove(_list.size() - 1); + } + + /** + * Returns the value at the top of the stack. + * + * @return an float value + */ + public float peek() { + return _list.get(_list.size() - 1); + } + + /** + * Returns the current depth of the stack. + */ + public int size() { + return _list.size(); + } + + /** + * Clears the stack, reseting its capacity to the default. + */ + public void clear() { + _list.clear(DEFAULT_CAPACITY); + } + + /** + * Clears the stack without releasing its internal capacity allocation. + */ + public void reset() { + _list.reset(); + } + + /** + * Copies the contents of the stack into a native array. Note that this will NOT + * pop them out of the stack. + * + * @return an float[] value + */ + public float[] toNativeArray() { + return _list.toNativeArray(); + } + + /** + * Copies a slice of the list into a native array. Note that this will NOT + * pop them out of the stack. + * + * @param dest the array to copy into. 
+ */ + public void toNativeArray(float[] dest) { + _list.toNativeArray(dest, 0, size()); + } +} // TFloatStack diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THash.java new file mode 100644 index 00000000000..fddc42d7783 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THash.java @@ -0,0 +1,419 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + + +/** + * Base class for hashtables that use open addressing to resolve + * collisions. + *

    + * Created: Wed Nov 28 21:11:16 2001 + * + * @author Eric D. Friedman + * @author Rob Eden (auto-compaction) + * @version $Id: THash.java,v 1.14 2008/10/08 16:39:10 robeden Exp $ + */ + +abstract public class THash implements Cloneable, Externalizable { + static final long serialVersionUID = -1792948471915530295L; + + /** + * the load above which rehashing occurs. + */ + protected static final float DEFAULT_LOAD_FACTOR = 0.5f; + + /** + * the default initial capacity for the hash table. This is one + * less than a prime value because one is added to it when + * searching for a prime capacity to account for the free slot + * required by open addressing. Thus, the real default capacity is + * 11. + */ + protected static final int DEFAULT_INITIAL_CAPACITY = 10; + + + /** + * the current number of occupied slots in the hash. + */ + protected transient int _size; + + /** + * the current number of free slots in the hash. + */ + protected transient int _free; + + /** + * Determines how full the internal table can become before + * rehashing is required. This must be a value in the range: 0.0 < + * loadFactor < 1.0. The default value is 0.5, which is about as + * large as you can get in open addressing without hurting + * performance. Cf. Knuth, Volume 3., Chapter 6. + */ + protected float _loadFactor; + + /** + * The maximum number of elements allowed without allocating more + * space. + */ + protected int _maxSize; + + + /** + * The number of removes that should be performed before an auto-compaction occurs. + */ + protected int _autoCompactRemovesRemaining; + + /** + * The auto-compaction factor for the table. + * + * @see #setAutoCompactionFactor + */ + protected float _autoCompactionFactor; + + /** + * @see #tempDisableAutoCompaction + */ + private transient boolean _autoCompactTemporaryDisable = false; + + + /** + * Creates a new THash instance with the default + * capacity and load factor. 
+ */ + public THash() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + /** + * Creates a new THash instance with a prime capacity + * at or near the specified capacity and with the default load + * factor. + * + * @param initialCapacity an int value + */ + public THash(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Creates a new THash instance with a prime capacity + * at or near the minimum needed to hold initialCapacity + * elements with load factor loadFactor without triggering + * a rehash. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public THash(int initialCapacity, float loadFactor) { + super(); + _loadFactor = loadFactor; + + // Through testing, the load factor (especially the default load factor) has been + // found to be a pretty good starting auto-compaction factor. + _autoCompactionFactor = loadFactor; + + setUp(HashFunctions.fastCeil(initialCapacity / loadFactor)); + } + + public Object clone() { + try { + return super.clone(); + } catch (CloneNotSupportedException cnse) { + return null; // it's supported + } + } + + /** + * Tells whether this set is currently holding any elements. + * + * @return a boolean value + */ + public boolean isEmpty() { + return 0 == _size; + } + + /** + * Returns the number of distinct elements in this collection. + * + * @return an int value + */ + public int size() { + return _size; + } + + /** + * @return the current physical capacity of the hash table. + */ + abstract protected int capacity(); + + /** + * Ensure that this hashtable has sufficient capacity to hold + * desiredCapacity additional elements without + * requiring a rehash. This is a tuning method you can call + * before doing a large insert. 
+ * + * @param desiredCapacity an int value + */ + public void ensureCapacity(int desiredCapacity) { + if (desiredCapacity > (_maxSize - size())) { + rehash(PrimeFinder.nextPrime(HashFunctions.fastCeil( + (desiredCapacity + size()) / _loadFactor) + 1)); + computeMaxSize(capacity()); + } + } + + /** + * Compresses the hashtable to the minimum prime size (as defined + * by PrimeFinder) that will hold all of the elements currently in + * the table. If you have done a lot of remove + * operations and plan to do a lot of queries or insertions or + * iteration, it is a good idea to invoke this method. Doing so + * will accomplish two things: + *

+ * <p>
+ * <ul>
+ * <li>You'll free memory allocated to the table but no
+ * longer needed because of the remove()s.</li>
+ * <li>You'll get better query/insert/iterator performance
+ * because there won't be any REMOVED slots to skip
+ * over when probing for indices in the table.</li>
+ * </ul>
    + */ + public void compact() { + // need at least one free spot for open addressing + rehash(PrimeFinder.nextPrime(HashFunctions.fastCeil(size() / _loadFactor) + 1)); + computeMaxSize(capacity()); + + // If auto-compaction is enabled, re-determine the compaction interval + if (_autoCompactionFactor != 0) { + computeNextAutoCompactionAmount(size()); + } + } + + + /** + * The auto-compaction factor controls whether and when a table performs a + * {@link #compact} automatically after a certain number of remove operations. + * If the value is non-zero, the number of removes that need to occur for + * auto-compaction is the size of table at the time of the previous compaction + * (or the initial capacity) multiplied by this factor. + *

    + * Setting this value to zero will disable auto-compaction. + */ + public void setAutoCompactionFactor(float factor) { + if (factor < 0) { + throw new IllegalArgumentException("Factor must be >= 0: " + factor); + } + + _autoCompactionFactor = factor; + } + + /** + * @see #setAutoCompactionFactor + */ + public float getAutoCompactionFactor() { + return _autoCompactionFactor; + } + + + /** + * This simply calls {@link #compact compact}. It is included for + * symmetry with other collection classes. Note that the name of this + * method is somewhat misleading (which is why we prefer + * compact) as the load factor may require capacity above + * and beyond the size of this collection. + * + * @see #compact + */ + public final void trimToSize() { + compact(); + } + + /** + * Delete the record at index. Reduces the size of the + * collection by one. + * + * @param index an int value + */ + protected void removeAt(int index) { + _size--; + + // If auto-compaction is enabled, see if we need to compact + if (_autoCompactionFactor != 0) { + _autoCompactRemovesRemaining--; + + if (!_autoCompactTemporaryDisable && _autoCompactRemovesRemaining <= 0) { + // Do the compact + // NOTE: this will cause the next compaction interval to be calculated + compact(); + } + } + } + + /** + * Empties the collection. + */ + public void clear() { + _size = 0; + _free = capacity(); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = PrimeFinder.nextPrime(initialCapacity); + computeMaxSize(capacity); + computeNextAutoCompactionAmount(initialCapacity); + + return capacity; + } + + /** + * Rehashes the set. + * + * @param newCapacity an int value + */ + protected abstract void rehash(int newCapacity); + + /** + * Temporarily disables auto-compaction. 
MUST be followed by calling + * {@link #reenableAutoCompaction}. + */ + protected void tempDisableAutoCompaction() { + _autoCompactTemporaryDisable = true; + } + + /** + * Re-enable auto-compaction after it was disabled via + * {@link #tempDisableAutoCompaction()}. + * + * @param check_for_compaction True if compaction should be performed if needed + * before returning. If false, no compaction will be + * performed. + */ + protected void reenableAutoCompaction(boolean check_for_compaction) { + _autoCompactTemporaryDisable = false; + + if (check_for_compaction && _autoCompactRemovesRemaining <= 0 && + _autoCompactionFactor != 0) { + + // Do the compact + // NOTE: this will cause the next compaction interval to be calculated + compact(); + } + } + + + /** + * Computes the values of maxSize. There will always be at least + * one free slot required. + * + * @param capacity an int value + */ + private void computeMaxSize(int capacity) { + // need at least one free slot for open addressing + _maxSize = Math.min(capacity - 1, (int) (capacity * _loadFactor)); + _free = capacity - _size; // reset the free element count + } + + + /** + * Computes the number of removes that need to happen before the next auto-compaction + * will occur. + */ + private void computeNextAutoCompactionAmount(int size) { + if (_autoCompactionFactor != 0) { + // NOTE: doing the round ourselves has been found to be faster than using + // Math.round. + _autoCompactRemovesRemaining = + (int) ((size * _autoCompactionFactor) + 0.5f); + } + } + + + /** + * After an insert, this hook is called to adjust the size/free + * values of the set and to perform rehashing if necessary. 
+ */ + protected final void postInsertHook(boolean usedFreeSlot) { + if (usedFreeSlot) { + _free--; + } + + // rehash whenever we exhaust the available space in the table + if (++_size > _maxSize || _free == 0) { + // choose a new capacity suited to the new state of the table + // if we've grown beyond our maximum size, double capacity; + // if we've exhausted the free spots, rehash to the same capacity, + // which will free up any stale removed slots for reuse. + int newCapacity = _size > _maxSize ? PrimeFinder.nextPrime(capacity() << 1) : capacity(); + rehash(newCapacity); + computeMaxSize(capacity()); + } + } + + protected int calculateGrownCapacity() { + return capacity() << 1; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // LOAD FACTOR + out.writeFloat(_loadFactor); + + // AUTO COMPACTION LOAD FACTOR + out.writeFloat(_autoCompactionFactor); + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // LOAD FACTOR + float old_factor = _loadFactor; + _loadFactor = in.readFloat(); + + // AUTO COMPACTION LOAD FACTOR + _autoCompactionFactor = in.readFloat(); + + + // If we change the laod factor from the default, re-setup + if (old_factor != _loadFactor) { + setUp((int) Math.ceil(DEFAULT_INITIAL_CAPACITY / _loadFactor)); + } + } +}// THash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashIterator.java new file mode 100644 index 00000000000..947216a7d79 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashIterator.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Implements all iterator functions for the hashed object set. + * Subclasses may override objectAtIndex to vary the object + * returned by calls to next() (e.g. for values, and Map.Entry + * objects). + *

+ * <p>
+ * Note that iteration is fastest if you forego the calls to
+ * <tt>hasNext</tt> in favor of checking the size of the structure
+ * yourself and then call next() that many times:
+ * <p>
+ * <pre>
+ * Iterator i = collection.iterator();
+ * for (int size = collection.size(); size-- > 0;) {
+ *   Object o = i.next();
+ * }
+ * </pre>
+ * <p>
+ * You may, of course, use the hasNext(), next() idiom too if
+ * you aren't in a performance critical spot.

    + */ +abstract class THashIterator extends TIterator implements Iterator { + private final TObjectHash _object_hash; + + /** + * Create an instance of THashIterator over the values of the TObjectHash + */ + public THashIterator(TObjectHash hash) { + super(hash); + _object_hash = hash; + } + + /** + * Moves the iterator to the next Object and returns it. + * + * @return an Object value + * @throws ConcurrentModificationException + * if the structure + * was changed using a method that isn't on this iterator. + * @throws NoSuchElementException if this is called on an + * exhausted iterator. + */ + public V next() { + moveToNextIndex(); + return objectAtIndex(_index); + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an int value + * @throws ConcurrentModificationException + * if the underlying + * collection's size has been modified since the iterator was + * created. + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _object_hash._set; + int i = _index; + while (i-- > 0 && (set[i] == TObjectHash.FREE || set[i] == TObjectHash.REMOVED)) ; + return i; + } + + /** + * Returns the object at the specified index. Subclasses should + * implement this to return the appropriate object for the given + * index. + * + * @param index the index of the value to return. 
+ * @return an Object value + */ + abstract protected V objectAtIndex(int index); +} // THashIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashMap.java new file mode 100644 index 00000000000..1a65d10f82e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashMap.java @@ -0,0 +1,824 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + +/** + * An implementation of the Map interface which uses an open addressed + * hash table to store its contents. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + * @version $Id: THashMap.java,v 1.33 2008/05/08 17:42:55 robeden Exp $ + */ +public class THashMap extends TObjectHash implements Map, Externalizable { + static final long serialVersionUID = 1L; + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new THashMap instance with the default + * capacity and load factor. + */ + public THashMap() { + super(); + } + + /** + * Creates a new THashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare objects. + */ + public THashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new THashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public THashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new THashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare objects. + */ + public THashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new THashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public THashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new THashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. 
+ * + * @param initialCapacity an int value + * @param loadFactor a float value + * @param strategy used to compute hash codes and to compare objects. + */ + public THashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new THashMap instance which contains the + * key/value pairs in map. + * + * @param map a Map value + */ + public THashMap(Map map) { + this(map.size()); + putAll(map); + } + + /** + * Creates a new THashMap instance which contains the + * key/value pairs in map. + * + * @param map a Map value + * @param strategy used to compute hash codes and to compare objects. + */ + public THashMap(Map map, TObjectHashingStrategy strategy) { + this(map.size(), strategy); + putAll(map); + } + + /** + * @return a shallow clone of this collection + */ + public THashMap clone() { + THashMap m = (THashMap) super.clone(); + m._values = this._values.clone(); + return m; + } + + /** + * initialize the value array of the map. + * + * @param initialCapacity an int value + * @return an int value + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + //noinspection unchecked + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(K key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. 
+ */ + public V putIfAbsent(K key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(K key, V value, int index) { + V previous = null; + Object oldKey; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + oldKey = _set[index]; + _set[index] = key; + _values[index] = value; + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return previous; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof Map)) { + return false; + } + Map that = (Map) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectObjectProcedure() { + private boolean first = true; + + public boolean execute(K key, V value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } + + private final class HashProcedure implements TObjectObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(K key, V value) { + h += _hashingStrategy.computeHashCode(key) ^ (value == null ? 0 : value.hashCode()); + return true; + } + } + + private static final class EqProcedure implements TObjectObjectProcedure { + private final Map _otherMap; + + EqProcedure(Map otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(K key, V value) { + // Check to make sure the key is there. 
This avoids problems that come up with + // null values. Since it is only caused in that cause, only do this when the + // value is null (to avoid extra work). + if (value == null && !_otherMap.containsKey(key)) return false; + + V oValue = _otherMap.get(key); + return oValue == value || (oValue != null && oValue.equals(value)); + } + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TObjectProcedure procedure) { + V[] values = _values; + Object[] set = _set; + for (int i = values.length; i-- > 0;) { + if (set[i] != FREE + && set[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TObjectObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectObjectProcedure procedure) { + Object[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute((K) keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. 
+ */ + public boolean retainEntries(TObjectObjectProcedure procedure) { + boolean modified = false; + Object[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute((K) keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + V[] values = _values; + Object[] set = _set; + for (int i = values.length; i-- > 0;) { + if (set[i] != FREE && set[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + Object oldKeys[] = _set; + V oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = (V[]) new Object[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + Object o = oldKeys[i]; + int index = insertionIndex((K) o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public V get(Object key) { + int index = index((K) key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + if (size() == 0) return; // optimization + + super.clear(); + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, null); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return an Object value + */ + public V remove(Object key) { + V prev = null; + int index = index((K) key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns a view on the values of the map. + * + * @return a Collection value + */ + public Collection values() { + return new ValueView(); + } + + /** + * returns a Set view on the keys of the map. + * + * @return a Set value + */ + public Set keySet() { + return new KeyView(); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new EntryView(); + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + Object[] set = _set; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if ((set[i] != FREE && set[i] != REMOVED) && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if ((set[i] != FREE && set[i] != REMOVED) && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return contains(key); + } + + /** + * copies the key/value mappings in map into this map. + * + * @param map a Map value + */ + public void putAll(Map map) { + ensureCapacity(map.size()); + // could optimize this for cases when map instanceof THashMap + for (Iterator> i = map.entrySet().iterator(); i.hasNext();) { + Map.Entry e = i.next(); + put(e.getKey(), e.getValue()); + } + } + + /** + * a view onto the values of the map. + */ + protected class ValueView extends MapBackedView { + public Iterator iterator() { + return new THashIterator(THashMap.this) { + protected V objectAtIndex(int index) { + return _values[index]; + } + }; + } + + public boolean containsElement(V value) { + return containsValue(value); + } + + public boolean removeElement(V value) { + Object[] values = _values; + Object[] set = _set; + + for (int i = values.length; i-- > 0;) { + if ((set[i] != FREE && set[i] != REMOVED) && + value == values[i] || + (null != values[i] && values[i].equals(value))) { + + removeAt(i); + return true; + } + } + + return false; + } + } + + /** + * a view onto the entries of the map. + */ + protected class EntryView extends MapBackedView> { + private final class EntryIterator extends THashIterator> { + EntryIterator(THashMap map) { + super(map); + } + + public Entry objectAtIndex(final int index) { + return new Entry((K) _set[index], _values[index], index); + } + } + + public Iterator> iterator() { + return new EntryIterator(THashMap.this); + } + + public boolean removeElement(Map.Entry entry) { + // have to effectively reimplement Map.remove here + // because we need to return true/false depending on + // whether the removal took place. Since the Entry's + // value can be null, this means that we can't rely + // on the value of the object returned by Map.remove() + // to determine whether a deletion actually happened. 
+ // + // Note also that the deletion is only legal if + // both the key and the value match. + Object val; + int index; + + K key = keyForEntry(entry); + index = index(key); + if (index >= 0) { + val = valueForEntry(entry); + if (val == _values[index] || + (null != val && val.equals(_values[index]))) { + removeAt(index); // clear key,state; adjust size + return true; + } + } + return false; + } + + public boolean containsElement(Map.Entry entry) { + Object val = get(keyForEntry(entry)); + Object entryValue = entry.getValue(); + return entryValue == val || + (null != val && val.equals(entryValue)); + } + + protected V valueForEntry(Map.Entry entry) { + return entry.getValue(); + } + + protected K keyForEntry(Map.Entry entry) { + return entry.getKey(); + } + } + + private abstract class MapBackedView extends AbstractSet + implements Set, Iterable { + + public abstract Iterator iterator(); + + public abstract boolean removeElement(E key); + + public abstract boolean containsElement(E key); + + public boolean contains(Object key) { + return containsElement((E) key); + } + + public boolean remove(Object o) { + return removeElement((E) o); + } + + public boolean containsAll(Collection collection) { + for (Iterator i = collection.iterator(); i.hasNext();) { + if (!contains(i.next())) { + return false; + } + } + return true; + } + + public void clear() { + THashMap.this.clear(); + } + + public boolean add(E obj) { + throw new UnsupportedOperationException(); + } + + public int size() { + return THashMap.this.size(); + } + + public Object[] toArray() { + Object[] result = new Object[size()]; + Iterator e = iterator(); + for (int i = 0; e.hasNext(); i++) + result[i] = e.next(); + return result; + } + + public T[] toArray(T[] a) { + int size = size(); + if (a.length < size) + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), size); + + Iterator it = iterator(); + Object[] result = a; + for (int i = 0; i < size; i++) { + result[i] = it.next(); + } 
+ + if (a.length > size) { + a[size] = null; + } + + return a; + } + + public boolean isEmpty() { + return THashMap.this.isEmpty(); + } + + public boolean addAll(Collection collection) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection collection) { + boolean changed = false; + Iterator i = iterator(); + while (i.hasNext()) { + if (!collection.contains(i.next())) { + i.remove(); + changed = true; + } + } + return changed; + } + } + + /** + * a view onto the keys of the map. + */ + protected class KeyView extends MapBackedView { + public Iterator iterator() { + return new TObjectHashIterator(THashMap.this); + } + + public boolean removeElement(K key) { + return null != THashMap.this.remove(key); + } + + public boolean containsElement(K key) { + return THashMap.this.contains(key); + } + } + + final class Entry implements Map.Entry { + private K key; + private V val; + private final int index; + + Entry(final K key, V value, final int index) { + this.key = key; + this.val = value; + this.index = index; + } + + void setKey(K aKey) { + this.key = aKey; + } + + void setValue0(V aValue) { + this.val = aValue; + } + + public K getKey() { + return key; + } + + public V getValue() { + return val; + } + + public V setValue(V o) { + if (_values[index] != val) { + throw new ConcurrentModificationException(); + } + _values[index] = o; + o = val; // need to return previous value + val = o; // update this entry's value, in case + // setValue is called again + return o; + } + + public boolean equals(Object o) { + if (o instanceof Map.Entry) { + Map.Entry e1 = this; + Map.Entry e2 = (Map.Entry) o; + return (e1.getKey() == null ? e2.getKey() == null : e1.getKey().equals(e2.getKey())) + && (e1.getValue() == null ? e2.getValue() == null : e1.getValue().equals(e2.getValue())); + } + return false; + } + + public int hashCode() { + return (getKey() == null ? 0 : getKey().hashCode()) ^ (getValue() == null ? 
0 : getValue().hashCode()); + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // NOTE: Super was not written in version 0 + super.writeExternal(out); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + byte version = in.readByte(); + + // NOTE: super was not written in version 0 + if (version != 0) super.readExternal(in); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + //noinspection unchecked + K key = (K) in.readObject(); + //noinspection unchecked + V val = (V) in.readObject(); + put(key, val); + } + } +} // THashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashSet.java new file mode 100644 index 00000000000..1b4bc62f400 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/THashSet.java @@ -0,0 +1,415 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.lang.reflect.Array; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.Set; + +/** + * An implementation of the Set interface that uses an + * open-addressed hash table to store its contents. + *

    + * Created: Sat Nov 3 10:38:17 2001 + * + * @author Eric D. Friedman + * @version $Id: THashSet.java,v 1.21 2008/10/07 20:33:56 robeden Exp $ + */ + +public class THashSet extends TObjectHash + implements Set, Iterable, Externalizable { + + static final long serialVersionUID = 1L; + + /** + * Creates a new THashSet instance with the default + * capacity and load factor. + */ + public THashSet() { + super(); + } + + /** + * Creates a new THashSet instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare objects. + */ + public THashSet(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new THashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public THashSet(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new THashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare objects. + */ + public THashSet(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new THashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public THashSet(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new THashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + * @param strategy used to compute hash codes and to compare objects. 
+ */ + public THashSet(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new THashSet instance containing the + * elements of collection. + * + * @param collection a Collection value + */ + public THashSet(Collection collection) { + this(collection.size()); + addAll(collection); + } + + /** + * Creates a new THashSet instance containing the + * elements of collection. + * + * @param collection a Collection value + * @param strategy used to compute hash codes and to compare objects. + */ + public THashSet(Collection collection, TObjectHashingStrategy strategy) { + this(collection.size(), strategy); + addAll(collection); + } + + /** + * Inserts a value into the set. + * + * @param obj an Object value + * @return true if the set was modified by the add operation + */ + public boolean add(E obj) { + int index = insertionIndex(obj); + + if (index < 0) { + return false; // already present in set, nothing to add + } + + Object old = _set[index]; + _set[index] = obj; + + postInsertHook(old == FREE); + return true; // yes, we added something + } + + public boolean equals(Object other) { + if (!(other instanceof Set)) { + return false; + } + Set that = (Set) other; + if (that.size() != this.size()) { + return false; + } + return containsAll(that); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEach(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(E key) { + h += _hashingStrategy.computeHashCode(key); + return true; + } + } + + /** + * Expands the set to accommodate new values. 
+ * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + Object oldSet[] = _set; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + + for (int i = oldCapacity; i-- > 0;) { + if (oldSet[i] != FREE && oldSet[i] != REMOVED) { + E o = (E) oldSet[i]; + int index = insertionIndex(o); + if (index < 0) { // everyone pays for this because some people can't RTFM + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + } + } + } + + /** + * Returns a new array containing the objects in the set. + * + * @return an Object[] value + */ + public Object[] toArray() { + Object[] result = new Object[size()]; + forEach(new ToObjectArrayProcedure(result)); + return result; + } + + /** + * Returns a typed array of the objects in the set. + * + * @param a an Object[] value + * @return an Object[] value + */ + public T[] toArray(T[] a) { + int size = size(); + if (a.length < size) + a = (T[]) Array.newInstance(a.getClass().getComponentType(), size); + + forEach(new ToObjectArrayProcedure(a)); + + // If this collection fits in the specified array with room to + // spare (i.e., the array has more elements than this + // collection), the element in the array immediately following + // the end of the collection is set to null. This is useful in + // determining the length of this collection only if the + // caller knows that this collection does not contain any null + // elements.) + + if (a.length > size) { + a[size] = null; + } + + return a; + } + + /** + * Empties the set. + */ + public void clear() { + super.clear(); + + Arrays.fill(_set, 0, _set.length, FREE); + } + + /** + * Removes obj from the set. + * + * @param obj an Object value + * @return true if the set was modified by the remove operation. 
+ */ + public boolean remove(Object obj) { + int index = index((E) obj); + if (index >= 0) { + removeAt(index); + return true; + } + return false; + } + + /** + * Creates an iterator over the values of the set. The iterator + * supports element deletion. + * + * @return an Iterator value + */ + public Iterator iterator() { + return new TObjectHashIterator(this); + } + + /** + * Tests the set to determine if all of the elements in + * collection are present. + * + * @param collection a Collection value + * @return true if all elements were present in the set. + */ + public boolean containsAll(Collection collection) { + for (Iterator i = collection.iterator(); i.hasNext();) { + if (!contains(i.next())) { + return false; + } + } + return true; + } + + /** + * Adds all of the elements in collection to the set. + * + * @param collection a Collection value + * @return true if the set was modified by the add all operation. + */ + public boolean addAll(Collection collection) { + boolean changed = false; + int size = collection.size(); + + ensureCapacity(size); + Iterator it = collection.iterator(); + while (size-- > 0) { + if (add(it.next())) { + changed = true; + } + } + return changed; + } + + /** + * Removes all of the elements in collection from the set. + * + * @param collection a Collection value + * @return true if the set was modified by the remove all operation. + */ + public boolean removeAll(Collection collection) { + boolean changed = false; + int size = collection.size(); + Iterator it; + + it = collection.iterator(); + while (size-- > 0) { + if (remove(it.next())) { + changed = true; + } + } + return changed; + } + + /** + * Removes any values in the set which are not contained in + * collection. 
+ * + * @param collection a Collection value + * @return true if the set was modified by the retain all operation + */ + public boolean retainAll(Collection collection) { + boolean changed = false; + int size = size(); + Iterator it; + + it = iterator(); + while (size-- > 0) { + if (!collection.contains(it.next())) { + it.remove(); + changed = true; + } + } + return changed; + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEach(new TObjectProcedure() { + private boolean first = true; + + public boolean execute(Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // NOTE: Super was not written in version 0 + super.writeExternal(out); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEach(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + byte version = in.readByte(); + + // NOTE: super was not written in version 0 + if (version != 0) super.readExternal(in); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + E val = (E) in.readObject(); + add(val); + } + } +} // THashSet diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntArrayList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntArrayList.java new file mode 100644 index 00000000000..96f4b69a062 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntArrayList.java @@ -0,0 +1,935 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; +import java.util.Random; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * A resizable, array-backed list of int primitives. + *

    + * Created: Sat Dec 29 14:21:12 2001 + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TIntArrayList implements Externalizable, Cloneable { + static final long serialVersionUID = 1L; + + /** + * the data of the list + */ + protected int[] _data; + + /** + * the index after the last entry in the list + */ + protected int _pos; + + /** + * the default capacity for new lists + */ + protected static final int DEFAULT_CAPACITY = 10; + + /** + * Creates a new TIntArrayList instance with the + * default capacity. + */ + public TIntArrayList() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TIntArrayList instance with the + * specified capacity. + * + * @param capacity an int value + */ + public TIntArrayList(int capacity) { + _data = new int[capacity]; + _pos = 0; + } + + /** + * Creates a new TIntArrayList instance whose + * capacity is the greater of the length of values and + * DEFAULT_CAPACITY and whose initial contents are the specified + * values. + * + * @param values an int[] value + */ + public TIntArrayList(int[] values) { + this(Math.max(values.length, DEFAULT_CAPACITY)); + add(values); + } + + // sizing + + /** + * Grow the internal array as needed to accommodate the specified + * number of elements. The size of the array ints on each + * resize unless capacity requires more than twice the + * current capacity. + * + * @param capacity an int value + */ + public void ensureCapacity(int capacity) { + if (capacity > _data.length) { + int newCap = Math.max(_data.length << 1, capacity); + int[] tmp = new int[newCap]; + System.arraycopy(_data, 0, tmp, 0, _data.length); + _data = tmp; + } + } + + /** + * Returns the number of values in the list. + * + * @return the number of values in the list. + */ + public int size() { + return _pos; + } + + /** + * Tests whether this list contains any values. + * + * @return true if the list is empty. 
+ */ + public boolean isEmpty() { + return _pos == 0; + } + + /** + * Sheds any excess capacity above and beyond the current size of + * the list. + */ + public void trimToSize() { + if (_data.length > size()) { + int[] tmp = new int[size()]; + toNativeArray(tmp, 0, tmp.length); + _data = tmp; + } + } + + // modifying + + /** + * Adds val to the end of the list, growing as needed. + * + * @param val an int value + */ + public void add(int val) { + ensureCapacity(_pos + 1); + _data[_pos++] = val; + } + + /** + * Adds the values in the array vals to the end of the + * list, in order. + * + * @param vals an int[] value + */ + public void add(int[] vals) { + add(vals, 0, vals.length); + } + + /** + * Adds a subset of the values in the array vals to the + * end of the list, in order. + * + * @param vals an int[] value + * @param offset the offset at which to start copying + * @param length the number of values to copy. + */ + public void add(int[] vals, int offset, int length) { + ensureCapacity(_pos + length); + System.arraycopy(vals, offset, _data, _pos, length); + _pos += length; + } + + /** + * Inserts value into the list at offset. All + * values including and to the right of offset are shifted + * to the right. + * + * @param offset an int value + * @param value an int value + */ + public void insert(int offset, int value) { + if (offset == _pos) { + add(value); + return; + } + ensureCapacity(_pos + 1); + // shift right + System.arraycopy(_data, offset, _data, offset + 1, _pos - offset); + // insert + _data[offset] = value; + _pos++; + } + + /** + * Inserts the array of values into the list at + * offset. All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an int[] value + */ + public void insert(int offset, int[] values) { + insert(offset, values, 0, values.length); + } + + /** + * Inserts a slice of the array of values into the list + * at offset. 
All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an int[] value + * @param valOffset the offset in the values array at which to + * start copying. + * @param len the number of values to copy from the values array + */ + public void insert(int offset, int[] values, int valOffset, int len) { + if (offset == _pos) { + add(values, valOffset, len); + return; + } + + ensureCapacity(_pos + len); + // shift right + System.arraycopy(_data, offset, _data, offset + len, _pos - offset); + // insert + System.arraycopy(values, valOffset, _data, offset, len); + _pos += len; + } + + /** + * Returns the value at the specified offset. + * + * @param offset an int value + * @return an int value + */ + public int get(int offset) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + return _data[offset]; + } + + /** + * Returns the value at the specified offset without doing any + * bounds checking. + * + * @param offset an int value + * @return an int value + */ + public int getQuick(int offset) { + return _data[offset]; + } + + /** + * Sets the value at the specified offset. + * + * @param offset an int value + * @param val an int value + */ + public void set(int offset, int val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + _data[offset] = val; + } + + /** + * Sets the value at the specified offset and returns the + * previously stored value. + * + * @param offset an int value + * @param val an int value + * @return the value previously stored at offset. + */ + public int getSet(int offset, int val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + int old = _data[offset]; + _data[offset] = val; + return old; + } + + /** + * Replace the values in the list starting at offset with + * the contents of the values array. 
+ * + * @param offset the first offset to replace + * @param values the source of the new values + */ + public void set(int offset, int[] values) { + set(offset, values, 0, values.length); + } + + /** + * Replace the values in the list starting at offset with + * length values from the values array, starting + * at valOffset. + * + * @param offset the first offset to replace + * @param values the source of the new values + * @param valOffset the first value to copy from the values array + * @param length the number of values to copy + */ + public void set(int offset, int[] values, int valOffset, int length) { + if (offset < 0 || offset + length > _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(values, valOffset, _data, offset, length); + } + + /** + * Sets the value at the specified offset without doing any bounds + * checking. + * + * @param offset an int value + * @param val an int value + */ + public void setQuick(int offset, int val) { + _data[offset] = val; + } + + /** + * Flushes the internal state of the list, resetting the capacity + * to the default. + */ + public void clear() { + clear(DEFAULT_CAPACITY); + } + + /** + * Flushes the internal state of the list, setting the capacity of + * the empty list to capacity. + * + * @param capacity an int value + */ + public void clear(int capacity) { + _data = new int[capacity]; + _pos = 0; + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list without + * allocating new backing arrays. + * + * @see #clear + */ + public void reset() { + _pos = 0; + fill((int) 0); + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list + * without allocating new backing arrays. 
This method differs + * from {@link #reset reset} in that it does not clear the old + * values in the backing array. Thus, it is possible for {@link + * #getQuick getQuick} to return stale data if this method is used + * and the caller is careless about bounds checking. + * + * @see #reset + * @see #clear + * @see #getQuick + */ + public void resetQuick() { + _pos = 0; + } + + /** + * Removes the value at offset from the list. + * + * @param offset an int value + * @return the value previously stored at offset. + */ + public int remove(int offset) { + int old = get(offset); + remove(offset, 1); + return old; + } + + /** + * Removes length values from the list, starting at + * offset + * + * @param offset an int value + * @param length an int value + */ + public void remove(int offset, int length) { + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + + if (offset == 0) { + // data at the front + System.arraycopy(_data, length, _data, 0, _pos - length); + } else if (_pos - length == offset) { + // no copy to make, decrementing pos "deletes" values at + // the end + } else { + // data in the middle + System.arraycopy(_data, offset + length, + _data, offset, _pos - (offset + length)); + } + _pos -= length; + // no need to clear old values beyond _pos, because this is a + // primitive collection and 0 takes as much room as any other + // value + } + + /** + * Transform each value in the list using the specified function. + * + * @param function a TIntFunction value + */ + public void transformValues(TIntFunction function) { + for (int i = _pos; i-- > 0;) { + _data[i] = function.execute(_data[i]); + } + } + + /** + * Reverse the order of the elements in the list. + */ + public void reverse() { + reverse(0, _pos); + } + + /** + * Reverse the order of the elements in the range of the list. 
+ * + * @param from the inclusive index at which to start reversing + * @param to the exclusive index at which to stop reversing + */ + public void reverse(int from, int to) { + if (from == to) { + return; // nothing to do + } + if (from > to) { + throw new IllegalArgumentException("from cannot be greater than to"); + } + for (int i = from, j = to - 1; i < j; i++, j--) { + swap(i, j); + } + } + + /** + * Shuffle the elements of the list using the specified random + * number generator. + * + * @param rand a Random value + */ + public void shuffle(Random rand) { + for (int i = _pos; i-- > 1;) { + swap(i, rand.nextInt(i)); + } + } + + /** + * Swap the values at offsets i and j. + * + * @param i an offset into the data array + * @param j an offset into the data array + */ + private final void swap(int i, int j) { + int tmp = _data[i]; + _data[i] = _data[j]; + _data[j] = tmp; + } + + // copying + + /** + * Returns a clone of this list. Since this is a primitive + * collection, this will be a deep clone. + * + * @return a deep clone of the list. + */ + public Object clone() { + TIntArrayList list = null; + try { + list = (TIntArrayList) super.clone(); + list._data = toNativeArray(); + } catch (CloneNotSupportedException e) { + // it's supported + } // end of try-catch + return list; + } + + + /** + * Returns a sublist of this list. + * + * @param begin low endpoint (inclusive) of the subList. + * @param end high endpoint (exclusive) of the subList. + * @return sublist of this list from begin, inclusive to end, exclusive. 
+ * @throws IndexOutOfBoundsException - endpoint out of range + * @throws IllegalArgumentException - endpoints out of order (end > begin) + */ + public TIntArrayList subList(int begin, int end) { + if (end < begin) throw new IllegalArgumentException("end index " + end + " greater than begin index " + begin); + if (begin < 0) throw new IndexOutOfBoundsException("begin index can not be < 0"); + if (end > _data.length) throw new IndexOutOfBoundsException("end index < " + _data.length); + TIntArrayList list = new TIntArrayList(end - begin); + for (int i = begin; i < end; i++) { + list.add(_data[i]); + } + return list; + } + + + /** + * Copies the contents of the list into a native array. + * + * @return an int[] value + */ + public int[] toNativeArray() { + return toNativeArray(0, _pos); + } + + /** + * Copies a slice of the list into a native array. + * + * @param offset the offset at which to start copying + * @param len the number of values to copy. + * @return an int[] value + */ + public int[] toNativeArray(int offset, int len) { + int[] rv = new int[len]; + toNativeArray(rv, offset, len); + return rv; + } + + /** + * Copies a slice of the list into a native array. + * + * @param dest the array to copy into. + * @param offset the offset of the first value to copy + * @param len the number of values to copy. + */ + public void toNativeArray(int[] dest, int offset, int len) { + if (len == 0) { + return; // nothing to copy + } + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(_data, offset, dest, 0, len); + } + + // comparing + + /** + * Compares this list to another list, value by value. + * + * @param other the object to compare against + * @return true if other is a TIntArrayList and has exactly the + * same values. 
+ */ + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TIntArrayList) { + TIntArrayList that = (TIntArrayList) other; + if (that.size() != this.size()) { + return false; + } else { + for (int i = _pos; i-- > 0;) { + if (this._data[i] != that._data[i]) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + public int hashCode() { + int h = 0; + for (int i = _pos; i-- > 0;) { + h = 37 * h + HashFunctions.hash(_data[i]); + } + return h; + } + + // procedures + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a TIntProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEach(TIntProcedure procedure) { + for (int i = 0; i < _pos; i++) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + /** + * Applies the procedure to each value in the list in descending + * (back to front) order. + * + * @param procedure a TIntProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEachDescending(TIntProcedure procedure) { + for (int i = _pos; i-- > 0;) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + // sorting + + /** + * Sort the values in the list (ascending) using the Sun quicksort + * implementation. + * + * @see java.util.Arrays#sort + */ + public void sort() { + Arrays.sort(_data, 0, _pos); + } + + /** + * Sort a slice of the list (ascending) using the Sun quicksort + * implementation. + * + * @param fromIndex the index at which to start sorting (inclusive) + * @param toIndex the index at which to stop sorting (exclusive) + * @see java.util.Arrays#sort + */ + public void sort(int fromIndex, int toIndex) { + Arrays.sort(_data, fromIndex, toIndex); + } + + // filling + + /** + * Fills every slot in the list with the specified value. 
+ * + * @param val the value to use when filling + */ + public void fill(int val) { + Arrays.fill(_data, 0, _pos, val); + } + + /** + * Fills a range in the list with the specified value. + * + * @param fromIndex the offset at which to start filling (inclusive) + * @param toIndex the offset at which to stop filling (exclusive) + * @param val the value to use when filling + */ + public void fill(int fromIndex, int toIndex, int val) { + if (toIndex > _pos) { + ensureCapacity(toIndex); + _pos = toIndex; + } + Arrays.fill(_data, fromIndex, toIndex, val); + } + + // searching + + /** + * Performs a binary search for value in the entire list. + * Note that you must @{link #sort sort} the list before + * doing a search. + * + * @param value the value to search for + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(int value) { + return binarySearch(value, 0, _pos); + } + + /** + * Performs a binary search for value in the specified + * range. Note that you must @{link #sort sort} the list + * or the range before doing a search. + * + * @param value the value to search for + * @param fromIndex the lower boundary of the range (inclusive) + * @param toIndex the upper boundary of the range (exclusive) + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(int value, int fromIndex, int toIndex) { + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); + } + if (toIndex > _pos) { + throw new ArrayIndexOutOfBoundsException(toIndex); + } + + int low = fromIndex; + int high = toIndex - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + int midVal = _data[mid]; + + if (midVal < value) { + low = mid + 1; + } else if (midVal > value) { + high = mid - 1; + } else { + return mid; // value found + } + } + return -(low + 1); // value not found. 
+ } + + /** + * Searches the list front to back for the index of + * value. + * + * @param value an int value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int value) { + return indexOf(0, value); + } + + /** + * Searches the list front to back for the index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (inclusive) + * @param value an int value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int offset, int value) { + for (int i = offset; i < _pos; i++) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list back to front for the last index of + * value. + * + * @param value an int value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int value) { + return lastIndexOf(_pos, value); + } + + /** + * Searches the list back to front for the last index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (exclusive) + * @param value an int value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int offset, int value) { + for (int i = offset; i-- > 0;) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list for value + * + * @param value an int value + * @return true if value is in the list. + */ + public boolean contains(int value) { + return lastIndexOf(value) >= 0; + } + + /** + * Searches the list for values satisfying condition in + * the manner of the *nix grep utility. 
+ * + * @param condition a condition to apply to each element in the list + * @return a list of values which match the condition. + */ + public TIntArrayList grep(TIntProcedure condition) { + TIntArrayList list = new TIntArrayList(); + for (int i = 0; i < _pos; i++) { + if (condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Searches the list for values which do not satisfy + * condition. This is akin to *nix grep -v. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which do not match the condition. + */ + public TIntArrayList inverseGrep(TIntProcedure condition) { + TIntArrayList list = new TIntArrayList(); + for (int i = 0; i < _pos; i++) { + if (!condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Finds the maximum value in the list. + * + * @return the largest value in the list. + * @throws IllegalStateException if the list is empty + */ + public int max() { + if (size() == 0) { + throw new IllegalStateException("cannot find maximum of an empty list"); + } + int max = Integer.MIN_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] > max) { + max = _data[i]; + } + } + return max; + } + + /** + * Finds the minimum value in the list. + * + * @return the smallest value in the list. + * @throws IllegalStateException if the list is empty + */ + public int min() { + if (size() == 0) { + throw new IllegalStateException("cannot find minimum of an empty list"); + } + int min = Integer.MAX_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] < min) { + min = _data[i]; + } + } + return min; + } + + // stringification + + /** + * Returns a String representation of the list, front to back. 
+ * + * @return a String value + */ + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + for (int i = 0, end = _pos - 1; i < end; i++) { + buf.append(_data[i]); + buf.append(", "); + } + if (size() > 0) { + buf.append(_data[_pos - 1]); + } + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // POSITION + out.writeInt(_pos); + + // ENTRIES + int len = _pos; + out.writeInt(_pos); // Written twice for backwards compatability with + // version 0 + for (int i = 0; i < len; i++) { + out.writeInt(_data[i]); + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // POSITION + _pos = in.readInt(); + + // ENTRIES + int len = in.readInt(); + _data = new int[len]; + for (int i = 0; i < len; i++) { + _data[i] = in.readInt(); + } + } +} // TIntArrayList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteHashMap.java new file mode 100644 index 00000000000..6e030471893 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntByteHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntByteProcedure PUT_ALL_PROC = new TIntByteProcedure() { + public boolean execute(int key, byte value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TIntByteHashMap instance with the default + * capacity and load factor. + */ + public TIntByteHashMap() { + super(); + } + + /** + * Creates a new TIntByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntByteHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntByteHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntByteHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntByteHashMap m = (TIntByteHashMap) super.clone(); + m._values = (byte[]) this._values.clone(); + return m; + } + + /** + * @return a TIntByteIterator with access to this map's keys and values + */ + public TIntByteIterator iterator() { + return new TIntByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an byte value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public byte put(int key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an byte value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public byte putIfAbsent(int key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(int key, byte value, int index) { + byte previousState; + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + byte oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public byte get(int key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + byte[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, (byte) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an byte value, or (int)0 if no mapping for key exists + */ + public byte remove(int key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntByteHashMap)) { + return false; + } + TIntByteHashMap that = (TIntByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, byte value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntByteProcedure { + private final TIntByteHashMap _otherMap; + + EqProcedure(TIntByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. 
+ */ + private final boolean eq(byte v1, byte v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (byte) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public byte[] getValues() { + byte[] vals = new byte[size()]; + byte[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an byte value + * @return a boolean value + */ + public boolean containsValue(byte val) { + byte[] states = _states; + byte[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TByteProcedure procedure) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntByteProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntByteProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + byte[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntByteProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + byte[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(int key) { + return adjustValue(key, (byte) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(int key, byte amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public byte adjustOrPutValue(final int key, final byte adjust_amount, final byte put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final byte newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + byte val = in.readByte(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntByteProcedure() { + private boolean first = true; + + public boolean execute(int key, byte value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntByteHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteIterator.java new file mode 100644 index 00000000000..c8f9f2a0fd5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntByteIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntByteHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntByteIterator(TIntByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TIntByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteProcedure.java new file mode 100644 index 00000000000..c3060f7b0d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and byte. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntByteProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b a byte value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, byte b); +}// TIntByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleHashMap.java new file mode 100644 index 00000000000..feb08170a99 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and double values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntDoubleHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntDoubleProcedure PUT_ALL_PROC = new TIntDoubleProcedure() { + public boolean execute(int key, double value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient double[] _values; + + /** + * Creates a new TIntDoubleHashMap instance with the default + * capacity and load factor. + */ + public TIntDoubleHashMap() { + super(); + } + + /** + * Creates a new TIntDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntDoubleHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntDoubleHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntDoubleHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntDoubleHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntDoubleHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntDoubleHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntDoubleHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntDoubleHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntDoubleHashMap m = (TIntDoubleHashMap) super.clone(); + m._values = (double[]) this._values.clone(); + return m; + } + + /** + * @return a TIntDoubleIterator with access to this map's keys and values + */ + public TIntDoubleIterator iterator() { + return new TIntDoubleIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new double[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an double value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public double put(int key, double value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an double value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public double putIfAbsent(int key, double value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private double doPut(int key, double value, int index) { + byte previousState; + double previous = (double) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntDoubleHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + double oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = new double[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public double get(int key) { + int index = index(key); + return index < 0 ? (double) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + double[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, (double) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an double value, or (int)0 if no mapping for key exists + */ + public double remove(int key) { + double prev = (double) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntDoubleHashMap)) { + return false; + } + TIntDoubleHashMap that = (TIntDoubleHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntDoubleProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, double value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntDoubleProcedure { + private final TIntDoubleHashMap _otherMap; + + EqProcedure(TIntDoubleHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, double value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two doubles for equality. 
+ */ + private final boolean eq(double v1, double v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (double) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public double[] getValues() { + double[] vals = new double[size()]; + double[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an double value + * @return a boolean value + */ + public boolean containsValue(double val) { + byte[] states = _states; + double[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TDoubleProcedure procedure) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntDoubleProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntDoubleProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + double[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntDoubleProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + double[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(int key) { + return adjustValue(key, (double) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(int key, double amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public double adjustOrPutValue(final int key, final double adjust_amount, final double put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final double newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + double val = in.readDouble(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntDoubleProcedure() { + private boolean first = true; + + public boolean execute(int key, double value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntDoubleHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleIterator.java new file mode 100644 index 00000000000..6c46f10f55c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntDoubleIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntDoubleHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntDoubleIterator(TIntDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TIntDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleProcedure.java new file mode 100644 index 00000000000..261146b8703 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, double b); +}// TIntDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatHashMap.java new file mode 100644 index 00000000000..559a6e0c2c0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntFloatHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntFloatProcedure PUT_ALL_PROC = new TIntFloatProcedure() { + public boolean execute(int key, float value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TIntFloatHashMap instance with the default + * capacity and load factor. + */ + public TIntFloatHashMap() { + super(); + } + + /** + * Creates a new TIntFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntFloatHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntFloatHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntFloatHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntFloatHashMap m = (TIntFloatHashMap) super.clone(); + m._values = (float[]) this._values.clone(); + return m; + } + + /** + * @return a TIntFloatIterator with access to this map's keys and values + */ + public TIntFloatIterator iterator() { + return new TIntFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an float value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public float put(int key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an float value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public float putIfAbsent(int key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(int key, float value, int index) { + byte previousState; + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + float oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public float get(int key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + float[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, (float) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an float value, or (int)0 if no mapping for key exists + */ + public float remove(int key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntFloatHashMap)) { + return false; + } + TIntFloatHashMap that = (TIntFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, float value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntFloatProcedure { + private final TIntFloatHashMap _otherMap; + + EqProcedure(TIntFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. 
+ */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (float) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + byte[] states = _states; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntFloatProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntFloatProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + float[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(int key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(int key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final int key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + float val = in.readFloat(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntFloatProcedure() { + private boolean first = true; + + public boolean execute(int key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntFloatHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatIterator.java new file mode 100644 index 00000000000..8db542e11c9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntFloatIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntFloatHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntFloatIterator(TIntFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TIntFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatProcedure.java new file mode 100644 index 00000000000..b07522ed630 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, float b); +}// TIntFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFunction.java new file mode 100644 index 00000000000..3e063234adb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntFunction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for functions that accept and return one int primitive. + *

    + * Created: Mon Nov 5 22:19:36 2001 + * + * @author Eric D. Friedman + * @version $Id: PFunction.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntFunction { + /** + * Execute this function with value + * + * @param value a int input + * @return a int result + */ + public int execute(int value); +}// TIntFunction diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHash.java new file mode 100644 index 00000000000..df71c205e78 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHash.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed hashing implementation for int primitives. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHash.template,v 1.2 2007/06/29 22:39:46 robeden Exp $ + */ + +abstract public class TIntHash extends TPrimitiveHash implements TIntHashingStrategy { + + /** + * the set of ints + */ + protected transient int[] _set; + + /** + * strategy used to hash values in this collection + */ + protected TIntHashingStrategy _hashingStrategy; + + /** + * Creates a new TIntHash instance with the default + * capacity and load factor. + */ + public TIntHash() { + super(); + this._hashingStrategy = this; + } + + /** + * Creates a new TIntHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + */ + public TIntHash(int initialCapacity) { + super(initialCapacity); + this._hashingStrategy = this; + } + + /** + * Creates a new TIntHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + */ + public TIntHash(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + this._hashingStrategy = this; + } + + /** + * Creates a new TIntHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntHash(TIntHashingStrategy strategy) { + super(); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TIntHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntHash(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TIntHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntHash(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor); + this._hashingStrategy = strategy; + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntHash h = (TIntHash) super.clone(); + h._set = (int[]) this._set.clone(); + return h; + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _set = new int[capacity]; + return capacity; + } + + /** + * Searches the set for val + * + * @param val an int value + * @return a boolean value + */ + public boolean contains(int val) { + return index(val) >= 0; + } + + /** + * Executes procedure for each element in the set. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the set terminated because + * the procedure returned false for some value. + */ + public boolean forEach(TIntProcedure procedure) { + byte[] states = _states; + int[] set = _set; + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(set[i])) { + return false; + } + } + return true; + } + + /** + * Releases the element currently stored at index. 
+ * + * @param index an int value + */ + protected void removeAt(int index) { + _set[index] = (int) 0; + super.removeAt(index); + } + + /** + * Locates the index of val. + * + * @param val an int value + * @return the index of val or -1 if it isn't in the set. + */ + protected int index(int val) { + int hash, probe, index, length; + + final byte[] states = _states; + final int[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + // see Knuth, p. 529 + probe = 1 + (hash % (length - 2)); + + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)); + } + + return states[index] == FREE ? -1 : index; + } + + /** + * Locates the index at which val can be inserted. if + * there is already a value equal()ing val in the set, + * returns that value as a negative integer. + * + * @param val an int value + * @return an int value + */ + protected int insertionIndex(int val) { + int hash, probe, index, length; + + final byte[] states = _states; + final int[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] == FREE) { + return index; // empty, all done + } else if (states[index] == FULL && set[index] == val) { + return -index - 1; // already stored + } else { // already FULL or REMOVED, must probe + // compute the double hash + probe = 1 + (hash % (length - 2)); + + // if the slot we landed on is FULL (but not removed), probe + // until we find an empty slot, a REMOVED slot, or an element + // equal to the one we are trying to insert. 
+ // finding an empty slot means that the value is not present + // and that we should use that slot as the insertion point; + // finding a REMOVED slot means that we need to keep searching, + // however we want to remember the offset of that REMOVED slot + // so we can reuse it in case a "new" insertion (i.e. not an update) + // is possible. + // finding a matching value means that we've found that our desired + // key is already in the table + + if (states[index] != REMOVED) { + // starting at the natural offset, probe until we find an + // offset that isn't full. + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] == FULL && set[index] != val); + } + + // if the index we found was removed: continue probing until we + // locate a free location or an element which equal()s the + // one we have. + if (states[index] == REMOVED) { + int firstRemoved = index; + while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + index -= probe; + if (index < 0) { + index += length; + } + } + return states[index] == FULL ? -index - 1 : firstRemoved; + } + // if it's full, the key is already stored + return states[index] == FULL ? -index - 1 : index; + } + } + + /** + * Default implementation of TIntHashingStrategy: + * delegates hashing to HashFunctions.hash(int). + * + * @param val the value to hash + * @return the hashcode. + */ + public final int computeHashCode(int val) { + return HashFunctions.hash(val); + } +} // TIntHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHashSet.java new file mode 100644 index 00000000000..c9ae62bf6dd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHashSet.java @@ -0,0 +1,373 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed set implementation for int primitives. + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TIntHashSet extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + /** + * Creates a new TIntHashSet instance with the default + * capacity and load factor. + */ + public TIntHashSet() { + super(); + } + + /** + * Creates a new TIntHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntHashSet(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. 
+ * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntHashSet(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntHashSet instance containing the + * elements of array. + * + * @param array an array of int primitives + */ + public TIntHashSet(int[] array) { + this(array.length); + addAll(array); + } + + /** + * Creates a new TIntHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntHashSet(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntHashSet(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntHashSet(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new TIntHashSet instance containing the + * elements of array. + * + * @param array an array of int primitives + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntHashSet(int[] array, TIntHashingStrategy strategy) { + this(array.length, strategy); + addAll(array); + } + + /** + * @return a TIntIterator with access to the values in this set + */ + public TIntIterator iterator() { + return new TIntIterator(this); + } + + /** + * Inserts a value into the set. + * + * @param val an int value + * @return true if the set was modified by the add operation + */ + public boolean add(int val) { + int index = insertionIndex(val); + + if (index < 0) { + return false; // already present in set, nothing to add + } + + byte previousState = _states[index]; + _set[index] = val; + _states[index] = FULL; + postInsertHook(previousState == FREE); + + return true; // yes, we added something + } + + /** + * Expands the set to accommodate new values. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldSet[] = _set; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldSet[i]; + int index = insertionIndex(o); + _set[index] = o; + _states[index] = FULL; + } + } + } + + /** + * Returns a new array containing the values in the set. + * + * @return an int[] value + */ + public int[] toArray() { + int[] result = new int[size()]; + int[] set = _set; + byte[] states = _states; + + for (int i = states.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + result[j++] = set[i]; + } + } + return result; + } + + /** + * Empties the set. + */ + public void clear() { + super.clear(); + int[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + set[i] = (int) 0; + states[i] = FREE; + } + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntHashSet)) { + return false; + } + final TIntHashSet that = (TIntHashSet) other; + if (that.size() != this.size()) { + return false; + } + return forEach(new TIntProcedure() { + public final boolean execute(int value) { + return that.contains(value); + } + }); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEach(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key) { + h += _hashingStrategy.computeHashCode(key); + return true; + } + } + + /** + * Removes val from the set. + * + * @param val an int value + * @return true if the set was modified by the remove operation. + */ + public boolean remove(int val) { + int index = index(val); + if (index >= 0) { + removeAt(index); + return true; + } + return false; + } + + /** + * Tests the set to determine if all of the elements in + * array are present. + * + * @param array an array of int primitives. + * @return true if all elements were present in the set. + */ + public boolean containsAll(int[] array) { + for (int i = array.length; i-- > 0;) { + if (!contains(array[i])) { + return false; + } + } + return true; + } + + /** + * Adds all of the elements in array to the set. + * + * @param array an array of int primitives. + * @return true if the set was modified by the add all operation. + */ + public boolean addAll(int[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (add(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes all of the elements in array from the set. + * + * @param array an array of int primitives. + * @return true if the set was modified by the remove all operation. 
+ */ + public boolean removeAll(int[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (remove(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes any values in the set which are not contained in + * array. + * + * @param array an array of int primitives. + * @return true if the set was modified by the retain all operation + */ + public boolean retainAll(int[] array) { + boolean changed = false; + Arrays.sort(array); + int[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && (Arrays.binarySearch(array, set[i]) < 0)) { + remove(set[i]); + changed = true; + } + } + return changed; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEach(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + + // ENTRIES + setUp(size); + while (size-- > 0) { + int val = in.readInt(); + add(val); + } + } +} // TIntHashSet diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHashingStrategy.java new file mode 100644 index 00000000000..b877730cac3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntHashingStrategy.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface to support pluggable hashing strategies in maps and sets. + * Implementors can use this interface to make the trove hashing + * algorithms use an optimal strategy when computing hashcodes. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHashingStrategy.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntHashingStrategy extends Serializable { + /** + * Computes a hash code for the specified int. Implementors + * can use the int's own value or a custom scheme designed to + * minimize collisions for a known set of input. + * + * @param val int for which the hashcode is to be computed + * @return the hashCode + */ + public int computeHashCode(int val); +} // TIntHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntHashMap.java new file mode 100644 index 00000000000..d63cacccc2c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and int values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntIntHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntIntProcedure PUT_ALL_PROC = new TIntIntProcedure() { + public boolean execute(int key, int value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient int[] _values; + + /** + * Creates a new TIntIntHashMap instance with the default + * capacity and load factor. + */ + public TIntIntHashMap() { + super(); + } + + /** + * Creates a new TIntIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntIntHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntIntHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntIntHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntIntHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntIntHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntIntHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntIntHashMap instance with a prime + * value at or near the specified capacity and load factor. 
+ * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntIntHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntIntHashMap m = (TIntIntHashMap) super.clone(); + m._values = (int[]) this._values.clone(); + return m; + } + + /** + * @return a TIntIntIterator with access to this map's keys and values + */ + public TIntIntIterator iterator() { + return new TIntIntIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new int[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an int value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public int put(int key, int value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an int value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public int putIfAbsent(int key, int value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private int doPut(int key, int value, int index) { + byte previousState; + int previous = (int) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntIntHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + int oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = new int[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public int get(int key) { + int index = index(key); + return index < 0 ? (int) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + int[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, (int) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an int value, or (int)0 if no mapping for key exists + */ + public int remove(int key) { + int prev = (int) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntIntHashMap)) { + return false; + } + TIntIntHashMap that = (TIntIntHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntIntProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, int value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntIntProcedure { + private final TIntIntHashMap _otherMap; + + EqProcedure(TIntIntHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, int value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two ints for equality. + */ + private final boolean eq(int v1, int v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. 
+ * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (int) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public int[] getValues() { + int[] vals = new int[size()]; + int[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an int value + * @return a boolean value + */ + public boolean containsValue(int val) { + byte[] states = _states; + int[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. 
+ * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TIntProcedure procedure) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntIntProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntIntProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + int[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntIntProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + int[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TIntFunction value + */ + public void transformValues(TIntFunction function) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(int key) { + return adjustValue(key, (int) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(int key, int amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public int adjustOrPutValue(final int key, final int adjust_amount, final int put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final int newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + int val = in.readInt(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntIntProcedure() { + private boolean first = true; + + public boolean execute(int key, int value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntIntHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntIterator.java new file mode 100644 index 00000000000..de97baa707b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntIntIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntIntHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntIntIterator(TIntIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TIntIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntProcedure.java new file mode 100644 index 00000000000..5a4890bad6a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, int b); +}// TIntIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIterator.java new file mode 100644 index 00000000000..db3b82f7bc0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntIterator.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for int collections. 
+ * + * @author Eric D. Friedman + * @version $Id: PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntIterator extends TPrimitiveIterator { + /** + * the collection on which the iterator operates + */ + private final TIntHash _hash; + + /** + * Creates a TIntIterator for the elements in the specified collection. + */ + public TIntIterator(TIntHash hash) { + super(hash); + this._hash = hash; + } + + /** + * Advances the iterator to the next element in the underlying collection + * and returns it. + * + * @return the next int in the collection + * @throws NoSuchElementException if the iterator is already exhausted + */ + public int next() { + moveToNextIndex(); + return _hash._set[_index]; + } +}// TIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongHashMap.java new file mode 100644 index 00000000000..eecfa9f9d60 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntLongHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntLongProcedure PUT_ALL_PROC = new TIntLongProcedure() { + public boolean execute(int key, long value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TIntLongHashMap instance with the default + * capacity and load factor. + */ + public TIntLongHashMap() { + super(); + } + + /** + * Creates a new TIntLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntLongHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntLongHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntLongHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntLongHashMap m = (TIntLongHashMap) super.clone(); + m._values = (long[]) this._values.clone(); + return m; + } + + /** + * @return a TIntLongIterator with access to this map's keys and values + */ + public TIntLongIterator iterator() { + return new TIntLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an long value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public long put(int key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an long value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public long putIfAbsent(int key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(int key, long value, int index) { + byte previousState; + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + long oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = new long[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public long get(int key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + long[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, (long) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an long value, or (int)0 if no mapping for key exists + */ + public long remove(int key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntLongHashMap)) { + return false; + } + TIntLongHashMap that = (TIntLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntLongProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, long value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntLongProcedure { + private final TIntLongHashMap _otherMap; + + EqProcedure(TIntLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. 
+ */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (long) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + byte[] states = _states; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntLongProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntLongProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + long[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(int key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(int key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final int key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + long val = in.readLong(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntLongProcedure() { + private boolean first = true; + + public boolean execute(int key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntLongHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongIterator.java new file mode 100644 index 00000000000..69d59cffeb8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntLongIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntLongHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntLongIterator(TIntLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TIntLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongProcedure.java new file mode 100644 index 00000000000..dd0a0b3ee95 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, long b); +}// TIntLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectHashMap.java new file mode 100644 index 00000000000..499ca659ae3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectHashMap.java @@ -0,0 +1,632 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and Object values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntObjectHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntObjectProcedure PUT_ALL_PROC = new TIntObjectProcedure() { + public boolean execute(int key, V value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new TIntObjectHashMap instance with the default + * capacity and load factor. + */ + public TIntObjectHashMap() { + super(); + } + + /** + * Creates a new TIntObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntObjectHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntObjectHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntObjectHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntObjectHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntObjectHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntObjectHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntObjectHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntObjectHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public TIntObjectHashMap clone() { + TIntObjectHashMap m = (TIntObjectHashMap) super.clone(); + m._values = (V[]) this._values.clone(); + return m; + } + + /** + * @return a TIntObjectIterator with access to this map's keys and values + */ + public TIntObjectIterator iterator() { + return new TIntObjectIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(int key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. 
+ */ + public V putIfAbsent(int key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(int key, V value, int index) { + byte previousState; + V previous = null; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntObjectHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + V oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = (V[]) new Object[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public V get(int key) { + int index = index(key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + Object[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, null); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an Object value or (int)0 if no such mapping exists. + */ + public V remove(int key) { + V prev = null; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntObjectHashMap)) { + return false; + } + TIntObjectHashMap that = (TIntObjectHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, Object value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntObjectProcedure { + private final TIntObjectHashMap _otherMap; + + EqProcedure(TIntObjectHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, Object value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two objects for equality. 
+ */ + private final boolean eq(Object o1, Object o2) { + return o1 == o2 || ((o1 != null) && o1.equals(o2)); + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + * @see #getValues(Object[]) + */ + public Object[] getValues() { + Object[] vals = new Object[size()]; + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * Return the values of the map; the runtime type of the returned array is that of + * the specified array. + * + * @param a the array into which the elements of this collection are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing the elements of this collection + * @throws ArrayStoreException the runtime type of the specified array is + * not a supertype of the runtime type of every element in this + * collection. + * @throws NullPointerException if the specified array is null. + * @see #getValues() + */ + public T[] getValues(T[] a) { + if (a.length < _size) { + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + _size); + } + + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = (T) v[i]; + } + } + return a; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. 
+ * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(V val) { + byte[] states = _states; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEachValue(TObjectProcedure procedure) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntObjectProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntObjectProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + V val = (V) in.readObject(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntObjectProcedure() { + private boolean first = true; + + public boolean execute(int key, Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntObjectHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectIterator.java new file mode 100644 index 00000000000..67e63fb04ac --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and Object. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntObjectIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2OIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntObjectIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntObjectHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntObjectIterator(TIntObjectHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public V value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public V setValue(V val) { + V old = value(); + _map._values[_index] = val; + return old; + } +}// TIntObjectIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectProcedure.java new file mode 100644 index 00000000000..c14e332bd87 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntObjectProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and Object. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2OProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, T b); +}// TIntObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntProcedure.java new file mode 100644 index 00000000000..a0a4391d425 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntProcedure.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for procedures with one int parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: PProcedure.template,v 1.2 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TIntProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value of type int + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int value); +}// TIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortHashMap.java new file mode 100644 index 00000000000..5c8a5b5228e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for int keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TIntShortHashMap extends TIntHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TIntShortProcedure PUT_ALL_PROC = new TIntShortProcedure() { + public boolean execute(int key, short value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TIntShortHashMap instance with the default + * capacity and load factor. + */ + public TIntShortHashMap() { + super(); + } + + /** + * Creates a new TIntShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TIntShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TIntShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TIntShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TIntShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntShortHashMap(TIntHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TIntShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TIntShortHashMap(int initialCapacity, TIntHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TIntShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TIntShortHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TIntShortHashMap m = (TIntShortHashMap) super.clone(); + m._values = (short[]) this._values.clone(); + return m; + } + + /** + * @return a TIntShortIterator with access to this map's keys and values + */ + public TIntShortIterator iterator() { + return new TIntShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an int value + * @param value an short value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public short put(int key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an int value + * @param value an short value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public short putIfAbsent(int key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(int key, short value, int index) { + byte previousState; + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TIntShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + int oldKeys[] = _set; + short oldVals[] = _values; + byte oldStates[] = _states; + + _set = new int[newCapacity]; + _values = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + int o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an int value + * @return the value of key or (int)0 if no such mapping exists. + */ + public short get(int key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + int[] keys = _set; + short[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (int) 0); + Arrays.fill(_values, 0, _values.length, (short) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an int value + * @return an short value, or (int)0 if no mapping for key exists + */ + public short remove(int key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TIntShortHashMap)) { + return false; + } + TIntShortHashMap that = (TIntShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TIntShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(int key, short value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TIntShortProcedure { + private final TIntShortHashMap _otherMap; + + EqProcedure(TIntShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(int key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. 
+ */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (short) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public int[] keys() { + int[] keys = new int[size()]; + int[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public int[] keys(int[] a) { + int size = size(); + if (a.length < size) { + a = (int[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + int[] k = (int[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + byte[] states = _states; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an int value + * @return a boolean value + */ + public boolean containsKey(int key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TIntProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOIntShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TIntShortProcedure procedure) { + byte[] states = _states; + int[] keys = _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TIntShortProcedure procedure) { + boolean modified = false; + byte[] states = _states; + int[] keys = _set; + short[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(int key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(int key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final int key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + int key = in.readInt(); + short val = in.readShort(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TIntShortProcedure() { + private boolean first = true; + + public boolean execute(int key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TIntShortHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortIterator.java new file mode 100644 index 00000000000..b51bd221f91 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type int and short. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TIntShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TIntShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TIntShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TIntShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TIntShortIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TIntShortHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TIntShortIterator(TIntShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public int key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TIntShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortProcedure.java new file mode 100644 index 00000000000..fb0c96e1591 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type int and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TIntShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a int value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(int a, short b); +}// TIntShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntStack.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntStack.java new file mode 100644 index 00000000000..68c41f33286 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIntStack.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * A stack of int primitives, backed by a TIntArrayList. + * + * @author Eric D. 
Friedman, Rob Eden + * @version $Id: PStack.template,v 1.2 2007/02/28 23:03:57 robeden Exp $ + */ + +public class TIntStack { + + /** + * the list used to hold the stack values. + */ + protected TIntArrayList _list; + + public static final int DEFAULT_CAPACITY = TIntArrayList.DEFAULT_CAPACITY; + + /** + * Creates a new TIntStack instance with the default + * capacity. + */ + public TIntStack() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TIntStack instance with the + * specified capacity. + * + * @param capacity the initial depth of the stack + */ + public TIntStack(int capacity) { + _list = new TIntArrayList(capacity); + } + + /** + * Pushes the value onto the top of the stack. + * + * @param val an int value + */ + public void push(int val) { + _list.add(val); + } + + /** + * Removes and returns the value at the top of the stack. + * + * @return an int value + */ + public int pop() { + return _list.remove(_list.size() - 1); + } + + /** + * Returns the value at the top of the stack. + * + * @return an int value + */ + public int peek() { + return _list.get(_list.size() - 1); + } + + /** + * Returns the current depth of the stack. + */ + public int size() { + return _list.size(); + } + + /** + * Clears the stack, reseting its capacity to the default. + */ + public void clear() { + _list.clear(DEFAULT_CAPACITY); + } + + /** + * Clears the stack without releasing its internal capacity allocation. + */ + public void reset() { + _list.reset(); + } + + /** + * Copies the contents of the stack into a native array. Note that this will NOT + * pop them out of the stack. + * + * @return an int[] value + */ + public int[] toNativeArray() { + return _list.toNativeArray(); + } + + /** + * Copies a slice of the list into a native array. Note that this will NOT + * pop them out of the stack. + * + * @param dest the array to copy into. 
+ */ + public void toNativeArray(int[] dest) { + _list.toNativeArray(dest, 0, size()); + } +} // TIntStack diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIterator.java new file mode 100644 index 00000000000..93878c98f8d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TIterator.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; + +/** + * Abstract iterator class for THash implementations. This class provides some + * of the common iterator operations (hasNext(), remove()) and allows subclasses + * to define the mechanism(s) for advancing the iterator and returning data. + * + * @author Eric D. Friedman + * @version $Id: TIterator.java,v 1.3 2007/06/29 20:03:10 robeden Exp $ + */ +abstract class TIterator { + /** + * the data structure this iterator traverses + */ + protected final THash _hash; + /** + * the number of elements this iterator believes are in the + * data structure it accesses. 
+ */ + protected int _expectedSize; + /** + * the index used for iteration. + */ + protected int _index; + + /** + * Create an instance of TIterator over the specified THash. + */ + public TIterator(THash hash) { + _hash = hash; + _expectedSize = _hash.size(); + _index = _hash.capacity(); + } + + /** + * Returns true if the iterator can be advanced past its current + * location. + * + * @return a boolean value + */ + public boolean hasNext() { + return nextIndex() >= 0; + } + + /** + * Removes the last entry returned by the iterator. + * Invoking this method more than once for a single entry + * will leave the underlying data structure in a confused + * state. + */ + public void remove() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + // Disable auto compaction during the remove. This is a workaround for bug 1642768. + try { + _hash.tempDisableAutoCompaction(); + _hash.removeAt(_index); + } + finally { + _hash.reenableAutoCompaction(false); + } + + _expectedSize--; + } + + /** + * Sets the internal index so that the `next' object + * can be returned. + */ + protected final void moveToNextIndex() { + // doing the assignment && < 0 in one line shaves + // 3 opcodes... + if ((_index = nextIndex()) < 0) { + throw new NoSuchElementException(); + } + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an int value + */ + abstract protected int nextIndex(); +} // TIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkable.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkable.java new file mode 100644 index 00000000000..9f888e78933 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkable.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.gnu.trove;

import java.io.Serializable;


/**
 * Interface for Objects which can be inserted into a {@link TLinkedList}.
 * Implementors carry their own previous/next pointers, which lets the list
 * avoid allocating per-element node wrappers.
 * <p/>
 * Created: Sat Nov 10 15:23:41 2001
 *
 * @author Eric D. Friedman
 * @version $Id: TLinkable.java,v 1.2 2001/12/03 00:16:25 ericdf Exp $
 * @see org.elasticsearch.util.gnu.trove.TLinkedList
 */

public interface TLinkable extends Serializable {

    /**
     * Returns the linked list node after this one.
     *
     * @return a <code>TLinkable</code> value
     */
    public TLinkable getNext();

    /**
     * Returns the linked list node before this one.
     *
     * @return a <code>TLinkable</code> value
     */
    public TLinkable getPrevious();

    /**
     * Sets the linked list node after this one.
     *
     * @param linkable a <code>TLinkable</code> value
     */
    public void setNext(TLinkable linkable);

    /**
     * Sets the linked list node before this one.
     *
     * @param linkable a <code>TLinkable</code> value
     */
    public void setPrevious(TLinkable linkable);
}// TLinkable
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkableAdapter.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkableAdapter.java
new file mode 100644
index 00000000000..11e4c5d753c
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkableAdapter.java
@@ -0,0 +1,74 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.gnu.trove;

/**
 * Adapter for the {@link TLinkable} interface which implements the interface
 * and can therefore be extended trivially to create TLinkable objects without
 * having to implement the obvious.
 * <p/>
 * Created: Thurs Nov 15 16:25:00 2001
 *
 * @author Jason Baldridge
 * @version $Id: TLinkableAdapter.java,v 1.1 2006/11/10 23:27:56 robeden Exp $
 * @see org.elasticsearch.util.gnu.trove.TLinkedList
 */

public class TLinkableAdapter implements TLinkable {
    // Previous/next pointers; managed by the owning TLinkedList, not by users.
    TLinkable _previous, _next;

    /**
     * Returns the linked list node after this one.
     *
     * @return a <code>TLinkable</code> value
     */
    public TLinkable getNext() {
        return _next;
    }

    /**
     * Returns the linked list node before this one.
     *
     * @return a <code>TLinkable</code> value
     */
    public TLinkable getPrevious() {
        return _previous;
    }

    /**
     * Sets the linked list node after this one.
     *
     * @param linkable a <code>TLinkable</code> value
     */
    public void setNext(TLinkable linkable) {
        _next = linkable;
    }

    /**
     * Sets the linked list node before this one.
     *
     * @param linkable a <code>TLinkable</code> value
     */
    public void setPrevious(TLinkable linkable) {
        _previous = linkable;
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkedList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkedList.java
new file mode 100644
index 00000000000..03dc0402ee3
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLinkedList.java
@@ -0,0 +1,749 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSequentialList; +import java.util.ListIterator; +import java.util.NoSuchElementException; + +/** + * A LinkedList implementation which holds instances of type + * TLinkable. + *

    + *

    Using this implementation allows you to get java.util.LinkedList + * behavior (a doubly linked list, with Iterators that support insert + * and delete operations) without incurring the overhead of creating + * Node wrapper objects for every element in your list.

    + *

    + *

    The requirement to achieve this time/space gain is that the + * Objects stored in the List implement the TLinkable + * interface.

    + *

    + *

    The limitations are that you cannot put the same object into + * more than one list or more than once in the same list. You must + * also ensure that you only remove objects that are actually in the + * list. That is, if you have an object A and lists l1 and l2, you + * must ensure that you invoke List.remove(A) on the correct list. It + * is also forbidden to invoke List.remove() with an unaffiliated + * TLinkable (one that belongs to no list): this will destroy the list + * you invoke it on.

    + *

    + *

    + * Created: Sat Nov 10 15:25:10 2001 + *

    + * + * @author Eric D. Friedman + * @version $Id: TLinkedList.java,v 1.15 2009/03/31 19:43:14 robeden Exp $ + * @see org.elasticsearch.util.gnu.trove.TLinkable + */ + +public class TLinkedList extends AbstractSequentialList + implements Externalizable { + + static final long serialVersionUID = 1L; + + + /** + * the head of the list + */ + protected T _head; + /** + * the tail of the list + */ + protected T _tail; + /** + * the number of elements in the list + */ + protected int _size = 0; + + /** + * Creates a new TLinkedList instance. + */ + public TLinkedList() { + super(); + } + + /** + * Returns an iterator positioned at index. Assuming + * that the list has a value at that index, calling next() will + * retrieve and advance the iterator. Assuming that there is a + * value before index in the list, calling previous() + * will retrieve it (the value at index - 1) and move the iterator + * to that position. So, iterating from front to back starts at + * 0; iterating from back to front starts at size(). + * + * @param index an int value + * @return a ListIterator value + */ + public ListIterator listIterator(int index) { + return new IteratorImpl(index); + } + + /** + * Returns the number of elements in the list. + * + * @return an int value + */ + public int size() { + return _size; + } + + /** + * Inserts linkable at index index in the list. + * All values > index are shifted over one position to accommodate + * the new addition. + * + * @param index an int value + * @param linkable an object of type TLinkable + */ + public void add(int index, T linkable) { + if (index < 0 || index > size()) { + throw new IndexOutOfBoundsException("index:" + index); + } + insert(index, linkable); + } + + /** + * Appends linkable to the end of the list. + * + * @param linkable an object of type TLinkable + * @return always true + */ + public boolean add(T linkable) { + insert(_size, linkable); + return true; + } + + /** + * Inserts linkable at the head of the list. 
+ * + * @param linkable an object of type TLinkable + */ + public void addFirst(T linkable) { + insert(0, linkable); + } + + /** + * Adds linkable to the end of the list. + * + * @param linkable an object of type TLinkable + */ + public void addLast(T linkable) { + insert(size(), linkable); + } + + /** + * Empties the list. + */ + public void clear() { + if (null != _head) { + for (TLinkable link = _head.getNext(); + link != null; + link = link.getNext()) { + TLinkable prev = link.getPrevious(); + prev.setNext(null); + link.setPrevious(null); + } + _head = _tail = null; + } + _size = 0; + } + + /** + * Copies the list's contents into a native array. This will be a + * shallow copy: the Tlinkable instances in the Object[] array + * have links to one another: changing those will put this list + * into an unpredictable state. Holding a reference to one + * element in the list will prevent the others from being garbage + * collected unless you clear the next/previous links. Caveat + * programmer! + * + * @return an Object[] value + */ + public Object[] toArray() { + Object[] o = new Object[_size]; + int i = 0; + for (TLinkable link = _head; link != null; link = link.getNext()) { + o[i++] = link; + } + return o; + } + + /** + * Copies the list to a native array, destroying the next/previous + * links as the copy is made. This list will be emptied after the + * copy (as if clear() had been invoked). The Object[] array + * returned will contain TLinkables that do not hold + * references to one another and so are less likely to be the + * cause of memory leaks. 
+ * + * @return an Object[] value + */ + public Object[] toUnlinkedArray() { + Object[] o = new Object[_size]; + int i = 0; + for (T link = _head, tmp = null; link != null; i++) { + o[i] = link; + tmp = link; + link = (T) link.getNext(); + tmp.setNext(null); // clear the links + tmp.setPrevious(null); + } + _size = 0; // clear the list + _head = _tail = null; + return o; + } + + /** + * A linear search for o in the list. + * + * @param o an Object value + * @return a boolean value + */ + public boolean contains(Object o) { + for (TLinkable link = _head; link != null; link = link.getNext()) { + if (o.equals(link)) { + return true; + } + } + return false; + } + + + /** + * {@inheritDoc} + */ + @Override + public T get(int index) { + // Blow out for bogus values + if (index < 0 || index >= _size) { + throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + _size); + } + + // Determine if it's better to get there from the front or the back + if (index > (_size >> 1)) { + int position = _size - 1; + T node = _tail; + + while (position > index) { + node = (T) node.getPrevious(); + position--; + } + + return node; + } else { + int position = 0; + T node = _head; + + while (position < index) { + node = (T) node.getNext(); + position++; + } + + return node; + } + } + + + /** + * Returns the head of the list + * + * @return an Object value + */ + public T getFirst() { + return _head; + } + + /** + * Returns the tail of the list. + * + * @return an Object value + */ + public T getLast() { + return _tail; + } + + + /** + * Return the node following the given node. This method exists for two reasons: + *
      + *
    1. It's really not recommended that the methods implemented by TLinkable be + * called directly since they're used internally by this class.
    2. + *
    3. This solves problems arising from generics when working with the linked + * objects directly.
    4. + *
    + *

    + * NOTE: this should only be used with nodes contained in the list. The results are + * undefined with anything else. + */ + public T getNext(T current) { + return (T) current.getNext(); + } + + /** + * Return the node preceding the given node. This method exists for two reasons: + *

      + *
    1. It's really not recommended that the methods implemented by TLinkable be + * called directly since they're used internally by this class.
    2. + *
    3. This solves problems arising from generics when working with the linked + * objects directly.
    4. + *
    + *

    + * NOTE: this should only be used with nodes contained in the list. The results are + * undefined with anything else. + */ + public T getPrevious(T current) { + return (T) current.getPrevious(); + } + + + /** + * Remove and return the first element in the list. + * + * @return an Object value + */ + public T removeFirst() { + T o = _head; + + if (o == null) return null; + + T n = (T) o.getNext(); + o.setNext(null); + + if (null != n) { + n.setPrevious(null); + } + + _head = n; + if (--_size == 0) { + _tail = null; + } + return o; + } + + /** + * Remove and return the last element in the list. + * + * @return an Object value + */ + public T removeLast() { + T o = _tail; + + if (o == null) return null; + + T prev = (T) o.getPrevious(); + o.setPrevious(null); + + if (null != prev) { + prev.setNext(null); + } + _tail = prev; + if (--_size == 0) { + _head = null; + } + return o; + } + + /** + * Implementation of index-based list insertions. + * + * @param index an int value + * @param linkable an object of type TLinkable + */ + protected void insert(int index, T linkable) { + T newLink = linkable; + + if (_size == 0) { + _head = _tail = newLink; // first insertion + } else if (index == 0) { + newLink.setNext(_head); // insert at front + _head.setPrevious(newLink); + _head = newLink; + } else if (index == _size) { // insert at back + _tail.setNext(newLink); + newLink.setPrevious(_tail); + _tail = newLink; + } else { + T node = get(index); + + T before = (T) node.getPrevious(); + if (before != null) before.setNext(linkable); + + linkable.setPrevious(before); + linkable.setNext(node); + node.setPrevious(linkable); + } + _size++; + } + + /** + * Removes the specified element from the list. Note that + * it is the caller's responsibility to ensure that the + * element does, in fact, belong to this list and not another + * instance of TLinkedList. + * + * @param o a TLinkable element already inserted in this list. 
+ * @return true if the element was a TLinkable and removed + */ + public boolean remove(Object o) { + if (o instanceof TLinkable) { + T p, n; + TLinkable link = (TLinkable) o; + + p = (T) link.getPrevious(); + n = (T) link.getNext(); + + if (n == null && p == null) { // emptying the list + // It's possible this object is not something that's in the list. So, + // make sure it's the head if it doesn't point to anything. This solves + // problems caused by removing something multiple times. + if (o != _head) return false; + + _head = _tail = null; + } else if (n == null) { // this is the tail + // make previous the new tail + link.setPrevious(null); + p.setNext(null); + _tail = p; + } else if (p == null) { // this is the head + // make next the new head + link.setNext(null); + n.setPrevious(null); + _head = n; + } else { // somewhere in the middle + p.setNext(n); + n.setPrevious(p); + link.setNext(null); + link.setPrevious(null); + } + + _size--; // reduce size of list + return true; + } else { + return false; + } + } + + /** + * Inserts newElement into the list immediately before current. + * All elements to the right of and including current are shifted + * over. + * + * @param current a TLinkable value currently in the list. + * @param newElement a TLinkable value to be added to + * the list. + */ + public void addBefore(T current, T newElement) { + if (current == _head) { + addFirst(newElement); + } else if (current == null) { + addLast(newElement); + } else { + TLinkable p = current.getPrevious(); + newElement.setNext(current); + p.setNext(newElement); + newElement.setPrevious(p); + current.setPrevious(newElement); + _size++; + } + } + + /** + * Inserts newElement into the list immediately after current. + * All elements to the left of and including current are shifted + * over. + * + * @param current a TLinkable value currently in the list. + * @param newElement a TLinkable value to be added to + * the list. 
+ */ + public void addAfter(T current, T newElement) { + if (current == _tail) { + addLast(newElement); + } else if (current == null) { + addFirst(newElement); + } else { + TLinkable n = current.getNext(); + newElement.setPrevious(current); + newElement.setNext(n); + current.setNext(newElement); + n.setPrevious(newElement); + _size++; + } + } + + + /** + * Executes procedure for each entry in the list. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TObjectProcedure procedure) { + T node = _head; + while (node != null) { + boolean keep_going = procedure.execute(node); + if (!keep_going) return false; + + node = (T) node.getNext(); + } + + return true; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // HEAD + out.writeObject(_head); + + // TAIL + out.writeObject(_tail); + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIED + _size = in.readInt(); + + // HEAD + _head = (T) in.readObject(); + + // TAIL + _tail = (T) in.readObject(); + } + + + /** + * A ListIterator that supports additions and deletions. + */ + protected final class IteratorImpl implements ListIterator { + private int _nextIndex = 0; + private T _next; + private T _lastReturned; + + /** + * Creates a new Iterator instance positioned at + * index. 
+ * + * @param position an int value + */ + IteratorImpl(int position) { + if (position < 0 || position > _size) { + throw new IndexOutOfBoundsException(); + } + + _nextIndex = position; + if (position == 0) { + _next = _head; + } else if (position == _size) { + _next = null; + } else if (position < (_size >> 1)) { + int pos = 0; + for (_next = _head; pos < position; pos++) { + _next = (T) _next.getNext(); + } + } else { + int pos = _size - 1; + for (_next = _tail; pos > position; pos--) { + _next = (T) _next.getPrevious(); + } + } + } + + /** + * Insert linkable at the current position of the iterator. + * Calling next() after add() will return the added object. + * + * @param linkable an object of type TLinkable + */ + public final void add(T linkable) { + _lastReturned = null; + _nextIndex++; + + if (_size == 0) { + TLinkedList.this.add(linkable); + } else { + TLinkedList.this.addBefore(_next, linkable); + } + } + + /** + * True if a call to next() will return an object. + * + * @return a boolean value + */ + public final boolean hasNext() { + return _nextIndex != _size; + } + + /** + * True if a call to previous() will return a value. + * + * @return a boolean value + */ + public final boolean hasPrevious() { + return _nextIndex != 0; + } + + /** + * Returns the value at the Iterator's index and advances the + * iterator. + * + * @return an Object value + * @throws NoSuchElementException if there is no next element + */ + public final T next() { + if (_nextIndex == _size) { + throw new NoSuchElementException(); + } + + _lastReturned = _next; + _next = (T) _next.getNext(); + _nextIndex++; + return _lastReturned; + } + + /** + * returns the index of the next node in the list (the + * one that would be returned by a call to next()). + * + * @return an int value + */ + public final int nextIndex() { + return _nextIndex; + } + + /** + * Returns the value before the Iterator's index and moves the + * iterator back one index. 
+ * + * @return an Object value + * @throws NoSuchElementException if there is no previous element. + */ + public final T previous() { + if (_nextIndex == 0) { + throw new NoSuchElementException(); + } + + if (_nextIndex == _size) { + _lastReturned = _next = _tail; + } else { + _lastReturned = _next = (T) _next.getPrevious(); + } + + _nextIndex--; + return _lastReturned; + } + + /** + * Returns the previous element's index. + * + * @return an int value + */ + public final int previousIndex() { + return _nextIndex - 1; + } + + /** + * Removes the current element in the list and shrinks its + * size accordingly. + * + * @throws IllegalStateException neither next nor previous + * have been invoked, or remove or add have been invoked after + * the last invocation of next or previous. + */ + public final void remove() { + if (_lastReturned == null) { + throw new IllegalStateException("must invoke next or previous before invoking remove"); + } + + if (_lastReturned != _next) { + _nextIndex--; + } + _next = (T) _lastReturned.getNext(); + TLinkedList.this.remove(_lastReturned); + _lastReturned = null; + } + + /** + * Replaces the current element in the list with + * linkable + * + * @param linkable an object of type TLinkable + */ + public final void set(T linkable) { + if (_lastReturned == null) { + throw new IllegalStateException(); + } + T l = linkable; + + // need to check both, since this could be the only + // element in the list. + if (_lastReturned == _head) { + _head = l; + } + + if (_lastReturned == _tail) { + _tail = l; + } + + swap(_lastReturned, l); + _lastReturned = l; + } + + /** + * Replace from with to in the list. 
+ * + * @param from a TLinkable value + * @param to a TLinkable value + */ + private void swap(T from, T to) { + T p = (T) from.getPrevious(); + T n = (T) from.getNext(); + + if (null != p) { + to.setPrevious(p); + p.setNext(to); + } + if (null != n) { + to.setNext(n); + n.setPrevious(to); + } + from.setNext(null); + from.setPrevious(null); + } + } +} // TLinkedList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongArrayList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongArrayList.java new file mode 100644 index 00000000000..29aa3844bc6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongArrayList.java @@ -0,0 +1,935 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; +import java.util.Random; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * A resizable, array-backed list of long primitives. + *

    + * Created: Sat Dec 29 14:21:12 2001 + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TLongArrayList implements Externalizable, Cloneable { + static final long serialVersionUID = 1L; + + /** + * the data of the list + */ + protected long[] _data; + + /** + * the index after the last entry in the list + */ + protected int _pos; + + /** + * the default capacity for new lists + */ + protected static final int DEFAULT_CAPACITY = 10; + + /** + * Creates a new TLongArrayList instance with the + * default capacity. + */ + public TLongArrayList() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TLongArrayList instance with the + * specified capacity. + * + * @param capacity an int value + */ + public TLongArrayList(int capacity) { + _data = new long[capacity]; + _pos = 0; + } + + /** + * Creates a new TLongArrayList instance whose + * capacity is the greater of the length of values and + * DEFAULT_CAPACITY and whose initial contents are the specified + * values. + * + * @param values an long[] value + */ + public TLongArrayList(long[] values) { + this(Math.max(values.length, DEFAULT_CAPACITY)); + add(values); + } + + // sizing + + /** + * Grow the internal array as needed to accommodate the specified + * number of elements. The size of the array longs on each + * resize unless capacity requires more than twice the + * current capacity. + * + * @param capacity an int value + */ + public void ensureCapacity(int capacity) { + if (capacity > _data.length) { + int newCap = Math.max(_data.length << 1, capacity); + long[] tmp = new long[newCap]; + System.arraycopy(_data, 0, tmp, 0, _data.length); + _data = tmp; + } + } + + /** + * Returns the number of values in the list. + * + * @return the number of values in the list. + */ + public int size() { + return _pos; + } + + /** + * Tests whether this list contains any values. + * + * @return true if the list is empty. 
+ */ + public boolean isEmpty() { + return _pos == 0; + } + + /** + * Sheds any excess capacity above and beyond the current size of + * the list. + */ + public void trimToSize() { + if (_data.length > size()) { + long[] tmp = new long[size()]; + toNativeArray(tmp, 0, tmp.length); + _data = tmp; + } + } + + // modifying + + /** + * Adds val to the end of the list, growing as needed. + * + * @param val an long value + */ + public void add(long val) { + ensureCapacity(_pos + 1); + _data[_pos++] = val; + } + + /** + * Adds the values in the array vals to the end of the + * list, in order. + * + * @param vals an long[] value + */ + public void add(long[] vals) { + add(vals, 0, vals.length); + } + + /** + * Adds a subset of the values in the array vals to the + * end of the list, in order. + * + * @param vals an long[] value + * @param offset the offset at which to start copying + * @param length the number of values to copy. + */ + public void add(long[] vals, int offset, int length) { + ensureCapacity(_pos + length); + System.arraycopy(vals, offset, _data, _pos, length); + _pos += length; + } + + /** + * Inserts value into the list at offset. All + * values including and to the right of offset are shifted + * to the right. + * + * @param offset an int value + * @param value an long value + */ + public void insert(int offset, long value) { + if (offset == _pos) { + add(value); + return; + } + ensureCapacity(_pos + 1); + // shift right + System.arraycopy(_data, offset, _data, offset + 1, _pos - offset); + // insert + _data[offset] = value; + _pos++; + } + + /** + * Inserts the array of values into the list at + * offset. All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an long[] value + */ + public void insert(int offset, long[] values) { + insert(offset, values, 0, values.length); + } + + /** + * Inserts a slice of the array of values into the list + * at offset. 
All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an long[] value + * @param valOffset the offset in the values array at which to + * start copying. + * @param len the number of values to copy from the values array + */ + public void insert(int offset, long[] values, int valOffset, int len) { + if (offset == _pos) { + add(values, valOffset, len); + return; + } + + ensureCapacity(_pos + len); + // shift right + System.arraycopy(_data, offset, _data, offset + len, _pos - offset); + // insert + System.arraycopy(values, valOffset, _data, offset, len); + _pos += len; + } + + /** + * Returns the value at the specified offset. + * + * @param offset an int value + * @return an long value + */ + public long get(int offset) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + return _data[offset]; + } + + /** + * Returns the value at the specified offset without doing any + * bounds checking. + * + * @param offset an int value + * @return an long value + */ + public long getQuick(int offset) { + return _data[offset]; + } + + /** + * Sets the value at the specified offset. + * + * @param offset an int value + * @param val an long value + */ + public void set(int offset, long val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + _data[offset] = val; + } + + /** + * Sets the value at the specified offset and returns the + * previously stored value. + * + * @param offset an int value + * @param val an long value + * @return the value previously stored at offset. + */ + public long getSet(int offset, long val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + long old = _data[offset]; + _data[offset] = val; + return old; + } + + /** + * Replace the values in the list starting at offset with + * the contents of the values array. 
+ * + * @param offset the first offset to replace + * @param values the source of the new values + */ + public void set(int offset, long[] values) { + set(offset, values, 0, values.length); + } + + /** + * Replace the values in the list starting at offset with + * length values from the values array, starting + * at valOffset. + * + * @param offset the first offset to replace + * @param values the source of the new values + * @param valOffset the first value to copy from the values array + * @param length the number of values to copy + */ + public void set(int offset, long[] values, int valOffset, int length) { + if (offset < 0 || offset + length > _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(values, valOffset, _data, offset, length); + } + + /** + * Sets the value at the specified offset without doing any bounds + * checking. + * + * @param offset an int value + * @param val an long value + */ + public void setQuick(int offset, long val) { + _data[offset] = val; + } + + /** + * Flushes the internal state of the list, resetting the capacity + * to the default. + */ + public void clear() { + clear(DEFAULT_CAPACITY); + } + + /** + * Flushes the internal state of the list, setting the capacity of + * the empty list to capacity. + * + * @param capacity an int value + */ + public void clear(int capacity) { + _data = new long[capacity]; + _pos = 0; + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list without + * allocating new backing arrays. + * + * @see #clear + */ + public void reset() { + _pos = 0; + fill((long) 0); + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list + * without allocating new backing arrays. 
This method differs + * from {@link #reset reset} in that it does not clear the old + * values in the backing array. Thus, it is possible for {@link + * #getQuick getQuick} to return stale data if this method is used + * and the caller is careless about bounds checking. + * + * @see #reset + * @see #clear + * @see #getQuick + */ + public void resetQuick() { + _pos = 0; + } + + /** + * Removes the value at offset from the list. + * + * @param offset an int value + * @return the value previously stored at offset. + */ + public long remove(int offset) { + long old = get(offset); + remove(offset, 1); + return old; + } + + /** + * Removes length values from the list, starting at + * offset + * + * @param offset an int value + * @param length an int value + */ + public void remove(int offset, int length) { + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + + if (offset == 0) { + // data at the front + System.arraycopy(_data, length, _data, 0, _pos - length); + } else if (_pos - length == offset) { + // no copy to make, decrementing pos "deletes" values at + // the end + } else { + // data in the middle + System.arraycopy(_data, offset + length, + _data, offset, _pos - (offset + length)); + } + _pos -= length; + // no need to clear old values beyond _pos, because this is a + // primitive collection and 0 takes as much room as any other + // value + } + + /** + * Transform each value in the list using the specified function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + for (int i = _pos; i-- > 0;) { + _data[i] = function.execute(_data[i]); + } + } + + /** + * Reverse the order of the elements in the list. + */ + public void reverse() { + reverse(0, _pos); + } + + /** + * Reverse the order of the elements in the range of the list. 
+ * + * @param from the inclusive index at which to start reversing + * @param to the exclusive index at which to stop reversing + */ + public void reverse(int from, int to) { + if (from == to) { + return; // nothing to do + } + if (from > to) { + throw new IllegalArgumentException("from cannot be greater than to"); + } + for (int i = from, j = to - 1; i < j; i++, j--) { + swap(i, j); + } + } + + /** + * Shuffle the elements of the list using the specified random + * number generator. + * + * @param rand a Random value + */ + public void shuffle(Random rand) { + for (int i = _pos; i-- > 1;) { + swap(i, rand.nextInt(i)); + } + } + + /** + * Swap the values at offsets i and j. + * + * @param i an offset into the data array + * @param j an offset into the data array + */ + private final void swap(int i, int j) { + long tmp = _data[i]; + _data[i] = _data[j]; + _data[j] = tmp; + } + + // copying + + /** + * Returns a clone of this list. Since this is a primitive + * collection, this will be a deep clone. + * + * @return a deep clone of the list. + */ + public Object clone() { + TLongArrayList list = null; + try { + list = (TLongArrayList) super.clone(); + list._data = toNativeArray(); + } catch (CloneNotSupportedException e) { + // it's supported + } // end of try-catch + return list; + } + + + /** + * Returns a sublist of this list. + * + * @param begin low endpoint (inclusive) of the subList. + * @param end high endpoint (exclusive) of the subList. + * @return sublist of this list from begin, inclusive to end, exclusive. 
+ * @throws IndexOutOfBoundsException - endpoint out of range + * @throws IllegalArgumentException - endpoints out of order (end > begin) + */ + public TLongArrayList subList(int begin, int end) { + if (end < begin) throw new IllegalArgumentException("end index " + end + " greater than begin index " + begin); + if (begin < 0) throw new IndexOutOfBoundsException("begin index can not be < 0"); + if (end > _data.length) throw new IndexOutOfBoundsException("end index < " + _data.length); + TLongArrayList list = new TLongArrayList(end - begin); + for (int i = begin; i < end; i++) { + list.add(_data[i]); + } + return list; + } + + + /** + * Copies the contents of the list into a native array. + * + * @return an long[] value + */ + public long[] toNativeArray() { + return toNativeArray(0, _pos); + } + + /** + * Copies a slice of the list into a native array. + * + * @param offset the offset at which to start copying + * @param len the number of values to copy. + * @return an long[] value + */ + public long[] toNativeArray(int offset, int len) { + long[] rv = new long[len]; + toNativeArray(rv, offset, len); + return rv; + } + + /** + * Copies a slice of the list into a native array. + * + * @param dest the array to copy into. + * @param offset the offset of the first value to copy + * @param len the number of values to copy. + */ + public void toNativeArray(long[] dest, int offset, int len) { + if (len == 0) { + return; // nothing to copy + } + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(_data, offset, dest, 0, len); + } + + // comparing + + /** + * Compares this list to another list, value by value. + * + * @param other the object to compare against + * @return true if other is a TLongArrayList and has exactly the + * same values. 
+ */ + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TLongArrayList) { + TLongArrayList that = (TLongArrayList) other; + if (that.size() != this.size()) { + return false; + } else { + for (int i = _pos; i-- > 0;) { + if (this._data[i] != that._data[i]) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + public int hashCode() { + int h = 0; + for (int i = _pos; i-- > 0;) { + h = 37 * h + HashFunctions.hash(_data[i]); + } + return h; + } + + // procedures + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a TLongProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEach(TLongProcedure procedure) { + for (int i = 0; i < _pos; i++) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + /** + * Applies the procedure to each value in the list in descending + * (back to front) order. + * + * @param procedure a TLongProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEachDescending(TLongProcedure procedure) { + for (int i = _pos; i-- > 0;) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + // sorting + + /** + * Sort the values in the list (ascending) using the Sun quicksort + * implementation. + * + * @see java.util.Arrays#sort + */ + public void sort() { + Arrays.sort(_data, 0, _pos); + } + + /** + * Sort a slice of the list (ascending) using the Sun quicksort + * implementation. + * + * @param fromIndex the index at which to start sorting (inclusive) + * @param toIndex the index at which to stop sorting (exclusive) + * @see java.util.Arrays#sort + */ + public void sort(int fromIndex, int toIndex) { + Arrays.sort(_data, fromIndex, toIndex); + } + + // filling + + /** + * Fills every slot in the list with the specified value. 
+ * + * @param val the value to use when filling + */ + public void fill(long val) { + Arrays.fill(_data, 0, _pos, val); + } + + /** + * Fills a range in the list with the specified value. + * + * @param fromIndex the offset at which to start filling (inclusive) + * @param toIndex the offset at which to stop filling (exclusive) + * @param val the value to use when filling + */ + public void fill(int fromIndex, int toIndex, long val) { + if (toIndex > _pos) { + ensureCapacity(toIndex); + _pos = toIndex; + } + Arrays.fill(_data, fromIndex, toIndex, val); + } + + // searching + + /** + * Performs a binary search for value in the entire list. + * Note that you must @{link #sort sort} the list before + * doing a search. + * + * @param value the value to search for + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(long value) { + return binarySearch(value, 0, _pos); + } + + /** + * Performs a binary search for value in the specified + * range. Note that you must @{link #sort sort} the list + * or the range before doing a search. + * + * @param value the value to search for + * @param fromIndex the lower boundary of the range (inclusive) + * @param toIndex the upper boundary of the range (exclusive) + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(long value, int fromIndex, int toIndex) { + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); + } + if (toIndex > _pos) { + throw new ArrayIndexOutOfBoundsException(toIndex); + } + + int low = fromIndex; + int high = toIndex - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + long midVal = _data[mid]; + + if (midVal < value) { + low = mid + 1; + } else if (midVal > value) { + high = mid - 1; + } else { + return mid; // value found + } + } + return -(low + 1); // value not found. 
+ } + + /** + * Searches the list front to back for the index of + * value. + * + * @param value an long value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(long value) { + return indexOf(0, value); + } + + /** + * Searches the list front to back for the index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (inclusive) + * @param value an long value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int offset, long value) { + for (int i = offset; i < _pos; i++) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list back to front for the last index of + * value. + * + * @param value an long value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(long value) { + return lastIndexOf(_pos, value); + } + + /** + * Searches the list back to front for the last index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (exclusive) + * @param value an long value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int offset, long value) { + for (int i = offset; i-- > 0;) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list for value + * + * @param value an long value + * @return true if value is in the list. + */ + public boolean contains(long value) { + return lastIndexOf(value) >= 0; + } + + /** + * Searches the list for values satisfying condition in + * the manner of the *nix grep utility. 
+ * + * @param condition a condition to apply to each element in the list + * @return a list of values which match the condition. + */ + public TLongArrayList grep(TLongProcedure condition) { + TLongArrayList list = new TLongArrayList(); + for (int i = 0; i < _pos; i++) { + if (condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Searches the list for values which do not satisfy + * condition. This is akin to *nix grep -v. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which do not match the condition. + */ + public TLongArrayList inverseGrep(TLongProcedure condition) { + TLongArrayList list = new TLongArrayList(); + for (int i = 0; i < _pos; i++) { + if (!condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Finds the maximum value in the list. + * + * @return the largest value in the list. + * @throws IllegalStateException if the list is empty + */ + public long max() { + if (size() == 0) { + throw new IllegalStateException("cannot find maximum of an empty list"); + } + long max = Long.MIN_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] > max) { + max = _data[i]; + } + } + return max; + } + + /** + * Finds the minimum value in the list. + * + * @return the smallest value in the list. + * @throws IllegalStateException if the list is empty + */ + public long min() { + if (size() == 0) { + throw new IllegalStateException("cannot find minimum of an empty list"); + } + long min = Long.MAX_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] < min) { + min = _data[i]; + } + } + return min; + } + + // stringification + + /** + * Returns a String representation of the list, front to back. 
+ * + * @return a String value + */ + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + for (int i = 0, end = _pos - 1; i < end; i++) { + buf.append(_data[i]); + buf.append(", "); + } + if (size() > 0) { + buf.append(_data[_pos - 1]); + } + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // POSITION + out.writeInt(_pos); + + // ENTRIES + int len = _pos; + out.writeInt(_pos); // Written twice for backwards compatability with + // version 0 + for (int i = 0; i < len; i++) { + out.writeLong(_data[i]); + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // POSITION + _pos = in.readInt(); + + // ENTRIES + int len = in.readInt(); + _data = new long[len]; + for (int i = 0; i < len; i++) { + _data[i] = in.readLong(); + } + } +} // TLongArrayList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteHashMap.java new file mode 100644 index 00000000000..45e38ab7d0d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TLongByteHashMap extends TLongHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TLongByteProcedure PUT_ALL_PROC = new TLongByteProcedure() { + public boolean execute(long key, byte value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TLongByteHashMap instance with the default + * capacity and load factor. + */ + public TLongByteHashMap() { + super(); + } + + /** + * Creates a new TLongByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TLongByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TLongByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TLongByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TLongByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongByteHashMap(TLongHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TLongByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TLongByteHashMap(int initialCapacity, TLongHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TLongByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongByteHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TLongByteHashMap m = (TLongByteHashMap) super.clone(); + m._values = (byte[]) this._values.clone(); + return m; + } + + /** + * @return a TLongByteIterator with access to this map's keys and values + */ + public TLongByteIterator iterator() { + return new TLongByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an long value + * @param value an byte value + * @return the previous value associated with key, + * or (long)0 if none was found. + */ + public byte put(long key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an long value + * @param value an byte value + * @return the previous value associated with key, + * or (long)0 if none was found. 
+ */ + public byte putIfAbsent(long key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(long key, byte value, int index) { + byte previousState; + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TLongByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + long oldKeys[] = _set; + byte oldVals[] = _values; + byte oldStates[] = _states; + + _set = new long[newCapacity]; + _values = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + long o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an long value + * @return the value of key or (long)0 if no such mapping exists. + */ + public byte get(long key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + long[] keys = _set; + byte[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (long) 0); + Arrays.fill(_values, 0, _values.length, (byte) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an long value + * @return an byte value, or (long)0 if no mapping for key exists + */ + public byte remove(long key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TLongByteHashMap)) { + return false; + } + TLongByteHashMap that = (TLongByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TLongByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(long key, byte value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TLongByteProcedure { + private final TLongByteHashMap _otherMap; + + EqProcedure(TLongByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(long key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. 
+ */ + private final boolean eq(byte v1, byte v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (byte) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public byte[] getValues() { + byte[] vals = new byte[size()]; + byte[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public long[] keys() { + long[] keys = new long[size()]; + long[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public long[] keys(long[] a) { + int size = size(); + if (a.length < size) { + a = (long[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + long[] k = (long[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an byte value + * @return a boolean value + */ + public boolean containsValue(byte val) { + byte[] states = _states; + byte[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an long value + * @return a boolean value + */ + public boolean containsKey(long key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TLongProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TByteProcedure procedure) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOLongByteProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TLongByteProcedure procedure) { + byte[] states = _states; + long[] keys = _set; + byte[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TLongByteProcedure procedure) { + boolean modified = false; + byte[] states = _states; + long[] keys = _set; + byte[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(long key) { + return adjustValue(key, (byte) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(long key, byte amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public byte adjustOrPutValue(final long key, final byte adjust_amount, final byte put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final byte newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + long key = in.readLong(); + byte val = in.readByte(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TLongByteProcedure() { + private boolean first = true; + + public boolean execute(long key, byte value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TLongByteHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteIterator.java new file mode 100644 index 00000000000..6a1c476927b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongByteIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongByteHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongByteIterator(TLongByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TLongByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteProcedure.java new file mode 100644 index 00000000000..c7150b1e3f1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and byte. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongByteProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b a byte value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, byte b); +}// TLongByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleHashMap.java new file mode 100644 index 00000000000..09384ea4d9c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and double values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TLongDoubleHashMap extends TLongHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TLongDoubleProcedure PUT_ALL_PROC = new TLongDoubleProcedure() { + public boolean execute(long key, double value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient double[] _values; + + /** + * Creates a new TLongDoubleHashMap instance with the default + * capacity and load factor. + */ + public TLongDoubleHashMap() { + super(); + } + + /** + * Creates a new TLongDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TLongDoubleHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TLongDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TLongDoubleHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TLongDoubleHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongDoubleHashMap(TLongHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TLongDoubleHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TLongDoubleHashMap(int initialCapacity, TLongHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TLongDoubleHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongDoubleHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TLongDoubleHashMap m = (TLongDoubleHashMap) super.clone(); + m._values = (double[]) this._values.clone(); + return m; + } + + /** + * @return a TLongDoubleIterator with access to this map's keys and values + */ + public TLongDoubleIterator iterator() { + return new TLongDoubleIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new double[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an long value + * @param value an double value + * @return the previous value associated with key, + * or (long)0 if none was found. + */ + public double put(long key, double value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an long value + * @param value an double value + * @return the previous value associated with key, + * or (long)0 if none was found. 
+ */ + public double putIfAbsent(long key, double value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private double doPut(long key, double value, int index) { + byte previousState; + double previous = (double) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TLongDoubleHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + long oldKeys[] = _set; + double oldVals[] = _values; + byte oldStates[] = _states; + + _set = new long[newCapacity]; + _values = new double[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + long o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an long value + * @return the value of key or (long)0 if no such mapping exists. + */ + public double get(long key) { + int index = index(key); + return index < 0 ? (double) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + long[] keys = _set; + double[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (long) 0); + Arrays.fill(_values, 0, _values.length, (double) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an long value + * @return an double value, or (long)0 if no mapping for key exists + */ + public double remove(long key) { + double prev = (double) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TLongDoubleHashMap)) { + return false; + } + TLongDoubleHashMap that = (TLongDoubleHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TLongDoubleProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(long key, double value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TLongDoubleProcedure { + private final TLongDoubleHashMap _otherMap; + + EqProcedure(TLongDoubleHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(long key, double value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two doubles for equality. 
+ */ + private final boolean eq(double v1, double v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (double) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public double[] getValues() { + double[] vals = new double[size()]; + double[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public long[] keys() { + long[] keys = new long[size()]; + long[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public long[] keys(long[] a) { + int size = size(); + if (a.length < size) { + a = (long[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + long[] k = (long[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an double value + * @return a boolean value + */ + public boolean containsValue(double val) { + byte[] states = _states; + double[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an long value + * @return a boolean value + */ + public boolean containsKey(long key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TLongProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TDoubleProcedure procedure) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOLongDoubleProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TLongDoubleProcedure procedure) { + byte[] states = _states; + long[] keys = _set; + double[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TLongDoubleProcedure procedure) { + boolean modified = false; + byte[] states = _states; + long[] keys = _set; + double[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(long key) { + return adjustValue(key, (double) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(long key, double amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public double adjustOrPutValue(final long key, final double adjust_amount, final double put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final double newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + long key = in.readLong(); + double val = in.readDouble(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TLongDoubleProcedure() { + private boolean first = true; + + public boolean execute(long key, double value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TLongDoubleHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleIterator.java new file mode 100644 index 00000000000..7a7120d315f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongDoubleIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongDoubleHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongDoubleIterator(TLongDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TLongDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleProcedure.java new file mode 100644 index 00000000000..320a71a1cbc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, double b); +}// TLongDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatHashMap.java new file mode 100644 index 00000000000..03c408bfb6c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TLongFloatHashMap extends TLongHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TLongFloatProcedure PUT_ALL_PROC = new TLongFloatProcedure() { + public boolean execute(long key, float value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TLongFloatHashMap instance with the default + * capacity and load factor. + */ + public TLongFloatHashMap() { + super(); + } + + /** + * Creates a new TLongFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TLongFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TLongFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TLongFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TLongFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongFloatHashMap(TLongHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TLongFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TLongFloatHashMap(int initialCapacity, TLongHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TLongFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongFloatHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TLongFloatHashMap m = (TLongFloatHashMap) super.clone(); + m._values = (float[]) this._values.clone(); + return m; + } + + /** + * @return a TLongFloatIterator with access to this map's keys and values + */ + public TLongFloatIterator iterator() { + return new TLongFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an long value + * @param value an float value + * @return the previous value associated with key, + * or (long)0 if none was found. + */ + public float put(long key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an long value + * @param value an float value + * @return the previous value associated with key, + * or (long)0 if none was found. 
+ */ + public float putIfAbsent(long key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(long key, float value, int index) { + byte previousState; + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TLongFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + long oldKeys[] = _set; + float oldVals[] = _values; + byte oldStates[] = _states; + + _set = new long[newCapacity]; + _values = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + long o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an long value + * @return the value of key or (long)0 if no such mapping exists. + */ + public float get(long key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + long[] keys = _set; + float[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (long) 0); + Arrays.fill(_values, 0, _values.length, (float) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an long value + * @return an float value, or (long)0 if no mapping for key exists + */ + public float remove(long key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TLongFloatHashMap)) { + return false; + } + TLongFloatHashMap that = (TLongFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TLongFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(long key, float value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TLongFloatProcedure { + private final TLongFloatHashMap _otherMap; + + EqProcedure(TLongFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(long key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. 
+ */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (float) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public long[] keys() { + long[] keys = new long[size()]; + long[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public long[] keys(long[] a) { + int size = size(); + if (a.length < size) { + a = (long[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + long[] k = (long[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + byte[] states = _states; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an long value + * @return a boolean value + */ + public boolean containsKey(long key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TLongProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOLongFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TLongFloatProcedure procedure) { + byte[] states = _states; + long[] keys = _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TLongFloatProcedure procedure) { + boolean modified = false; + byte[] states = _states; + long[] keys = _set; + float[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(long key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(long key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final long key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + long key = in.readLong(); + float val = in.readFloat(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TLongFloatProcedure() { + private boolean first = true; + + public boolean execute(long key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TLongFloatHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatIterator.java new file mode 100644 index 00000000000..fecb59c90ff --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongFloatIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongFloatHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongFloatIterator(TLongFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TLongFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatProcedure.java new file mode 100644 index 00000000000..ae720f84f4e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, float b); +}// TLongFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFunction.java new file mode 100644 index 00000000000..33582da9e42 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongFunction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for functions that accept and return one long primitive. + *

/**
 * Interface for functions that accept and return one long primitive.
 * <p/>
 * Created: Mon Nov  5 22:19:36 2001
 *
 * @author Eric D. Friedman
 * @version $Id: PFunction.template,v 1.1 2006/11/10 23:28:00 robeden Exp $
 */

public interface TLongFunction {
    /**
     * Execute this function with <tt>value</tt>.
     *
     * @param value a long input
     * @return a long result; implementations decide how the result
     *         relates to the input (e.g. used by map transformValues)
     */
    public long execute(long value);
}// TLongFunction

/**
 * An open addressed hashing implementation for long primitives.
 * Provides the probing/lookup machinery shared by long-keyed sets and maps;
 * slot states (FREE/FULL/REMOVED) and sizing live in the unseen
 * TPrimitiveHash superclass.
 * <p/>
 * Created: Sun Nov 4 08:56:06 2001
 *
 * @author Eric D. Friedman
 * @version $Id: PHash.template,v 1.2 2007/06/29 22:39:46 robeden Exp $
 */

abstract public class TLongHash extends TPrimitiveHash implements TLongHashingStrategy {

    /**
     * the set of longs
     */
    // transient: serialized explicitly by subclasses via Externalizable,
    // not by default Java serialization
    protected transient long[] _set;

    /**
     * strategy used to hash values in this collection
     */
    protected TLongHashingStrategy _hashingStrategy;

    /**
     * Creates a new <code>TLongHash</code> instance with the default
     * capacity and load factor. The instance itself acts as the
     * hashing strategy (see {@link #computeHashCode(long)}).
     */
    public TLongHash() {
        super();
        this._hashingStrategy = this;
    }

    /**
     * Creates a new <code>TLongHash</code> instance whose capacity
     * is the next highest prime above <tt>initialCapacity + 1</tt>
     * unless that value is already prime.
     *
     * @param initialCapacity an <code>int</code> value
     */
    public TLongHash(int initialCapacity) {
        super(initialCapacity);
        this._hashingStrategy = this;
    }

    /**
     * Creates a new <code>TLongHash</code> instance with a prime
     * value at or near the specified capacity and load factor.
     *
     * @param initialCapacity used to find a prime capacity for the table.
     * @param loadFactor      used to calculate the threshold over which
     *                        rehashing takes place.
     */
    public TLongHash(int initialCapacity, float loadFactor) {
        super(initialCapacity, loadFactor);
        this._hashingStrategy = this;
    }

    /**
     * Creates a new <code>TLongHash</code> instance with the default
     * capacity and load factor.
     *
     * @param strategy used to compute hash codes and to compare keys.
     */
    public TLongHash(TLongHashingStrategy strategy) {
        super();
        this._hashingStrategy = strategy;
    }

    /**
     * Creates a new <code>TLongHash</code> instance whose capacity
     * is the next highest prime above <tt>initialCapacity + 1</tt>
     * unless that value is already prime.
     *
     * @param initialCapacity an <code>int</code> value
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TLongHash(int initialCapacity, TLongHashingStrategy strategy) {
        super(initialCapacity);
        this._hashingStrategy = strategy;
    }

    /**
     * Creates a new <code>TLongHash</code> instance with a prime
     * value at or near the specified capacity and load factor.
     *
     * @param initialCapacity used to find a prime capacity for the table.
     * @param loadFactor      used to calculate the threshold over which
     *                        rehashing takes place.
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TLongHash(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) {
        super(initialCapacity, loadFactor);
        this._hashingStrategy = strategy;
    }

    /**
     * @return a deep clone of this collection
     */
    public Object clone() {
        TLongHash h = (TLongHash) super.clone();
        // copy the key array so the clone does not share storage;
        // note: if the clone was constructed with itself as strategy,
        // _hashingStrategy still points at the original after super.clone()
        // — harmless here because the default strategy is stateless
        h._set = (long[]) this._set.clone();
        return h;
    }

    /**
     * initializes the hashtable to a prime capacity which is at least
     * <tt>initialCapacity + 1</tt>.
     *
     * @param initialCapacity an <code>int</code> value
     * @return the actual capacity chosen
     */
    protected int setUp(int initialCapacity) {
        int capacity;

        capacity = super.setUp(initialCapacity);
        _set = new long[capacity];
        return capacity;
    }

    /**
     * Searches the set for <tt>val</tt>.
     *
     * @param val an <code>long</code> value
     * @return a <code>boolean</code> value
     */
    public boolean contains(long val) {
        return index(val) >= 0;
    }

    /**
     * Executes <tt>procedure</tt> for each element in the set.
     * Iterates the backing array from high index to low.
     *
     * @param procedure a <code>TObjectProcedure</code> value
     * @return false if the loop over the set terminated because
     *         the procedure returned false for some value.
     */
    public boolean forEach(TLongProcedure procedure) {
        byte[] states = _states;
        long[] set = _set;
        for (int i = set.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(set[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Releases the element currently stored at <tt>index</tt>.
     *
     * @param index an <code>int</code> value
     */
    protected void removeAt(int index) {
        // zero the key slot; the REMOVED/size bookkeeping is done by super
        _set[index] = (long) 0;
        super.removeAt(index);
    }

    /**
     * Locates the index of <tt>val</tt>.
     *
     * @param val an <code>long</code> value
     * @return the index of <tt>val</tt> or -1 if it isn't in the set.
     */
    protected int index(long val) {
        int hash, probe, index, length;

        final byte[] states = _states;
        final long[] set = _set;
        length = states.length;
        // mask the sign bit so the hash (and thus index) is non-negative
        hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff;
        index = hash % length;

        if (states[index] != FREE &&
                (states[index] == REMOVED || set[index] != val)) {
            // see Knuth, p. 529: double hashing — secondary step is
            // relatively prime to the (prime) table length
            probe = 1 + (hash % (length - 2));

            do {
                index -= probe;
                if (index < 0) {
                    index += length;
                }
                // keep probing past REMOVED slots; only FREE terminates a miss
            } while (states[index] != FREE &&
                    (states[index] == REMOVED || set[index] != val));
        }

        return states[index] == FREE ? -1 : index;
    }

    /**
     * Locates the index at which <tt>val</tt> can be inserted. if
     * there is already a value equal()ing <tt>val</tt> in the set,
     * returns that value as a negative integer.
     *
     * @param val an <code>long</code> value
     * @return an <code>int</code> value: the insertion slot, or
     *         <tt>-(existing index) - 1</tt> when the value is present
     */
    protected int insertionIndex(long val) {
        int hash, probe, index, length;

        final byte[] states = _states;
        final long[] set = _set;
        length = states.length;
        hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff;
        index = hash % length;

        if (states[index] == FREE) {
            return index; // empty, all done
        } else if (states[index] == FULL && set[index] == val) {
            return -index - 1; // already stored
        } else { // already FULL or REMOVED, must probe
            // compute the double hash
            probe = 1 + (hash % (length - 2));

            // if the slot we landed on is FULL (but not removed), probe
            // until we find an empty slot, a REMOVED slot, or an element
            // equal to the one we are trying to insert.
            // finding an empty slot means that the value is not present
            // and that we should use that slot as the insertion point;
            // finding a REMOVED slot means that we need to keep searching,
            // however we want to remember the offset of that REMOVED slot
            // so we can reuse it in case a "new" insertion (i.e. not an update)
            // is possible.
            // finding a matching value means that we've found that our desired
            // key is already in the table

            if (states[index] != REMOVED) {
                // starting at the natural offset, probe until we find an
                // offset that isn't full.
                do {
                    index -= probe;
                    if (index < 0) {
                        index += length;
                    }
                } while (states[index] == FULL && set[index] != val);
            }

            // if the index we found was removed: continue probing until we
            // locate a free location or an element which equal()s the
            // one we have.
            if (states[index] == REMOVED) {
                int firstRemoved = index;
                while (states[index] != FREE &&
                        (states[index] == REMOVED || set[index] != val)) {
                    index -= probe;
                    if (index < 0) {
                        index += length;
                    }
                }
                // reuse the first REMOVED slot seen unless the key was found
                return states[index] == FULL ? -index - 1 : firstRemoved;
            }
            // if it's full, the key is already stored
            return states[index] == FULL ? -index - 1 : index;
        }
    }

    /**
     * Default implementation of TLongHashingStrategy:
     * delegates hashing to HashFunctions.hash(long).
     *
     * @param val the value to hash
     * @return the hashcode.
     */
    public final int computeHashCode(long val) {
        return HashFunctions.hash(val);
    }
} // TLongHash
/**
 * An open addressed set implementation for long primitives.
 * Probing and storage come from {@link TLongHash}; this class adds the
 * set operations and Externalizable support.
 *
 * @author Eric D. Friedman
 * @author Rob Eden
 */

public class TLongHashSet extends TLongHash implements Externalizable {
    static final long serialVersionUID = 1L;

    /**
     * Creates a new <code>TLongHashSet</code> instance with the default
     * capacity and load factor.
     */
    public TLongHashSet() {
        super();
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance with a prime
     * capacity equal to or greater than <tt>initialCapacity</tt> and
     * with the default load factor.
     *
     * @param initialCapacity an <code>int</code> value
     */
    public TLongHashSet(int initialCapacity) {
        super(initialCapacity);
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance with a prime
     * capacity equal to or greater than <tt>initialCapacity</tt> and
     * with the specified load factor.
     *
     * @param initialCapacity an <code>int</code> value
     * @param loadFactor      a <code>float</code> value
     */
    public TLongHashSet(int initialCapacity, float loadFactor) {
        super(initialCapacity, loadFactor);
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance containing the
     * elements of <tt>array</tt>.
     *
     * @param array an array of <code>long</code> primitives
     */
    public TLongHashSet(long[] array) {
        this(array.length);
        addAll(array);
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance with the default
     * capacity and load factor.
     *
     * @param strategy used to compute hash codes and to compare keys.
     */
    public TLongHashSet(TLongHashingStrategy strategy) {
        super(strategy);
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance whose capacity
     * is the next highest prime above <tt>initialCapacity + 1</tt>
     * unless that value is already prime.
     *
     * @param initialCapacity an <code>int</code> value
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TLongHashSet(int initialCapacity, TLongHashingStrategy strategy) {
        super(initialCapacity, strategy);
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance with a prime
     * value at or near the specified capacity and load factor.
     *
     * @param initialCapacity used to find a prime capacity for the table.
     * @param loadFactor      used to calculate the threshold over which
     *                        rehashing takes place.
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TLongHashSet(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) {
        super(initialCapacity, loadFactor, strategy);
    }

    /**
     * Creates a new <code>TLongHashSet</code> instance containing the
     * elements of <tt>array</tt>.
     *
     * @param array    an array of <code>long</code> primitives
     * @param strategy used to compute hash codes and to compare keys.
     */
    public TLongHashSet(long[] array, TLongHashingStrategy strategy) {
        this(array.length, strategy);
        addAll(array);
    }

    /**
     * @return a TLongIterator with access to the values in this set
     */
    public TLongIterator iterator() {
        return new TLongIterator(this);
    }

    /**
     * Inserts a value into the set.
     *
     * @param val an <code>long</code> value
     * @return true if the set was modified by the add operation
     */
    public boolean add(long val) {
        int index = insertionIndex(val);

        if (index < 0) {
            return false; // already present in set, nothing to add
        }

        byte previousState = _states[index];
        _set[index] = val;
        _states[index] = FULL;
        // only count a consumed FREE slot toward the rehash threshold;
        // reusing a REMOVED slot does not grow the occupied-slot count
        postInsertHook(previousState == FREE);

        return true; // yes, we added something
    }

    /**
     * Expands the set to accommodate new values: re-inserts every FULL
     * entry into a fresh table of <tt>newCapacity</tt>.
     *
     * @param newCapacity an <code>int</code> value
     */
    protected void rehash(int newCapacity) {
        int oldCapacity = _set.length;
        long oldSet[] = _set;
        byte oldStates[] = _states;

        _set = new long[newCapacity];
        _states = new byte[newCapacity];

        for (int i = oldCapacity; i-- > 0;) {
            if (oldStates[i] == FULL) {
                long o = oldSet[i];
                int index = insertionIndex(o);
                _set[index] = o;
                _states[index] = FULL;
            }
        }
    }

    /**
     * Returns a new array containing the values in the set.
     * Order follows the internal table layout and is unspecified.
     *
     * @return an <code>long[]</code> value
     */
    public long[] toArray() {
        long[] result = new long[size()];
        long[] set = _set;
        byte[] states = _states;

        for (int i = states.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                result[j++] = set[i];
            }
        }
        return result;
    }

    /**
     * Empties the set.
     */
    public void clear() {
        super.clear();
        long[] set = _set;
        byte[] states = _states;

        for (int i = set.length; i-- > 0;) {
            set[i] = (long) 0;
            states[i] = FREE;
        }
    }

    /**
     * Compares this set with another set for equality of their stored
     * entries: same size and every element of this set contained in
     * <tt>other</tt>.
     *
     * @param other an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean equals(Object other) {
        if (!(other instanceof TLongHashSet)) {
            return false;
        }
        final TLongHashSet that = (TLongHashSet) other;
        if (that.size() != this.size()) {
            return false;
        }
        return forEach(new TLongProcedure() {
            public final boolean execute(long value) {
                return that.contains(value);
            }
        });
    }

    public int hashCode() {
        HashProcedure p = new HashProcedure();
        forEach(p);
        return p.getHashCode();
    }

    /** Accumulates an order-independent (sum-based) hash of the elements. */
    private final class HashProcedure implements TLongProcedure {
        private int h = 0;

        public int getHashCode() {
            return h;
        }

        public final boolean execute(long key) {
            h += _hashingStrategy.computeHashCode(key);
            return true;
        }
    }

    /**
     * Removes <tt>val</tt> from the set.
     *
     * @param val an <code>long</code> value
     * @return true if the set was modified by the remove operation.
     */
    public boolean remove(long val) {
        int index = index(val);
        if (index >= 0) {
            removeAt(index);
            return true;
        }
        return false;
    }

    /**
     * Tests the set to determine if all of the elements in
     * <tt>array</tt> are present.
     *
     * @param array an <code>array</code> of long primitives.
     * @return true if all elements were present in the set.
     */
    public boolean containsAll(long[] array) {
        for (int i = array.length; i-- > 0;) {
            if (!contains(array[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Adds all of the elements in <tt>array</tt> to the set.
     *
     * @param array an <code>array</code> of long primitives.
     * @return true if the set was modified by the add all operation.
     */
    public boolean addAll(long[] array) {
        boolean changed = false;
        for (int i = array.length; i-- > 0;) {
            if (add(array[i])) {
                changed = true;
            }
        }
        return changed;
    }

    /**
     * Removes all of the elements in <tt>array</tt> from the set.
     *
     * @param array an <code>array</code> of long primitives.
     * @return true if the set was modified by the remove all operation.
     */
    public boolean removeAll(long[] array) {
        boolean changed = false;
        for (int i = array.length; i-- > 0;) {
            if (remove(array[i])) {
                changed = true;
            }
        }
        return changed;
    }

    /**
     * Removes any values in the set which are not contained in
     * <tt>array</tt>.
     * <p/>
     * NOTE: sorts the caller's <tt>array</tt> in place (for the binary
     * search below) — an observable side effect on the argument.
     * NOTE(review): remove() inside this loop may trigger superclass
     * auto-compaction/rehashing while the local <tt>set</tt>/<tt>states</tt>
     * references still point at the old arrays — confirm against
     * TPrimitiveHash before relying on this with compaction enabled.
     *
     * @param array an <code>array</code> of long primitives.
     * @return true if the set was modified by the retain all operation
     */
    public boolean retainAll(long[] array) {
        boolean changed = false;
        Arrays.sort(array);
        long[] set = _set;
        byte[] states = _states;

        for (int i = set.length; i-- > 0;) {
            if (states[i] == FULL && (Arrays.binarySearch(array, set[i]) < 0)) {
                remove(set[i]);
                changed = true;
            }
        }
        return changed;
    }


    /**
     * Writes the set as: version byte (0), entry count, then each element
     * via SerializationProcedure.
     */
    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // NUMBER OF ENTRIES
        out.writeInt(_size);

        // ENTRIES
        SerializationProcedure writeProcedure = new SerializationProcedure(out);
        if (!forEach(writeProcedure)) {
            // the procedure stashes the IOException that aborted the loop
            throw writeProcedure.exception;
        }
    }

    /**
     * Reads the format produced by {@link #writeExternal(ObjectOutput)}.
     */
    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // NUMBER OF ENTRIES
        int size = in.readInt();

        // ENTRIES
        setUp(size);
        while (size-- > 0) {
            long val = in.readLong();
            add(val);
        }
    }
} // TLongHashSet
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface to support pluggable hashing strategies in maps and sets. + * Implementors can use this interface to make the trove hashing + * algorithms use an optimal strategy when computing hashcodes. + *

/**
 * Interface to support pluggable hashing strategies in maps and sets.
 * Implementors can use this interface to make the trove hashing
 * algorithms use an optimal strategy when computing hashcodes.
 * <p/>
 * Created: Sun Nov 4 08:56:06 2001
 *
 * @author Eric D. Friedman
 * @version $Id: PHashingStrategy.template,v 1.1 2006/11/10 23:28:00 robeden Exp $
 */

public interface TLongHashingStrategy extends Serializable {
    /**
     * Computes a hash code for the specified long. Implementors
     * can use the long's own value or a custom scheme designed to
     * minimize collisions for a known set of input.
     * The result is sign-masked by callers, so any int is acceptable.
     *
     * @param val long for which the hashcode is to be computed
     * @return the hashCode
     */
    public int computeHashCode(long val);
} // TLongHashingStrategy
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and int values. + *

/**
 * An open addressed Map implementation for long keys and int values.
 * Keys live in the inherited <tt>_set</tt> array; values are kept in a
 * parallel <tt>_values</tt> array at the same indices. Absent keys read
 * as 0, which is indistinguishable from an explicitly stored 0 — use
 * {@link #containsKey(long)} to tell them apart.
 * <p/>
 * Created: Sun Nov 4 08:52:45 2001
 *
 * @author Eric D. Friedman
 */
public class TLongIntHashMap extends TLongHash implements Externalizable {
    static final long serialVersionUID = 1L;

    // shared procedure for putAll(); closes over this map's put()
    private final TLongIntProcedure PUT_ALL_PROC = new TLongIntProcedure() {
        public boolean execute(long key, int value) {
            put(key, value);
            return true;
        }
    };


    /**
     * the values of the map
     */
    // transient: written explicitly by writeExternal, not default serialization
    protected transient int[] _values;

    /**
     * Creates a new <code>TLongIntHashMap</code> instance with the default
     * capacity and load factor.
     */
    public TLongIntHashMap() {
        super();
    }

    /**
     * Creates a new <code>TLongIntHashMap</code> instance with a prime
     * capacity equal to or greater than <tt>initialCapacity</tt> and
     * with the default load factor.
     *
     * @param initialCapacity an <code>int</code> value
     */
    public TLongIntHashMap(int initialCapacity) {
        super(initialCapacity);
    }

    /**
     * Creates a new <code>TLongIntHashMap</code> instance with a prime
     * capacity equal to or greater than <tt>initialCapacity</tt> and
     * with the specified load factor.
     *
     * @param initialCapacity an <code>int</code> value
     * @param loadFactor      a <code>float</code> value
     */
    public TLongIntHashMap(int initialCapacity, float loadFactor) {
        super(initialCapacity, loadFactor);
    }

    /**
     * Creates a new <code>TLongIntHashMap</code> instance with the default
     * capacity and load factor.
     *
     * @param strategy used to compute hash codes and to compare keys.
     */
    public TLongIntHashMap(TLongHashingStrategy strategy) {
        super(strategy);
    }

    /**
     * Creates a new <code>TLongIntHashMap</code> instance whose capacity
     * is the next highest prime above <tt>initialCapacity + 1</tt>
     * unless that value is already prime.
     *
     * @param initialCapacity an <code>int</code> value
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TLongIntHashMap(int initialCapacity, TLongHashingStrategy strategy) {
        super(initialCapacity, strategy);
    }

    /**
     * Creates a new <code>TLongIntHashMap</code> instance with a prime
     * value at or near the specified capacity and load factor.
     *
     * @param initialCapacity used to find a prime capacity for the table.
     * @param loadFactor      used to calculate the threshold over which
     *                        rehashing takes place.
     * @param strategy        used to compute hash codes and to compare keys.
     */
    public TLongIntHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) {
        super(initialCapacity, loadFactor, strategy);
    }

    /**
     * @return a deep clone of this collection
     */
    public Object clone() {
        TLongIntHashMap m = (TLongIntHashMap) super.clone();
        m._values = (int[]) this._values.clone();
        return m;
    }

    /**
     * @return a TLongIntIterator with access to this map's keys and values
     */
    public TLongIntIterator iterator() {
        return new TLongIntIterator(this);
    }

    /**
     * initializes the hashtable to a prime capacity which is at least
     * <tt>initialCapacity + 1</tt>, sizing the values array to match
     * the key array.
     *
     * @param initialCapacity an <code>int</code> value
     * @return the actual capacity chosen
     */
    protected int setUp(int initialCapacity) {
        int capacity;

        capacity = super.setUp(initialCapacity);
        _values = new int[capacity];
        return capacity;
    }

    /**
     * Inserts a key/value pair into the map.
     *
     * @param key   an <code>long</code> value
     * @param value an <code>int</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or 0 if none was found.
     */
    public int put(long key, int value) {
        int index = insertionIndex(key);
        return doPut(key, value, index);
    }

    /**
     * Inserts a key/value pair into the map if the specified key is not already
     * associated with a value.
     *
     * @param key   an <code>long</code> value
     * @param value an <code>int</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or 0 if none was found.
     */
    public int putIfAbsent(long key, int value) {
        int index = insertionIndex(key);
        if (index < 0)
            // negative index => key present; decode to the existing slot
            return _values[-index - 1];
        return doPut(key, value, index);
    }

    /**
     * Shared insertion path for put/putIfAbsent. A negative
     * <tt>index</tt> (as returned by insertionIndex) means the key is
     * already mapped and this is an update rather than a new mapping.
     */
    private int doPut(long key, int value, int index) {
        byte previousState;
        int previous = (int) 0;
        boolean isNewMapping = true;
        if (index < 0) {
            index = -index - 1;
            previous = _values[index];
            isNewMapping = false;
        }
        previousState = _states[index];
        _set[index] = key;
        _states[index] = FULL;
        _values[index] = value;
        if (isNewMapping) {
            // only a consumed FREE slot counts toward the rehash threshold
            postInsertHook(previousState == FREE);
        }

        return previous;
    }


    /**
     * Put all the entries from the given map into this map.
     *
     * @param map The map from which entries will be obtained to put into this map.
     */
    public void putAll(TLongIntHashMap map) {
        map.forEachEntry(PUT_ALL_PROC);
    }


    /**
     * rehashes the map to the new capacity: re-inserts every FULL entry
     * (and its value) into fresh arrays.
     *
     * @param newCapacity an <code>int</code> value
     */
    protected void rehash(int newCapacity) {
        int oldCapacity = _set.length;
        long oldKeys[] = _set;
        int oldVals[] = _values;
        byte oldStates[] = _states;

        _set = new long[newCapacity];
        _values = new int[newCapacity];
        _states = new byte[newCapacity];

        for (int i = oldCapacity; i-- > 0;) {
            if (oldStates[i] == FULL) {
                long o = oldKeys[i];
                int index = insertionIndex(o);
                _set[index] = o;
                _values[index] = oldVals[i];
                _states[index] = FULL;
            }
        }
    }

    /**
     * retrieves the value for <tt>key</tt>
     *
     * @param key an <code>long</code> value
     * @return the value of <tt>key</tt> or 0 if no such mapping exists
     *         (indistinguishable from a stored 0 — see containsKey).
     */
    public int get(long key) {
        int index = index(key);
        return index < 0 ? (int) 0 : _values[index];
    }

    /**
     * Empties the map.
     */
    public void clear() {
        super.clear();
        // NOTE(review): these three locals are unused (the fills below act
        // on the fields directly) — leftover from the code-gen template
        long[] keys = _set;
        int[] vals = _values;
        byte[] states = _states;

        Arrays.fill(_set, 0, _set.length, (long) 0);
        Arrays.fill(_values, 0, _values.length, (int) 0);
        Arrays.fill(_states, 0, _states.length, FREE);
    }

    /**
     * Deletes a key/value pair from the map.
     *
     * @param key an <code>long</code> value
     * @return the removed value, or 0 if no mapping for key exists
     */
    public int remove(long key) {
        int prev = (int) 0;
        int index = index(key);
        if (index >= 0) {
            prev = _values[index];
            removeAt(index); // clear key,state; adjust size
        }
        return prev;
    }

    /**
     * Compares this map with another map for equality of their stored
     * entries: same size and every (key, value) pair of this map present
     * in <tt>other</tt>.
     *
     * @param other an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean equals(Object other) {
        if (!(other instanceof TLongIntHashMap)) {
            return false;
        }
        TLongIntHashMap that = (TLongIntHashMap) other;
        if (that.size() != this.size()) {
            return false;
        }
        return forEachEntry(new EqProcedure(that));
    }

    public int hashCode() {
        HashProcedure p = new HashProcedure();
        forEachEntry(p);
        return p.getHashCode();
    }

    /** Accumulates an order-independent hash over (key ^ value) pairs. */
    private final class HashProcedure implements TLongIntProcedure {
        private int h = 0;

        public int getHashCode() {
            return h;
        }

        public final boolean execute(long key, int value) {
            h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value));
            return true;
        }
    }

    /** Entry-wise equality check against another map, used by equals(). */
    private static final class EqProcedure implements TLongIntProcedure {
        private final TLongIntHashMap _otherMap;

        EqProcedure(TLongIntHashMap otherMap) {
            _otherMap = otherMap;
        }

        public final boolean execute(long key, int value) {
            int index = _otherMap.index(key);
            if (index >= 0 && eq(value, _otherMap.get(key))) {
                return true;
            }
            return false;
        }

        /**
         * Compare two ints for equality.
         */
        private final boolean eq(int v1, int v2) {
            return v1 == v2;
        }

    }

    /**
     * removes the mapping at <tt>index</tt> from the map.
     *
     * @param index an <code>int</code> value
     */
    protected void removeAt(int index) {
        _values[index] = (int) 0;
        super.removeAt(index); // clear key, state; adjust size
    }

    /**
     * Returns the values of the map, in internal-table order (unspecified).
     *
     * @return a <code>Collection</code> value
     */
    public int[] getValues() {
        int[] vals = new int[size()];
        int[] v = _values;
        byte[] states = _states;

        for (int i = v.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                vals[j++] = v[i];
            }
        }
        return vals;
    }

    /**
     * returns the keys of the map, in internal-table order (unspecified).
     *
     * @return a <code>Set</code> value
     */
    public long[] keys() {
        long[] keys = new long[size()];
        long[] k = _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                keys[j++] = k[i];
            }
        }
        return keys;
    }

    /**
     * returns the keys of the map.
     *
     * @param a the array into which the elements of the list are to
     *          be stored, if it is big enough; otherwise, a new array of the
     *          same runtime type is allocated for this purpose.
     * @return a <code>Set</code> value
     */
    public long[] keys(long[] a) {
        int size = size();
        if (a.length < size) {
            a = (long[]) java.lang.reflect.Array.newInstance(
                    a.getClass().getComponentType(), size);
        }

        long[] k = (long[]) _set;
        byte[] states = _states;

        for (int i = k.length, j = 0; i-- > 0;) {
            if (states[i] == FULL) {
                a[j++] = k[i];
            }
        }
        return a;
    }

    /**
     * checks for the presence of <tt>val</tt> in the values of the map.
     * Linear scan over the whole table.
     *
     * @param val an <code>int</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsValue(int val) {
        byte[] states = _states;
        int[] vals = _values;

        for (int i = vals.length; i-- > 0;) {
            if (states[i] == FULL && val == vals[i]) {
                return true;
            }
        }
        return false;
    }


    /**
     * checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key an <code>long</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsKey(long key) {
        return contains(key);
    }

    /**
     * Executes <tt>procedure</tt> for each key in the map.
     *
     * @param procedure a <code>TLongProcedure</code> value
     * @return false if the loop over the keys terminated because
     *         the procedure returned false for some key.
     */
    public boolean forEachKey(TLongProcedure procedure) {
        return forEach(procedure);
    }

    /**
     * Executes <tt>procedure</tt> for each value in the map.
     *
     * @param procedure a <code>TIntProcedure</code> value
     * @return false if the loop over the values terminated because
     *         the procedure returned false for some value.
     */
    public boolean forEachValue(TIntProcedure procedure) {
        byte[] states = _states;
        int[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Executes <tt>procedure</tt> for each key/value entry in the
     * map.
     *
     * @param procedure a <code>TOLongIntProcedure</code> value
     * @return false if the loop over the entries terminated because
     *         the procedure returned false for some entry.
     */
    public boolean forEachEntry(TLongIntProcedure procedure) {
        byte[] states = _states;
        long[] keys = _set;
        int[] values = _values;
        for (int i = keys.length; i-- > 0;) {
            if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                return false;
            }
        }
        return true;
    }

    /**
     * Retains only those entries in the map for which the procedure
     * returns a true value.
     *
     * @param procedure determines which entries to keep
     * @return true if the map was modified.
     */
    public boolean retainEntries(TLongIntProcedure procedure) {
        boolean modified = false;
        byte[] states = _states;
        long[] keys = _set;
        int[] values = _values;


        // Temporarily disable compaction. This is a fix for bug #1738760:
        // removeAt() during iteration must not compact/rehash the arrays
        // the loop is reading from.
        tempDisableAutoCompaction();
        try {
            for (int i = keys.length; i-- > 0;) {
                if (states[i] == FULL && !procedure.execute(keys[i], values[i])) {
                    removeAt(i);
                    modified = true;
                }
            }
        }
        finally {
            reenableAutoCompaction(true);
        }

        return modified;
    }

    /**
     * Transform the values in this map using <tt>function</tt>,
     * replacing each stored value with the function's result in place.
     *
     * @param function a <code>TIntFunction</code> value
     */
    public void transformValues(TIntFunction function) {
        byte[] states = _states;
        int[] values = _values;
        for (int i = values.length; i-- > 0;) {
            if (states[i] == FULL) {
                values[i] = function.execute(values[i]);
            }
        }
    }

    /**
     * Increments the primitive value mapped to key by 1
     *
     * @param key the key of the value to increment
     * @return true if a mapping was found and modified.
     */
    public boolean increment(long key) {
        return adjustValue(key, (int) 1);
    }

    /**
     * Adjusts the primitive value mapped to key by <tt>amount</tt>.
     * No-op (returns false) if the key is absent.
     *
     * @param key    the key of the value to increment
     * @param amount the amount to adjust the value by.
     * @return true if a mapping was found and modified.
     */
    public boolean adjustValue(long key, int amount) {
        int index = index(key);
        if (index < 0) {
            return false;
        } else {
            _values[index] += amount;
            return true;
        }
    }

    /**
     * Adjusts the primitive value mapped to the key if the key is present in the map.
     * Otherwise, the <tt>put_amount</tt> is put in the map.
     *
     * @param key           the key of the value to increment
     * @param adjust_amount the amount to adjust the value by
     * @param put_amount    the value put into the map if the key is not initially present
     * @return the value present in the map after the adjustment or put operation
     * @since 2.0b1
     */
    public int adjustOrPutValue(final long key, final int adjust_amount, final int put_amount) {
        int index = insertionIndex(key);
        final boolean isNewMapping;
        final int newValue;
        if (index < 0) {
            // key present: adjust in place
            index = -index - 1;
            newValue = (_values[index] += adjust_amount);
            isNewMapping = false;
        } else {
            // key absent: store the put_amount (adjust_amount is NOT applied)
            newValue = (_values[index] = put_amount);
            isNewMapping = true;
        }

        byte previousState = _states[index];
        _set[index] = key;
        _states[index] = FULL;

        if (isNewMapping) {
            postInsertHook(previousState == FREE);
        }

        return newValue;
    }


    /**
     * Writes the map as: version byte (0), entry count, then each
     * key/value pair via SerializationProcedure.
     */
    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // NUMBER OF ENTRIES
        out.writeInt(_size);

        // ENTRIES
        SerializationProcedure writeProcedure = new SerializationProcedure(out);
        if (!forEachEntry(writeProcedure)) {
            // the procedure stashes the IOException that aborted the loop
            throw writeProcedure.exception;
        }
    }

    /**
     * Reads the format produced by {@link #writeExternal(ObjectOutput)}.
     */
    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // NUMBER OF ENTRIES
        int size = in.readInt();
        setUp(size);

        // ENTRIES
        while (size-- > 0) {
            long key = in.readLong();
            int val = in.readInt();
            put(key, val);
        }
    }

    /** Renders the map as "{k1=v1,k2=v2,...}" in internal-table order. */
    public String toString() {
        final StringBuilder buf = new StringBuilder("{");
        forEachEntry(new TLongIntProcedure() {
            private boolean first = true;

            public boolean execute(long key, int value) {
                if (first) first = false;
                else buf.append(",");

                buf.append(key);
                buf.append("=");
                buf.append(value);
                return true;
            }
        });
        buf.append("}");
        return buf.toString();
    }
} // TLongIntHashMap
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIntIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIntIterator.java new file mode 100644 index 00000000000..765a3bfe8be --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIntIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongIntIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongIntHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongIntIterator(TLongIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TLongIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIntProcedure.java new file mode 100644 index 00000000000..2973913997b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, int b); +}// TLongIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIterator.java new file mode 100644 index 00000000000..d0c75891d5a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongIterator.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for long collections. 
+ * + * @author Eric D. Friedman + * @version $Id: PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongIterator extends TPrimitiveIterator { + /** + * the collection on which the iterator operates + */ + private final TLongHash _hash; + + /** + * Creates a TLongIterator for the elements in the specified collection. + */ + public TLongIterator(TLongHash hash) { + super(hash); + this._hash = hash; + } + + /** + * Advances the iterator to the next element in the underlying collection + * and returns it. + * + * @return the next long in the collection + * @throws NoSuchElementException if the iterator is already exhausted + */ + public long next() { + moveToNextIndex(); + return _hash._set[_index]; + } +}// TLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongHashMap.java new file mode 100644 index 00000000000..9b99a72faf9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TLongLongHashMap extends TLongHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TLongLongProcedure PUT_ALL_PROC = new TLongLongProcedure() { + public boolean execute(long key, long value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TLongLongHashMap instance with the default + * capacity and load factor. + */ + public TLongLongHashMap() { + super(); + } + + /** + * Creates a new TLongLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TLongLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TLongLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TLongLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TLongLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongLongHashMap(TLongHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TLongLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TLongLongHashMap(int initialCapacity, TLongHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TLongLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongLongHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TLongLongHashMap m = (TLongLongHashMap) super.clone(); + m._values = (long[]) this._values.clone(); + return m; + } + + /** + * @return a TLongLongIterator with access to this map's keys and values + */ + public TLongLongIterator iterator() { + return new TLongLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an long value + * @param value an long value + * @return the previous value associated with key, + * or (long)0 if none was found. + */ + public long put(long key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an long value + * @param value an long value + * @return the previous value associated with key, + * or (long)0 if none was found. 
+ */ + public long putIfAbsent(long key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(long key, long value, int index) { + byte previousState; + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TLongLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + long oldKeys[] = _set; + long oldVals[] = _values; + byte oldStates[] = _states; + + _set = new long[newCapacity]; + _values = new long[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + long o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an long value + * @return the value of key or (long)0 if no such mapping exists. + */ + public long get(long key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + long[] keys = _set; + long[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (long) 0); + Arrays.fill(_values, 0, _values.length, (long) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an long value + * @return an long value, or (long)0 if no mapping for key exists + */ + public long remove(long key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TLongLongHashMap)) { + return false; + } + TLongLongHashMap that = (TLongLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TLongLongProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(long key, long value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TLongLongProcedure { + private final TLongLongHashMap _otherMap; + + EqProcedure(TLongLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(long key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. 
+ */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (long) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public long[] keys() { + long[] keys = new long[size()]; + long[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public long[] keys(long[] a) { + int size = size(); + if (a.length < size) { + a = (long[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + long[] k = (long[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + byte[] states = _states; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an long value + * @return a boolean value + */ + public boolean containsKey(long key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TLongProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOLongLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TLongLongProcedure procedure) { + byte[] states = _states; + long[] keys = _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TLongLongProcedure procedure) { + boolean modified = false; + byte[] states = _states; + long[] keys = _set; + long[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(long key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(long key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final long key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + long key = in.readLong(); + long val = in.readLong(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TLongLongProcedure() { + private boolean first = true; + + public boolean execute(long key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TLongLongHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongIterator.java new file mode 100644 index 00000000000..5ba2424f2c5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongLongIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongLongHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongLongIterator(TLongLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TLongLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongProcedure.java new file mode 100644 index 00000000000..42bf8718c51 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, long b); +}// TLongLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectHashMap.java new file mode 100644 index 00000000000..57515e9d8a9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectHashMap.java @@ -0,0 +1,632 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and Object values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TLongObjectHashMap extends TLongHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TLongObjectProcedure PUT_ALL_PROC = new TLongObjectProcedure() { + public boolean execute(long key, V value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new TLongObjectHashMap instance with the default + * capacity and load factor. + */ + public TLongObjectHashMap() { + super(); + } + + /** + * Creates a new TLongObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TLongObjectHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TLongObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TLongObjectHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TLongObjectHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongObjectHashMap(TLongHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TLongObjectHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TLongObjectHashMap(int initialCapacity, TLongHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TLongObjectHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongObjectHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public TLongObjectHashMap clone() { + TLongObjectHashMap m = (TLongObjectHashMap) super.clone(); + m._values = (V[]) this._values.clone(); + return m; + } + + /** + * @return a TLongObjectIterator with access to this map's keys and values + */ + public TLongObjectIterator iterator() { + return new TLongObjectIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an long value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(long key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an long value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. 
+ */ + public V putIfAbsent(long key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(long key, V value, int index) { + byte previousState; + V previous = null; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TLongObjectHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + long oldKeys[] = _set; + V oldVals[] = _values; + byte oldStates[] = _states; + + _set = new long[newCapacity]; + _values = (V[]) new Object[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + long o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an long value + * @return the value of key or (long)0 if no such mapping exists. + */ + public V get(long key) { + int index = index(key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + long[] keys = _set; + Object[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (long) 0); + Arrays.fill(_values, 0, _values.length, null); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an long value + * @return an Object value or (long)0 if no such mapping exists. + */ + public V remove(long key) { + V prev = null; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TLongObjectHashMap)) { + return false; + } + TLongObjectHashMap that = (TLongObjectHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TLongObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(long key, Object value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TLongObjectProcedure { + private final TLongObjectHashMap _otherMap; + + EqProcedure(TLongObjectHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(long key, Object value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two objects for equality. 
+ */ + private final boolean eq(Object o1, Object o2) { + return o1 == o2 || ((o1 != null) && o1.equals(o2)); + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + * @see #getValues(Object[]) + */ + public Object[] getValues() { + Object[] vals = new Object[size()]; + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * Return the values of the map; the runtime type of the returned array is that of + * the specified array. + * + * @param a the array into which the elements of this collection are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing the elements of this collection + * @throws ArrayStoreException the runtime type of the specified array is + * not a supertype of the runtime type of every element in this + * collection. + * @throws NullPointerException if the specified array is null. + * @see #getValues() + */ + public T[] getValues(T[] a) { + if (a.length < _size) { + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + _size); + } + + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = (T) v[i]; + } + } + return a; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public long[] keys() { + long[] keys = new long[size()]; + long[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. 
+ * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public long[] keys(long[] a) { + int size = size(); + if (a.length < size) { + a = (long[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + long[] k = (long[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(V val) { + byte[] states = _states; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an long value + * @return a boolean value + */ + public boolean containsKey(long key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TLongProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEachValue(TObjectProcedure procedure) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOLongObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TLongObjectProcedure procedure) { + byte[] states = _states; + long[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TLongObjectProcedure procedure) { + boolean modified = false; + byte[] states = _states; + long[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + long key = in.readLong(); + V val = (V) in.readObject(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TLongObjectProcedure() { + private boolean first = true; + + public boolean execute(long key, Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TLongObjectHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectIterator.java new file mode 100644 index 00000000000..049a8cbc74f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and Object. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongObjectIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2OIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongObjectIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongObjectHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongObjectIterator(TLongObjectHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public V value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public V setValue(V val) { + V old = value(); + _map._values[_index] = val; + return old; + } +}// TLongObjectIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectProcedure.java new file mode 100644 index 00000000000..7385736f0d1 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongObjectProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and Object. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2OProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, T b); +}// TLongObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongProcedure.java new file mode 100644 index 00000000000..30c293c5a89 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongProcedure.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for procedures with one long parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: PProcedure.template,v 1.2 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TLongProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value of type long + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long value); +}// TLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortHashMap.java new file mode 100644 index 00000000000..160f06b293d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for long keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TLongShortHashMap extends TLongHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TLongShortProcedure PUT_ALL_PROC = new TLongShortProcedure() { + public boolean execute(long key, short value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TLongShortHashMap instance with the default + * capacity and load factor. + */ + public TLongShortHashMap() { + super(); + } + + /** + * Creates a new TLongShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TLongShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TLongShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TLongShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TLongShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongShortHashMap(TLongHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TLongShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TLongShortHashMap(int initialCapacity, TLongHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TLongShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TLongShortHashMap(int initialCapacity, float loadFactor, TLongHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TLongShortHashMap m = (TLongShortHashMap) super.clone(); + m._values = (short[]) this._values.clone(); + return m; + } + + /** + * @return a TLongShortIterator with access to this map's keys and values + */ + public TLongShortIterator iterator() { + return new TLongShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an long value + * @param value an short value + * @return the previous value associated with key, + * or (long)0 if none was found. + */ + public short put(long key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an long value + * @param value an short value + * @return the previous value associated with key, + * or (long)0 if none was found. 
+ */ + public short putIfAbsent(long key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(long key, short value, int index) { + byte previousState; + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TLongShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + long oldKeys[] = _set; + short oldVals[] = _values; + byte oldStates[] = _states; + + _set = new long[newCapacity]; + _values = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + long o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an long value + * @return the value of key or (long)0 if no such mapping exists. + */ + public short get(long key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + long[] keys = _set; + short[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (long) 0); + Arrays.fill(_values, 0, _values.length, (short) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an long value + * @return an short value, or (long)0 if no mapping for key exists + */ + public short remove(long key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TLongShortHashMap)) { + return false; + } + TLongShortHashMap that = (TLongShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TLongShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(long key, short value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TLongShortProcedure { + private final TLongShortHashMap _otherMap; + + EqProcedure(TLongShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(long key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. 
+ */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (short) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public long[] keys() { + long[] keys = new long[size()]; + long[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public long[] keys(long[] a) { + int size = size(); + if (a.length < size) { + a = (long[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + long[] k = (long[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + byte[] states = _states; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an long value + * @return a boolean value + */ + public boolean containsKey(long key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TLongProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOLongShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TLongShortProcedure procedure) { + byte[] states = _states; + long[] keys = _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TLongShortProcedure procedure) { + boolean modified = false; + byte[] states = _states; + long[] keys = _set; + short[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(long key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(long key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final long key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + long key = in.readLong(); + short val = in.readShort(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TLongShortProcedure() { + private boolean first = true; + + public boolean execute(long key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TLongShortHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortIterator.java new file mode 100644 index 00000000000..ddd43e799a0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type long and short. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TLongShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TLongShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TLongShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TLongShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TLongShortIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TLongShortHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TLongShortIterator(TLongShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public long key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TLongShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortProcedure.java new file mode 100644 index 00000000000..75e0c74f2e2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type long and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TLongShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a long value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(long a, short b); +}// TLongShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongStack.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongStack.java new file mode 100644 index 00000000000..0f87182d39c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TLongStack.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * A stack of long primitives, backed by a TLongArrayList. + * + * @author Eric D. 
Friedman, Rob Eden + * @version $Id: PStack.template,v 1.2 2007/02/28 23:03:57 robeden Exp $ + */ + +public class TLongStack { + + /** + * the list used to hold the stack values. + */ + protected TLongArrayList _list; + + public static final int DEFAULT_CAPACITY = TLongArrayList.DEFAULT_CAPACITY; + + /** + * Creates a new TLongStack instance with the default + * capacity. + */ + public TLongStack() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TLongStack instance with the + * specified capacity. + * + * @param capacity the initial depth of the stack + */ + public TLongStack(int capacity) { + _list = new TLongArrayList(capacity); + } + + /** + * Pushes the value onto the top of the stack. + * + * @param val an long value + */ + public void push(long val) { + _list.add(val); + } + + /** + * Removes and returns the value at the top of the stack. + * + * @return an long value + */ + public long pop() { + return _list.remove(_list.size() - 1); + } + + /** + * Returns the value at the top of the stack. + * + * @return an long value + */ + public long peek() { + return _list.get(_list.size() - 1); + } + + /** + * Returns the current depth of the stack. + */ + public int size() { + return _list.size(); + } + + /** + * Clears the stack, reseting its capacity to the default. + */ + public void clear() { + _list.clear(DEFAULT_CAPACITY); + } + + /** + * Clears the stack without releasing its internal capacity allocation. + */ + public void reset() { + _list.reset(); + } + + /** + * Copies the contents of the stack into a native array. Note that this will NOT + * pop them out of the stack. + * + * @return an long[] value + */ + public long[] toNativeArray() { + return _list.toNativeArray(); + } + + /** + * Copies a slice of the list into a native array. Note that this will NOT + * pop them out of the stack. + * + * @param dest the array to copy into. 
+ */ + public void toNativeArray(long[] dest) { + _list.toNativeArray(dest, 0, size()); + } +} // TLongStack diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteHashMap.java new file mode 100644 index 00000000000..96afa8858d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteHashMap.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for Object keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TObjectByteHashMap extends TObjectHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TObjectByteProcedure PUT_ALL_PROC = new TObjectByteProcedure() { + public boolean execute(K key, byte value) { + put(key, value); + return true; + } + }; + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TObjectByteHashMap instance with the default + * capacity and load factor. + */ + public TObjectByteHashMap() { + super(); + } + + /** + * Creates a new TObjectByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TObjectByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TObjectByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TObjectByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TObjectByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectByteHashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TObjectByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TObjectByteHashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TObjectByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectByteHashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return an iterator over the entries in this map + */ + public TObjectByteIterator iterator() { + return new TObjectByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an byte value + * @return the previous value associated with key, + * or (byte)0 if none was found. + */ + public byte put(K key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an byte value + * @return the previous value associated with key, + * or (byte)0 if none was found. 
+ */ + public byte putIfAbsent(K key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(K key, byte value, int index) { + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + K oldKey = (K) _set[index]; + _set[index] = key; + _values[index] = value; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TObjectByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + K oldKeys[] = (K[]) _set; + byte oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + K o = oldKeys[i]; + int index = insertionIndex(o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or (byte)0 if no such mapping exists. + */ + public byte get(K key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. + */ + public void clear() { + super.clear(); + Object[] keys = _set; + byte[] vals = _values; + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, (byte) 0); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return an byte value or (byte)0 if no such mapping exists. + */ + public byte remove(K key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TObjectByteHashMap)) { + return false; + } + TObjectByteHashMap that = (TObjectByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + /** + * {@inheritDoc} + */ + @Override + public TObjectByteHashMap clone() { + TObjectByteHashMap clone = (TObjectByteHashMap) super.clone(); + clone._values = new byte[_values.length]; + System.arraycopy(_values, 0, clone._values, 0, clone._values.length); + + return clone; + } + + + private static final class EqProcedure implements TObjectByteProcedure { + private final TObjectByteHashMap _otherMap; + + EqProcedure(TObjectByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(Object key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. + */ + private final boolean eq(byte v1, byte v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. 
+ * + * @return a Collection value + */ + public byte[] getValues() { + byte[] vals = new byte[size()]; + byte[] v = _values; + Object[] keys = _set; + + for (int i = v.length, j = 0; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public Object[] keys() { + Object[] keys = new Object[size()]; + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose. + * @return a Set value + */ + public K[] keys(K[] a) { + int size = size(); + if (a.length < size) { + a = (K[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an byte value + * @return a boolean value + */ + public boolean containsValue(byte val) { + Object[] keys = _set; + byte[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(K key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. 
+ */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TByteProcedure procedure) { + Object[] keys = _set; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOObjectByteProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectByteProcedure procedure) { + K[] keys = (K[]) _set; + byte[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TObjectByteProcedure procedure) { + boolean modified = false; + K[] keys = (K[]) _set; + byte[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + Object[] keys = _set; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != null && keys[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(K key) { + return adjustValue(key, (byte) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(K key, byte amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public byte adjustOrPutValue(final K key, final byte adjust_amount, final byte put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final byte newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + K oldKey = (K) _set[index]; + _set[index] = key; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + K key = (K) in.readObject(); + byte val = in.readByte(); + put(key, val); + } + } + + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectByteProcedure() { + private boolean first = true; + + public boolean execute(K key, byte value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TObjectByteHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteIterator.java new file mode 100644 index 00000000000..a6b152c5edf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteIterator.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type Object and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TObjectByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TObjectByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TObjectByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TObjectByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: O2PIterator.template,v 1.3 2007/01/22 16:56:39 robeden Exp $ + */ + +public class TObjectByteIterator extends TIterator { + private final TObjectByteHashMap _map; + + public TObjectByteIterator(TObjectByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an byte value + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _map._set; + int i = _index; + while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || + set[i] == TObjectHash.FREE)) ; + return i; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public K key() { + return (K) _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TObjectByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteProcedure.java new file mode 100644 index 00000000000..a269dbf364e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type Object and byte. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: O2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TObjectByteProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b a byte value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, byte b); +}// TObjectByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleHashMap.java new file mode 100644 index 00000000000..51ad45d238b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleHashMap.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for Object keys and double values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TObjectDoubleHashMap extends TObjectHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TObjectDoubleProcedure PUT_ALL_PROC = new TObjectDoubleProcedure() { + public boolean execute(K key, double value) { + put(key, value); + return true; + } + }; + + /** + * the values of the map + */ + protected transient double[] _values; + + /** + * Creates a new TObjectDoubleHashMap instance with the default + * capacity and load factor. + */ + public TObjectDoubleHashMap() { + super(); + } + + /** + * Creates a new TObjectDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TObjectDoubleHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TObjectDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TObjectDoubleHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TObjectDoubleHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectDoubleHashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TObjectDoubleHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TObjectDoubleHashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TObjectDoubleHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectDoubleHashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return an iterator over the entries in this map + */ + public TObjectDoubleIterator iterator() { + return new TObjectDoubleIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new double[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an double value + * @return the previous value associated with key, + * or (double)0 if none was found. + */ + public double put(K key, double value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an double value + * @return the previous value associated with key, + * or (double)0 if none was found. 
+ */ + public double putIfAbsent(K key, double value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private double doPut(K key, double value, int index) { + double previous = (double) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + K oldKey = (K) _set[index]; + _set[index] = key; + _values[index] = value; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TObjectDoubleHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + K oldKeys[] = (K[]) _set; + double oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = new double[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + K o = oldKeys[i]; + int index = insertionIndex(o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or (double)0 if no such mapping exists. + */ + public double get(K key) { + int index = index(key); + return index < 0 ? (double) 0 : _values[index]; + } + + /** + * Empties the map. + */ + public void clear() { + super.clear(); + Object[] keys = _set; + double[] vals = _values; + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, (double) 0); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return an double value or (double)0 if no such mapping exists. + */ + public double remove(K key) { + double prev = (double) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TObjectDoubleHashMap)) { + return false; + } + TObjectDoubleHashMap that = (TObjectDoubleHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + /** + * {@inheritDoc} + */ + @Override + public TObjectDoubleHashMap clone() { + TObjectDoubleHashMap clone = (TObjectDoubleHashMap) super.clone(); + clone._values = new double[_values.length]; + System.arraycopy(_values, 0, clone._values, 0, clone._values.length); + + return clone; + } + + + private static final class EqProcedure implements TObjectDoubleProcedure { + private final TObjectDoubleHashMap _otherMap; + + EqProcedure(TObjectDoubleHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(Object key, double value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two doubles for equality. + */ + private final boolean eq(double v1, double v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. 
+ * + * @return a Collection value + */ + public double[] getValues() { + double[] vals = new double[size()]; + double[] v = _values; + Object[] keys = _set; + + for (int i = v.length, j = 0; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public Object[] keys() { + Object[] keys = new Object[size()]; + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose. + * @return a Set value + */ + public K[] keys(K[] a) { + int size = size(); + if (a.length < size) { + a = (K[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an double value + * @return a boolean value + */ + public boolean containsValue(double val) { + Object[] keys = _set; + double[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(K key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. 
+ */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TDoubleProcedure procedure) { + Object[] keys = _set; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOObjectDoubleProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectDoubleProcedure procedure) { + K[] keys = (K[]) _set; + double[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TObjectDoubleProcedure procedure) { + boolean modified = false; + K[] keys = (K[]) _set; + double[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + Object[] keys = _set; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != null && keys[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(K key) { + return adjustValue(key, (double) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(K key, double amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public double adjustOrPutValue(final K key, final double adjust_amount, final double put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final double newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + K oldKey = (K) _set[index]; + _set[index] = key; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + K key = (K) in.readObject(); + double val = in.readDouble(); + put(key, val); + } + } + + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectDoubleProcedure() { + private boolean first = true; + + public boolean execute(K key, double value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TObjectDoubleHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleIterator.java new file mode 100644 index 00000000000..e8dc0ec98a0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleIterator.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type Object and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TObjectDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TObjectDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TObjectDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TObjectDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: O2PIterator.template,v 1.3 2007/01/22 16:56:39 robeden Exp $ + */ + +public class TObjectDoubleIterator extends TIterator { + private final TObjectDoubleHashMap _map; + + public TObjectDoubleIterator(TObjectDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an double value + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _map._set; + int i = _index; + while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || + set[i] == TObjectHash.FREE)) ; + return i; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public K key() { + return (K) _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TObjectDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleProcedure.java new file mode 100644 index 00000000000..a7c416e02cc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type Object and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: O2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TObjectDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, double b); +}// TObjectDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatHashMap.java new file mode 100644 index 00000000000..3037f3bf383 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatHashMap.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for Object keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TObjectFloatHashMap extends TObjectHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TObjectFloatProcedure PUT_ALL_PROC = new TObjectFloatProcedure() { + public boolean execute(K key, float value) { + put(key, value); + return true; + } + }; + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TObjectFloatHashMap instance with the default + * capacity and load factor. + */ + public TObjectFloatHashMap() { + super(); + } + + /** + * Creates a new TObjectFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TObjectFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TObjectFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TObjectFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TObjectFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectFloatHashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TObjectFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TObjectFloatHashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TObjectFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectFloatHashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return an iterator over the entries in this map + */ + public TObjectFloatIterator iterator() { + return new TObjectFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an float value + * @return the previous value associated with key, + * or (float)0 if none was found. + */ + public float put(K key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an float value + * @return the previous value associated with key, + * or (float)0 if none was found. 
+ */ + public float putIfAbsent(K key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(K key, float value, int index) { + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + K oldKey = (K) _set[index]; + _set[index] = key; + _values[index] = value; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TObjectFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + K oldKeys[] = (K[]) _set; + float oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = new float[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + K o = oldKeys[i]; + int index = insertionIndex(o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or (float)0 if no such mapping exists. + */ + public float get(K key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. + */ + public void clear() { + super.clear(); + Object[] keys = _set; + float[] vals = _values; + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, (float) 0); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return an float value or (float)0 if no such mapping exists. + */ + public float remove(K key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TObjectFloatHashMap)) { + return false; + } + TObjectFloatHashMap that = (TObjectFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + /** + * {@inheritDoc} + */ + @Override + public TObjectFloatHashMap clone() { + TObjectFloatHashMap clone = (TObjectFloatHashMap) super.clone(); + clone._values = new float[_values.length]; + System.arraycopy(_values, 0, clone._values, 0, clone._values.length); + + return clone; + } + + + private static final class EqProcedure implements TObjectFloatProcedure { + private final TObjectFloatHashMap _otherMap; + + EqProcedure(TObjectFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(Object key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. + */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. 
+ * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + Object[] keys = _set; + + for (int i = v.length, j = 0; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public Object[] keys() { + Object[] keys = new Object[size()]; + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose. + * @return a Set value + */ + public K[] keys(K[] a) { + int size = size(); + if (a.length < size) { + a = (K[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + Object[] keys = _set; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(K key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. 
+ */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + Object[] keys = _set; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOObjectFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectFloatProcedure procedure) { + K[] keys = (K[]) _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TObjectFloatProcedure procedure) { + boolean modified = false; + K[] keys = (K[]) _set; + float[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + Object[] keys = _set; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != null && keys[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(K key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(K key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final K key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + K oldKey = (K) _set[index]; + _set[index] = key; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + K key = (K) in.readObject(); + float val = in.readFloat(); + put(key, val); + } + } + + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectFloatProcedure() { + private boolean first = true; + + public boolean execute(K key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TObjectFloatHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatIterator.java new file mode 100644 index 00000000000..da1e73c299e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatIterator.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type Object and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TObjectFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TObjectFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TObjectFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key())) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TObjectFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: O2PIterator.template,v 1.3 2007/01/22 16:56:39 robeden Exp $ + */ + +public class TObjectFloatIterator extends TIterator { + private final TObjectFloatHashMap _map; + + public TObjectFloatIterator(TObjectFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an float value + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _map._set; + int i = _index; + while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || + set[i] == TObjectHash.FREE)) ; + return i; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public K key() { + return (K) _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TObjectFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatProcedure.java new file mode 100644 index 00000000000..b657b9d94a7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type Object and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: O2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TObjectFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, float b); +}// TObjectFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFunction.java new file mode 100644 index 00000000000..3e4731565d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectFunction.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +/** + * Interface for functions that accept and return one Object reference. + *

    + * Created: Mon Nov 5 22:19:36 2001 + * + * @author Eric D. Friedman + * @version $Id: TObjectFunction.java,v 1.3 2006/11/10 23:27:56 robeden Exp $ + */ + +public interface TObjectFunction { + /** + * Execute this function with value + * + * @param value an Object input + * @return an Object result + */ + public R execute(T value); +}// TObjectFunction diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHash.java new file mode 100644 index 00000000000..d33d399a337 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHash.java @@ -0,0 +1,378 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +/** + * An open addressed hashing implementation for Object types. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: TObjectHash.java,v 1.27 2009/06/01 22:14:44 robeden Exp $ + */ +abstract public class TObjectHash extends THash + implements TObjectHashingStrategy { + + static final long serialVersionUID = -3461112548087185871L; + + + /** + * the set of Objects + */ + protected transient Object[] _set; + + /** + * the strategy used to hash objects in this collection. + */ + protected TObjectHashingStrategy _hashingStrategy; + + protected static final Object REMOVED = new Object(), FREE = new Object(); + + /** + * Creates a new TObjectHash instance with the + * default capacity and load factor. + */ + public TObjectHash() { + super(); + this._hashingStrategy = this; + } + + /** + * Creates a new TObjectHash instance with the + * default capacity and load factor and a custom hashing strategy. + * + * @param strategy used to compute hash codes and to compare objects. + */ + public TObjectHash(TObjectHashingStrategy strategy) { + super(); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TObjectHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + */ + public TObjectHash(int initialCapacity) { + super(initialCapacity); + this._hashingStrategy = this; + } + + /** + * Creates a new TObjectHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. Uses the specified custom + * hashing strategy. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare objects. + */ + public TObjectHash(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TObjectHash instance with a prime + * value at or near the specified capacity and load factor. 
+ * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + */ + public TObjectHash(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + this._hashingStrategy = this; + } + + /** + * Creates a new TObjectHash instance with a prime + * value at or near the specified capacity and load factor. Uses + * the specified custom hashing strategy. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare objects. + */ + public TObjectHash(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor); + this._hashingStrategy = strategy; + } + + /** + * @return a shallow clone of this collection + */ + public TObjectHash clone() { + TObjectHash h = (TObjectHash) super.clone(); + h._set = (Object[]) this._set.clone(); + return h; + } + + protected int capacity() { + return _set.length; + } + + protected void removeAt(int index) { + _set[index] = REMOVED; + super.removeAt(index); + } + + /** + * initializes the Object set of this hash table. + * + * @param initialCapacity an int value + * @return an int value + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _set = new Object[capacity]; + Arrays.fill(_set, FREE); + return capacity; + } + + /** + * Executes procedure for each element in the set. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the set terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEach(TObjectProcedure procedure) { + Object[] set = _set; + for (int i = set.length; i-- > 0;) { + if (set[i] != FREE + && set[i] != REMOVED + && !procedure.execute((T) set[i])) { + return false; + } + } + return true; + } + + /** + * Searches the set for obj + * + * @param obj an Object value + * @return a boolean value + */ + public boolean contains(Object obj) { + return index((T) obj) >= 0; + } + + /** + * Locates the index of obj. + * + * @param obj an Object value + * @return the index of obj or -1 if it isn't in the set. + */ + protected int index(T obj) { + final TObjectHashingStrategy hashing_strategy = _hashingStrategy; + + final Object[] set = _set; + final int length = set.length; + final int hash = hashing_strategy.computeHashCode(obj) & 0x7fffffff; + int index = hash % length; + Object cur = set[index]; + + if (cur == FREE) return -1; + + // NOTE: here it has to be REMOVED or FULL (some user-given value) + if (cur == REMOVED || !hashing_strategy.equals((T) cur, obj)) { + // see Knuth, p. 529 + final int probe = 1 + (hash % (length - 2)); + + do { + index -= probe; + if (index < 0) { + index += length; + } + cur = set[index]; + } while (cur != FREE + && (cur == REMOVED || !_hashingStrategy.equals((T) cur, obj))); + } + + return cur == FREE ? -1 : index; + } + + /** + * Locates the index at which obj can be inserted. if + * there is already a value equal()ing obj in the set, + * returns that value's index as -index - 1. + * + * @param obj an Object value + * @return the index of a FREE slot at which obj can be inserted + * or, if obj is already stored in the hash, the negative value of + * that index, minus 1: -index -1. 
+ */ + protected int insertionIndex(T obj) { + final TObjectHashingStrategy hashing_strategy = _hashingStrategy; + + final Object[] set = _set; + final int length = set.length; + final int hash = hashing_strategy.computeHashCode(obj) & 0x7fffffff; + int index = hash % length; + Object cur = set[index]; + + if (cur == FREE) { + return index; // empty, all done + } else if (cur != REMOVED && hashing_strategy.equals((T) cur, obj)) { + return -index - 1; // already stored + } else { // already FULL or REMOVED, must probe + // compute the double hash + final int probe = 1 + (hash % (length - 2)); + + // if the slot we landed on is FULL (but not removed), probe + // until we find an empty slot, a REMOVED slot, or an element + // equal to the one we are trying to insert. + // finding an empty slot means that the value is not present + // and that we should use that slot as the insertion point; + // finding a REMOVED slot means that we need to keep searching, + // however we want to remember the offset of that REMOVED slot + // so we can reuse it in case a "new" insertion (i.e. not an update) + // is possible. + // finding a matching value means that we've found that our desired + // key is already in the table + if (cur != REMOVED) { + // starting at the natural offset, probe until we find an + // offset that isn't full. + do { + index -= probe; + if (index < 0) { + index += length; + } + cur = set[index]; + } while (cur != FREE + && cur != REMOVED + && !hashing_strategy.equals((T) cur, obj)); + } + + // if the index we found was removed: continue probing until we + // locate a free location or an element which equal()s the + // one we have. + if (cur == REMOVED) { + int firstRemoved = index; + while (cur != FREE + && (cur == REMOVED || !hashing_strategy.equals((T) cur, obj))) { + index -= probe; + if (index < 0) { + index += length; + } + cur = set[index]; + } + // NOTE: cur cannot == REMOVED in this block + return (cur != FREE) ? 
-index - 1 : firstRemoved; + } + // if it's full, the key is already stored + // NOTE: cur cannot equal REMOVE here (would have retuned already (see above) + return (cur != FREE) ? -index - 1 : index; + } + } + + /** + * This is the default implementation of TObjectHashingStrategy: + * it delegates hashing to the Object's hashCode method. + * + * @param o for which the hashcode is to be computed + * @return the hashCode + * @see Object#hashCode() + */ + public final int computeHashCode(T o) { + return o == null ? 0 : o.hashCode(); + } + + /** + * This is the default implementation of TObjectHashingStrategy: + * it delegates equality comparisons to the first parameter's + * equals() method. + * + * @param o1 an Object value + * @param o2 an Object value + * @return true if the objects are equal + * @see Object#equals(Object) + */ + public final boolean equals(T o1, T o2) { + return o1 == null ? o2 == null : o1.equals(o2); + } + + /** + * Convenience methods for subclasses to use in throwing exceptions about + * badly behaved user objects employed as keys. We have to throw an + * IllegalArgumentException with a rather verbose message telling the + * user that they need to fix their object implementation to conform + * to the general contract for java.lang.Object. + * + * @param o1 the first of the equal elements with unequal hash codes. + * @param o2 the second of the equal elements with unequal hash codes. + * @throws IllegalArgumentException the whole point of this method. + */ + protected final void throwObjectContractViolation(Object o1, Object o2) + throws IllegalArgumentException { + throw new IllegalArgumentException("Equal objects must have equal hashcodes. " + + "During rehashing, Trove discovered that " + + "the following two objects claim to be " + + "equal (as in java.lang.Object.equals()) " + + "but their hashCodes (or those calculated by " + + "your TObjectHashingStrategy) are not equal." 
+ + "This violates the general contract of " + + "java.lang.Object.hashCode(). See bullet point two " + + "in that method's documentation. " + + "object #1 =" + o1 + + "; object #2 =" + o2); + } + + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + super.writeExternal(out); + + // VERSION + out.writeByte(0); + + // HASHING STRATEGY + if (_hashingStrategy == this) out.writeObject(null); + else out.writeObject(_hashingStrategy); + } + + @Override + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + super.readExternal(in); + + // VERSION + in.readByte(); + + // HASHING STRATEGY + //noinspection unchecked + _hashingStrategy = (TObjectHashingStrategy) in.readObject(); + if (_hashingStrategy == null) _hashingStrategy = this; + } +} // TObjectHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHashIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHashIterator.java new file mode 100644 index 00000000000..5f3296ae950 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHashIterator.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * Created: Wed Nov 28 21:30:53 2001 + * + * @author Eric D. Friedman + * @version $Id: TObjectHashIterator.java,v 1.2 2006/11/10 23:27:56 robeden Exp $ + */ + +class TObjectHashIterator extends THashIterator { + protected final TObjectHash _objectHash; + + public TObjectHashIterator(TObjectHash hash) { + super(hash); + _objectHash = hash; + } + + protected E objectAtIndex(int index) { + return (E) _objectHash._set[index]; + } + +} // TObjectHashIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHashingStrategy.java new file mode 100644 index 00000000000..b74c9c8fdde --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectHashingStrategy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +/** + * Interface to support pluggable hashing strategies in maps and sets. 
+ * Implementors can use this interface to make the trove hashing + * algorithms use object values, values provided by the java runtime, + * or a custom strategy when computing hashcodes. + *

    + * Created: Sat Aug 17 10:52:32 2002 + * + * @author Eric Friedman + * @version $Id: TObjectHashingStrategy.java,v 1.3 2007/06/11 15:26:44 robeden Exp $ + */ + +public interface TObjectHashingStrategy extends Serializable { + + /** + * Computes a hash code for the specified object. Implementors + * can use the object's own hashCode method, the Java + * runtime's identityHashCode, or a custom scheme. + * + * @param object for which the hashcode is to be computed + * @return the hashCode + */ + int computeHashCode(T object); + + /** + * Compares o1 and o2 for equality. Strategy implementors may use + * the objects' own equals() methods, compare object references, + * or implement some custom scheme. + * + * @param o1 an Object value + * @param o2 an Object value + * @return true if the objects are equal according to this strategy. + */ + boolean equals(T o1, T o2); +} // TObjectHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIdentityHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIdentityHashingStrategy.java new file mode 100644 index 00000000000..f330ebde36c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIdentityHashingStrategy.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +/** + * This object hashing strategy uses the System.identityHashCode + * method to provide identity hash codes. These are identical to the + * value produced by Object.hashCode(), even when the type of the + * object being hashed overrides that method. + *

    + * Created: Sat Aug 17 11:13:15 2002 + * + * @author Eric Friedman + * @version $Id: TObjectIdentityHashingStrategy.java,v 1.4 2007/06/11 15:26:44 robeden Exp $ + */ + +public final class TObjectIdentityHashingStrategy implements TObjectHashingStrategy { + /** + * Delegates hash code computation to the System.identityHashCode(Object) method. + * + * @param object for which the hashcode is to be computed + * @return the hashCode + */ + public final int computeHashCode(T object) { + return System.identityHashCode(object); + } + + /** + * Compares object references for equality. + * + * @param o1 an Object value + * @param o2 an Object value + * @return true if o1 == o2 + */ + public final boolean equals(T o1, T o2) { + return o1 == o2; + } +} // TObjectIdentityHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntHashMap.java new file mode 100644 index 00000000000..ec39bba3789 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntHashMap.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for Object keys and int values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TObjectIntHashMap extends TObjectHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TObjectIntProcedure PUT_ALL_PROC = new TObjectIntProcedure() { + public boolean execute(K key, int value) { + put(key, value); + return true; + } + }; + + /** + * the values of the map + */ + protected transient int[] _values; + + /** + * Creates a new TObjectIntHashMap instance with the default + * capacity and load factor. + */ + public TObjectIntHashMap() { + super(); + } + + /** + * Creates a new TObjectIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TObjectIntHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TObjectIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TObjectIntHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TObjectIntHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectIntHashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TObjectIntHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TObjectIntHashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TObjectIntHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectIntHashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return an iterator over the entries in this map + */ + public TObjectIntIterator iterator() { + return new TObjectIntIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new int[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an int value + * @return the previous value associated with key, + * or (int)0 if none was found. + */ + public int put(K key, int value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an int value + * @return the previous value associated with key, + * or (int)0 if none was found. 
+ */ + public int putIfAbsent(K key, int value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private int doPut(K key, int value, int index) { + int previous = (int) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + K oldKey = (K) _set[index]; + _set[index] = key; + _values[index] = value; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TObjectIntHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + K oldKeys[] = (K[]) _set; + int oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = new int[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + K o = oldKeys[i]; + int index = insertionIndex(o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or (int)0 if no such mapping exists. + */ + public int get(K key) { + int index = index(key); + return index < 0 ? (int) 0 : _values[index]; + } + + /** + * Empties the map. + */ + public void clear() { + super.clear(); + Object[] keys = _set; + int[] vals = _values; + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, (int) 0); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return an int value or (int)0 if no such mapping exists. + */ + public int remove(K key) { + int prev = (int) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TObjectIntHashMap)) { + return false; + } + TObjectIntHashMap that = (TObjectIntHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + /** + * {@inheritDoc} + */ + @Override + public TObjectIntHashMap clone() { + TObjectIntHashMap clone = (TObjectIntHashMap) super.clone(); + clone._values = new int[_values.length]; + System.arraycopy(_values, 0, clone._values, 0, clone._values.length); + + return clone; + } + + + private static final class EqProcedure implements TObjectIntProcedure { + private final TObjectIntHashMap _otherMap; + + EqProcedure(TObjectIntHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(Object key, int value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two ints for equality. + */ + private final boolean eq(int v1, int v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. 
+ * + * @return a Collection value + */ + public int[] getValues() { + int[] vals = new int[size()]; + int[] v = _values; + Object[] keys = _set; + + for (int i = v.length, j = 0; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public Object[] keys() { + Object[] keys = new Object[size()]; + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose. + * @return a Set value + */ + public K[] keys(K[] a) { + int size = size(); + if (a.length < size) { + a = (K[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an int value + * @return a boolean value + */ + public boolean containsValue(int val) { + Object[] keys = _set; + int[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(K key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. 
+ */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TIntProcedure procedure) { + Object[] keys = _set; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOObjectIntProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectIntProcedure procedure) { + K[] keys = (K[]) _set; + int[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TObjectIntProcedure procedure) { + boolean modified = false; + K[] keys = (K[]) _set; + int[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TIntFunction value + */ + public void transformValues(TIntFunction function) { + Object[] keys = _set; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != null && keys[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(K key) { + return adjustValue(key, (int) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(K key, int amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public int adjustOrPutValue(final K key, final int adjust_amount, final int put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final int newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + K oldKey = (K) _set[index]; + _set[index] = key; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + K key = (K) in.readObject(); + int val = in.readInt(); + put(key, val); + } + } + + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectIntProcedure() { + private boolean first = true; + + public boolean execute(K key, int value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TObjectIntHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntIterator.java new file mode 100644 index 00000000000..0d4903b6c1e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntIterator.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type Object and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TObjectIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TObjectIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TObjectIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TObjectIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: O2PIterator.template,v 1.3 2007/01/22 16:56:39 robeden Exp $ + */ + +public class TObjectIntIterator extends TIterator { + private final TObjectIntHashMap _map; + + public TObjectIntIterator(TObjectIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an int value + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _map._set; + int i = _index; + while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || + set[i] == TObjectHash.FREE)) ; + return i; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public K key() { + return (K) _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TObjectIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntProcedure.java new file mode 100644 index 00000000000..d361dd53ee5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type Object and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: O2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TObjectIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, int b); +}// TObjectIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongHashMap.java new file mode 100644 index 00000000000..23fa7d9c61a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongHashMap.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for Object keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TObjectLongHashMap extends TObjectHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TObjectLongProcedure PUT_ALL_PROC = new TObjectLongProcedure() { + public boolean execute(K key, long value) { + put(key, value); + return true; + } + }; + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TObjectLongHashMap instance with the default + * capacity and load factor. + */ + public TObjectLongHashMap() { + super(); + } + + /** + * Creates a new TObjectLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TObjectLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TObjectLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TObjectLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TObjectLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectLongHashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TObjectLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TObjectLongHashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TObjectLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectLongHashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return an iterator over the entries in this map + */ + public TObjectLongIterator iterator() { + return new TObjectLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an long value + * @return the previous value associated with key, + * or (long)0 if none was found. + */ + public long put(K key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an long value + * @return the previous value associated with key, + * or (long)0 if none was found. 
+ */ + public long putIfAbsent(K key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(K key, long value, int index) { + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + K oldKey = (K) _set[index]; + _set[index] = key; + _values[index] = value; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TObjectLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + K oldKeys[] = (K[]) _set; + long oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = new long[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + K o = oldKeys[i]; + int index = insertionIndex(o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or (long)0 if no such mapping exists. + */ + public long get(K key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. + */ + public void clear() { + super.clear(); + Object[] keys = _set; + long[] vals = _values; + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, (long) 0); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return an long value or (long)0 if no such mapping exists. + */ + public long remove(K key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TObjectLongHashMap)) { + return false; + } + TObjectLongHashMap that = (TObjectLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + /** + * {@inheritDoc} + */ + @Override + public TObjectLongHashMap clone() { + TObjectLongHashMap clone = (TObjectLongHashMap) super.clone(); + clone._values = new long[_values.length]; + System.arraycopy(_values, 0, clone._values, 0, clone._values.length); + + return clone; + } + + + private static final class EqProcedure implements TObjectLongProcedure { + private final TObjectLongHashMap _otherMap; + + EqProcedure(TObjectLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(Object key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. + */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. 
+ * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + Object[] keys = _set; + + for (int i = v.length, j = 0; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public Object[] keys() { + Object[] keys = new Object[size()]; + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose. + * @return a Set value + */ + public K[] keys(K[] a) { + int size = size(); + if (a.length < size) { + a = (K[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + Object[] keys = _set; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(K key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. 
+ */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + Object[] keys = _set; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOObjectLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectLongProcedure procedure) { + K[] keys = (K[]) _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TObjectLongProcedure procedure) { + boolean modified = false; + K[] keys = (K[]) _set; + long[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + Object[] keys = _set; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != null && keys[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(K key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(K key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final K key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + K oldKey = (K) _set[index]; + _set[index] = key; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + K key = (K) in.readObject(); + long val = in.readLong(); + put(key, val); + } + } + + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectLongProcedure() { + private boolean first = true; + + public boolean execute(K key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TObjectLongHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongIterator.java new file mode 100644 index 00000000000..a5b125c604e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongIterator.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type Object and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TObjectLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TObjectLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TObjectLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TObjectLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: O2PIterator.template,v 1.3 2007/01/22 16:56:39 robeden Exp $ + */ + +public class TObjectLongIterator extends TIterator { + private final TObjectLongHashMap _map; + + public TObjectLongIterator(TObjectLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an long value + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _map._set; + int i = _index; + while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || + set[i] == TObjectHash.FREE)) ; + return i; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public K key() { + return (K) _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TObjectLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongProcedure.java new file mode 100644 index 00000000000..3db1eb8296d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type Object and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: O2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TObjectLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, long b); +}// TObjectLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectObjectProcedure.java new file mode 100644 index 00000000000..31b31ae9598 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectObjectProcedure.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +/** + * Interface for procedures that take two Object parameters. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: TObjectObjectProcedure.java,v 1.3 2006/11/10 23:27:57 robeden Exp $ + */ + +public interface TObjectObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, V b); +}// TObjectObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectProcedure.java new file mode 100644 index 00000000000..87bff1d3be0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectProcedure.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * Interface for procedures with one Object parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: TObjectProcedure.java,v 1.4 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TObjectProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param object an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(T object); +}// TObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortHashMap.java new file mode 100644 index 00000000000..7d5c89f26eb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortHashMap.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for Object keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TObjectShortHashMap extends TObjectHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TObjectShortProcedure PUT_ALL_PROC = new TObjectShortProcedure() { + public boolean execute(K key, short value) { + put(key, value); + return true; + } + }; + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TObjectShortHashMap instance with the default + * capacity and load factor. + */ + public TObjectShortHashMap() { + super(); + } + + /** + * Creates a new TObjectShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TObjectShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TObjectShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TObjectShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TObjectShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectShortHashMap(TObjectHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TObjectShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TObjectShortHashMap(int initialCapacity, TObjectHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TObjectShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TObjectShortHashMap(int initialCapacity, float loadFactor, TObjectHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return an iterator over the entries in this map + */ + public TObjectShortIterator iterator() { + return new TObjectShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an short value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public short put(K key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an Object value + * @param value an short value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public short putIfAbsent(K key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(K key, short value, int index) { + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + K oldKey = (K) _set[index]; + _set[index] = key; + _values[index] = value; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TObjectShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + K oldKeys[] = (K[]) _set; + short oldVals[] = _values; + + _set = new Object[newCapacity]; + Arrays.fill(_set, FREE); + _values = new short[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldKeys[i] != FREE && oldKeys[i] != REMOVED) { + K o = oldKeys[i]; + int index = insertionIndex(o); + if (index < 0) { + throwObjectContractViolation(_set[(-index - 1)], o); + } + _set[index] = o; + _values[index] = oldVals[i]; + } + } + } + + /** + * retrieves the value for key + * + * @param key an Object value + * @return the value of key or (short)0 if no such mapping exists. + */ + public short get(K key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. + */ + public void clear() { + super.clear(); + Object[] keys = _set; + short[] vals = _values; + + Arrays.fill(_set, 0, _set.length, FREE); + Arrays.fill(_values, 0, _values.length, (short) 0); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return an short value or (short)0 if no such mapping exists. + */ + public short remove(K key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TObjectShortHashMap)) { + return false; + } + TObjectShortHashMap that = (TObjectShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + /** + * {@inheritDoc} + */ + @Override + public TObjectShortHashMap clone() { + TObjectShortHashMap clone = (TObjectShortHashMap) super.clone(); + clone._values = new short[_values.length]; + System.arraycopy(_values, 0, clone._values, 0, clone._values.length); + + return clone; + } + + + private static final class EqProcedure implements TObjectShortProcedure { + private final TObjectShortHashMap _otherMap; + + EqProcedure(TObjectShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(Object key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. + */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. 
+ * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + Object[] keys = _set; + + for (int i = v.length, j = 0; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public Object[] keys() { + Object[] keys = new Object[size()]; + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose. + * @return a Set value + */ + public K[] keys(K[] a) { + int size = size(); + if (a.length < size) { + a = (K[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + K[] k = (K[]) _set; + + for (int i = k.length, j = 0; i-- > 0;) { + if (k[i] != FREE && k[i] != REMOVED) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + Object[] keys = _set; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(K key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. 
+ */ + public boolean forEachKey(TObjectProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + Object[] keys = _set; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != FREE && keys[i] != REMOVED + && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOObjectShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TObjectShortProcedure procedure) { + K[] keys = (K[]) _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TObjectShortProcedure procedure) { + boolean modified = false; + K[] keys = (K[]) _set; + short[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (keys[i] != FREE + && keys[i] != REMOVED + && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + Object[] keys = _set; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (keys[i] != null && keys[i] != REMOVED) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(K key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(K key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final K key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + K oldKey = (K) _set[index]; + _set[index] = key; + + if (isNewMapping) { + postInsertHook(oldKey == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + K key = (K) in.readObject(); + short val = in.readShort(); + put(key, val); + } + } + + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TObjectShortProcedure() { + private boolean first = true; + + public boolean execute(K key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TObjectShortHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortIterator.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortIterator.java new file mode 100644 index 00000000000..65b83ce8ca5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortIterator.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type Object and short. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TObjectShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TObjectShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TObjectShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TObjectShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: O2PIterator.template,v 1.3 2007/01/22 16:56:39 robeden Exp $ + */ + +public class TObjectShortIterator extends TIterator { + private final TObjectShortHashMap _map; + + public TObjectShortIterator(TObjectShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an short value + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + Object[] set = _map._set; + int i = _index; + while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || + set[i] == TObjectHash.FREE)) ; + return i; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public K key() { + return (K) _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TObjectShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortProcedure.java new file mode 100644 index 00000000000..9906d1a93cf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TObjectShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type Object and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: O2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TObjectShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a an Object value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(K a, short b); +}// TObjectShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TPrimitiveHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TPrimitiveHash.java new file mode 100644 index 00000000000..f447847b2b4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TPrimitiveHash.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +/** + * The base class for hashtables of primitive values. 
Since there is + * no notion of object equality for primitives, it isn't possible to + * use a `REMOVED' object to track deletions in an open-addressed table. + * So, we have to resort to using a parallel `bookkeeping' array of bytes, + * in which flags can be set to indicate that a particular slot in the + * hash table is FREE, FULL, or REMOVED. + *

    + * Created: Fri Jan 11 18:55:16 2002 + * + * @author Eric D. Friedman + * @version $Id: TPrimitiveHash.java,v 1.5 2008/10/08 16:39:10 robeden Exp $ + */ + +abstract public class TPrimitiveHash extends THash { + /** + * flags indicating whether each position in the hash is + * FREE, FULL, or REMOVED + */ + protected transient byte[] _states; + + /* constants used for state flags */ + + /** + * flag indicating that a slot in the hashtable is available + */ + protected static final byte FREE = 0; + + /** + * flag indicating that a slot in the hashtable is occupied + */ + protected static final byte FULL = 1; + + /** + * flag indicating that the value of a slot in the hashtable + * was deleted + */ + protected static final byte REMOVED = 2; + + /** + * Creates a new THash instance with the default + * capacity and load factor. + */ + public TPrimitiveHash() { + super(); + } + + /** + * Creates a new TPrimitiveHash instance with a prime + * capacity at or near the specified capacity and with the default + * load factor. + * + * @param initialCapacity an int value + */ + public TPrimitiveHash(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Creates a new TPrimitiveHash instance with a prime + * capacity at or near the minimum needed to hold + * initialCapacity elements with load factor + * loadFactor without triggering a rehash. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TPrimitiveHash(int initialCapacity, float loadFactor) { + super(); + _loadFactor = loadFactor; + setUp(HashFunctions.fastCeil(initialCapacity / loadFactor)); + } + + public Object clone() { + TPrimitiveHash h = (TPrimitiveHash) super.clone(); + h._states = (byte[]) this._states.clone(); + return h; + } + + /** + * Returns the capacity of the hash table. This is the true + * physical capacity, without adjusting for the load factor. + * + * @return the physical capacity of the hash table. 
+ */ + protected int capacity() { + return _states.length; + } + + /** + * Delete the record at index. + * + * @param index an int value + */ + protected void removeAt(int index) { + _states[index] = REMOVED; + super.removeAt(index); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _states = new byte[capacity]; + return capacity; + } +} // TPrimitiveHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TPrimitiveIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TPrimitiveIterator.java new file mode 100644 index 00000000000..c5b32f0ac42 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TPrimitiveIterator.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.util.ConcurrentModificationException; + +/** + * Implements all iterator functions for the hashed object set. 
+ * Subclasses may override objectAtIndex to vary the object + * returned by calls to next() (e.g. for values, and Map.Entry + * objects). + *

    + *

    Note that iteration is fastest if you forego the calls to + * hasNext in favor of checking the size of the structure + * yourself and then call next() that many times: + *

    + *

    + * Iterator i = collection.iterator();
    + * for (int size = collection.size(); size-- > 0;) {
    + *   Object o = i.next();
    + * }
    + * 
    + *

    + *

    You may, of course, use the hasNext(), next() idiom too if + * you aren't in a performance critical spot.

    + */ +abstract class TPrimitiveIterator extends TIterator { + /** + * the collection on which this iterator operates. + */ + protected final TPrimitiveHash _hash; + + /** + * Creates a TPrimitiveIterator for the specified collection. + */ + public TPrimitiveIterator(TPrimitiveHash hash) { + super(hash); + _hash = hash; + } + + /** + * Returns the index of the next value in the data structure + * or a negative value if the iterator is exhausted. + * + * @return an int value + * @throws ConcurrentModificationException + * if the underlying collection's + * size has been modified since the iterator was created. + */ + protected final int nextIndex() { + if (_expectedSize != _hash.size()) { + throw new ConcurrentModificationException(); + } + + byte[] states = _hash._states; + int i = _index; + while (i-- > 0 && (states[i] != TPrimitiveHash.FULL)) ; + return i; + } + +} // TPrimitiveIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortArrayList.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortArrayList.java new file mode 100644 index 00000000000..ad48b58b970 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortArrayList.java @@ -0,0 +1,935 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; +import java.util.Random; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * A resizable, array-backed list of short primitives. + *

    + * Created: Sat Dec 29 14:21:12 2001 + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TShortArrayList implements Externalizable, Cloneable { + static final long serialVersionUID = 1L; + + /** + * the data of the list + */ + protected short[] _data; + + /** + * the index after the last entry in the list + */ + protected int _pos; + + /** + * the default capacity for new lists + */ + protected static final int DEFAULT_CAPACITY = 10; + + /** + * Creates a new TShortArrayList instance with the + * default capacity. + */ + public TShortArrayList() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TShortArrayList instance with the + * specified capacity. + * + * @param capacity an int value + */ + public TShortArrayList(int capacity) { + _data = new short[capacity]; + _pos = 0; + } + + /** + * Creates a new TShortArrayList instance whose + * capacity is the greater of the length of values and + * DEFAULT_CAPACITY and whose initial contents are the specified + * values. + * + * @param values an short[] value + */ + public TShortArrayList(short[] values) { + this(Math.max(values.length, DEFAULT_CAPACITY)); + add(values); + } + + // sizing + + /** + * Grow the internal array as needed to accommodate the specified + * number of elements. The size of the array shorts on each + * resize unless capacity requires more than twice the + * current capacity. + * + * @param capacity an int value + */ + public void ensureCapacity(int capacity) { + if (capacity > _data.length) { + int newCap = Math.max(_data.length << 1, capacity); + short[] tmp = new short[newCap]; + System.arraycopy(_data, 0, tmp, 0, _data.length); + _data = tmp; + } + } + + /** + * Returns the number of values in the list. + * + * @return the number of values in the list. + */ + public int size() { + return _pos; + } + + /** + * Tests whether this list contains any values. + * + * @return true if the list is empty. 
+ */ + public boolean isEmpty() { + return _pos == 0; + } + + /** + * Sheds any excess capacity above and beyond the current size of + * the list. + */ + public void trimToSize() { + if (_data.length > size()) { + short[] tmp = new short[size()]; + toNativeArray(tmp, 0, tmp.length); + _data = tmp; + } + } + + // modifying + + /** + * Adds val to the end of the list, growing as needed. + * + * @param val an short value + */ + public void add(short val) { + ensureCapacity(_pos + 1); + _data[_pos++] = val; + } + + /** + * Adds the values in the array vals to the end of the + * list, in order. + * + * @param vals an short[] value + */ + public void add(short[] vals) { + add(vals, 0, vals.length); + } + + /** + * Adds a subset of the values in the array vals to the + * end of the list, in order. + * + * @param vals an short[] value + * @param offset the offset at which to start copying + * @param length the number of values to copy. + */ + public void add(short[] vals, int offset, int length) { + ensureCapacity(_pos + length); + System.arraycopy(vals, offset, _data, _pos, length); + _pos += length; + } + + /** + * Inserts value into the list at offset. All + * values including and to the right of offset are shifted + * to the right. + * + * @param offset an int value + * @param value an short value + */ + public void insert(int offset, short value) { + if (offset == _pos) { + add(value); + return; + } + ensureCapacity(_pos + 1); + // shift right + System.arraycopy(_data, offset, _data, offset + 1, _pos - offset); + // insert + _data[offset] = value; + _pos++; + } + + /** + * Inserts the array of values into the list at + * offset. All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an short[] value + */ + public void insert(int offset, short[] values) { + insert(offset, values, 0, values.length); + } + + /** + * Inserts a slice of the array of values into the list + * at offset. 
All values including and to the right of + * offset are shifted to the right. + * + * @param offset an int value + * @param values an short[] value + * @param valOffset the offset in the values array at which to + * start copying. + * @param len the number of values to copy from the values array + */ + public void insert(int offset, short[] values, int valOffset, int len) { + if (offset == _pos) { + add(values, valOffset, len); + return; + } + + ensureCapacity(_pos + len); + // shift right + System.arraycopy(_data, offset, _data, offset + len, _pos - offset); + // insert + System.arraycopy(values, valOffset, _data, offset, len); + _pos += len; + } + + /** + * Returns the value at the specified offset. + * + * @param offset an int value + * @return an short value + */ + public short get(int offset) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + return _data[offset]; + } + + /** + * Returns the value at the specified offset without doing any + * bounds checking. + * + * @param offset an int value + * @return an short value + */ + public short getQuick(int offset) { + return _data[offset]; + } + + /** + * Sets the value at the specified offset. + * + * @param offset an int value + * @param val an short value + */ + public void set(int offset, short val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + _data[offset] = val; + } + + /** + * Sets the value at the specified offset and returns the + * previously stored value. + * + * @param offset an int value + * @param val an short value + * @return the value previously stored at offset. + */ + public short getSet(int offset, short val) { + if (offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + short old = _data[offset]; + _data[offset] = val; + return old; + } + + /** + * Replace the values in the list starting at offset with + * the contents of the values array. 
+ * + * @param offset the first offset to replace + * @param values the source of the new values + */ + public void set(int offset, short[] values) { + set(offset, values, 0, values.length); + } + + /** + * Replace the values in the list starting at offset with + * length values from the values array, starting + * at valOffset. + * + * @param offset the first offset to replace + * @param values the source of the new values + * @param valOffset the first value to copy from the values array + * @param length the number of values to copy + */ + public void set(int offset, short[] values, int valOffset, int length) { + if (offset < 0 || offset + length > _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(values, valOffset, _data, offset, length); + } + + /** + * Sets the value at the specified offset without doing any bounds + * checking. + * + * @param offset an int value + * @param val an short value + */ + public void setQuick(int offset, short val) { + _data[offset] = val; + } + + /** + * Flushes the internal state of the list, resetting the capacity + * to the default. + */ + public void clear() { + clear(DEFAULT_CAPACITY); + } + + /** + * Flushes the internal state of the list, setting the capacity of + * the empty list to capacity. + * + * @param capacity an int value + */ + public void clear(int capacity) { + _data = new short[capacity]; + _pos = 0; + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list without + * allocating new backing arrays. + * + * @see #clear + */ + public void reset() { + _pos = 0; + fill((short) 0); + } + + /** + * Sets the size of the list to 0, but does not change its + * capacity. This method can be used as an alternative to the + * {@link #clear clear} method if you want to recyle a list + * without allocating new backing arrays. 
This method differs + * from {@link #reset reset} in that it does not clear the old + * values in the backing array. Thus, it is possible for {@link + * #getQuick getQuick} to return stale data if this method is used + * and the caller is careless about bounds checking. + * + * @see #reset + * @see #clear + * @see #getQuick + */ + public void resetQuick() { + _pos = 0; + } + + /** + * Removes the value at offset from the list. + * + * @param offset an int value + * @return the value previously stored at offset. + */ + public short remove(int offset) { + short old = get(offset); + remove(offset, 1); + return old; + } + + /** + * Removes length values from the list, starting at + * offset + * + * @param offset an int value + * @param length an int value + */ + public void remove(int offset, int length) { + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + + if (offset == 0) { + // data at the front + System.arraycopy(_data, length, _data, 0, _pos - length); + } else if (_pos - length == offset) { + // no copy to make, decrementing pos "deletes" values at + // the end + } else { + // data in the middle + System.arraycopy(_data, offset + length, + _data, offset, _pos - (offset + length)); + } + _pos -= length; + // no need to clear old values beyond _pos, because this is a + // primitive collection and 0 takes as much room as any other + // value + } + + /** + * Transform each value in the list using the specified function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + for (int i = _pos; i-- > 0;) { + _data[i] = function.execute(_data[i]); + } + } + + /** + * Reverse the order of the elements in the list. + */ + public void reverse() { + reverse(0, _pos); + } + + /** + * Reverse the order of the elements in the range of the list. 
+ * + * @param from the inclusive index at which to start reversing + * @param to the exclusive index at which to stop reversing + */ + public void reverse(int from, int to) { + if (from == to) { + return; // nothing to do + } + if (from > to) { + throw new IllegalArgumentException("from cannot be greater than to"); + } + for (int i = from, j = to - 1; i < j; i++, j--) { + swap(i, j); + } + } + + /** + * Shuffle the elements of the list using the specified random + * number generator. + * + * @param rand a Random value + */ + public void shuffle(Random rand) { + for (int i = _pos; i-- > 1;) { + swap(i, rand.nextInt(i)); + } + } + + /** + * Swap the values at offsets i and j. + * + * @param i an offset into the data array + * @param j an offset into the data array + */ + private final void swap(int i, int j) { + short tmp = _data[i]; + _data[i] = _data[j]; + _data[j] = tmp; + } + + // copying + + /** + * Returns a clone of this list. Since this is a primitive + * collection, this will be a deep clone. + * + * @return a deep clone of the list. + */ + public Object clone() { + TShortArrayList list = null; + try { + list = (TShortArrayList) super.clone(); + list._data = toNativeArray(); + } catch (CloneNotSupportedException e) { + // it's supported + } // end of try-catch + return list; + } + + + /** + * Returns a sublist of this list. + * + * @param begin low endpoint (inclusive) of the subList. + * @param end high endpoint (exclusive) of the subList. + * @return sublist of this list from begin, inclusive to end, exclusive. 
+ * @throws IndexOutOfBoundsException - endpoint out of range + * @throws IllegalArgumentException - endpoints out of order (end > begin) + */ + public TShortArrayList subList(int begin, int end) { + if (end < begin) throw new IllegalArgumentException("end index " + end + " greater than begin index " + begin); + if (begin < 0) throw new IndexOutOfBoundsException("begin index can not be < 0"); + if (end > _data.length) throw new IndexOutOfBoundsException("end index < " + _data.length); + TShortArrayList list = new TShortArrayList(end - begin); + for (int i = begin; i < end; i++) { + list.add(_data[i]); + } + return list; + } + + + /** + * Copies the contents of the list into a native array. + * + * @return an short[] value + */ + public short[] toNativeArray() { + return toNativeArray(0, _pos); + } + + /** + * Copies a slice of the list into a native array. + * + * @param offset the offset at which to start copying + * @param len the number of values to copy. + * @return an short[] value + */ + public short[] toNativeArray(int offset, int len) { + short[] rv = new short[len]; + toNativeArray(rv, offset, len); + return rv; + } + + /** + * Copies a slice of the list into a native array. + * + * @param dest the array to copy into. + * @param offset the offset of the first value to copy + * @param len the number of values to copy. + */ + public void toNativeArray(short[] dest, int offset, int len) { + if (len == 0) { + return; // nothing to copy + } + if (offset < 0 || offset >= _pos) { + throw new ArrayIndexOutOfBoundsException(offset); + } + System.arraycopy(_data, offset, dest, 0, len); + } + + // comparing + + /** + * Compares this list to another list, value by value. + * + * @param other the object to compare against + * @return true if other is a TShortArrayList and has exactly the + * same values. 
+ */ + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TShortArrayList) { + TShortArrayList that = (TShortArrayList) other; + if (that.size() != this.size()) { + return false; + } else { + for (int i = _pos; i-- > 0;) { + if (this._data[i] != that._data[i]) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + public int hashCode() { + int h = 0; + for (int i = _pos; i-- > 0;) { + h = 37 * h + HashFunctions.hash(_data[i]); + } + return h; + } + + // procedures + + /** + * Applies the procedure to each value in the list in ascending + * (front to back) order. + * + * @param procedure a TShortProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEach(TShortProcedure procedure) { + for (int i = 0; i < _pos; i++) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + /** + * Applies the procedure to each value in the list in descending + * (back to front) order. + * + * @param procedure a TShortProcedure value + * @return true if the procedure did not terminate prematurely. + */ + public boolean forEachDescending(TShortProcedure procedure) { + for (int i = _pos; i-- > 0;) { + if (!procedure.execute(_data[i])) { + return false; + } + } + return true; + } + + // sorting + + /** + * Sort the values in the list (ascending) using the Sun quicksort + * implementation. + * + * @see java.util.Arrays#sort + */ + public void sort() { + Arrays.sort(_data, 0, _pos); + } + + /** + * Sort a slice of the list (ascending) using the Sun quicksort + * implementation. + * + * @param fromIndex the index at which to start sorting (inclusive) + * @param toIndex the index at which to stop sorting (exclusive) + * @see java.util.Arrays#sort + */ + public void sort(int fromIndex, int toIndex) { + Arrays.sort(_data, fromIndex, toIndex); + } + + // filling + + /** + * Fills every slot in the list with the specified value. 
+ * + * @param val the value to use when filling + */ + public void fill(short val) { + Arrays.fill(_data, 0, _pos, val); + } + + /** + * Fills a range in the list with the specified value. + * + * @param fromIndex the offset at which to start filling (inclusive) + * @param toIndex the offset at which to stop filling (exclusive) + * @param val the value to use when filling + */ + public void fill(int fromIndex, int toIndex, short val) { + if (toIndex > _pos) { + ensureCapacity(toIndex); + _pos = toIndex; + } + Arrays.fill(_data, fromIndex, toIndex, val); + } + + // searching + + /** + * Performs a binary search for value in the entire list. + * Note that you must @{link #sort sort} the list before + * doing a search. + * + * @param value the value to search for + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(short value) { + return binarySearch(value, 0, _pos); + } + + /** + * Performs a binary search for value in the specified + * range. Note that you must @{link #sort sort} the list + * or the range before doing a search. + * + * @param value the value to search for + * @param fromIndex the lower boundary of the range (inclusive) + * @param toIndex the upper boundary of the range (exclusive) + * @return the absolute offset in the list of the value, or its + * negative insertion point into the sorted list. + */ + public int binarySearch(short value, int fromIndex, int toIndex) { + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); + } + if (toIndex > _pos) { + throw new ArrayIndexOutOfBoundsException(toIndex); + } + + int low = fromIndex; + int high = toIndex - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + short midVal = _data[mid]; + + if (midVal < value) { + low = mid + 1; + } else if (midVal > value) { + high = mid - 1; + } else { + return mid; // value found + } + } + return -(low + 1); // value not found. 
+ } + + /** + * Searches the list front to back for the index of + * value. + * + * @param value an short value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(short value) { + return indexOf(0, value); + } + + /** + * Searches the list front to back for the index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (inclusive) + * @param value an short value + * @return the first offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int indexOf(int offset, short value) { + for (int i = offset; i < _pos; i++) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list back to front for the last index of + * value. + * + * @param value an short value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(short value) { + return lastIndexOf(_pos, value); + } + + /** + * Searches the list back to front for the last index of + * value, starting at offset. + * + * @param offset the offset at which to start the linear search + * (exclusive) + * @param value an short value + * @return the last offset of the value, or -1 if it is not in + * the list. + * @see #binarySearch for faster searches on sorted lists + */ + public int lastIndexOf(int offset, short value) { + for (int i = offset; i-- > 0;) { + if (_data[i] == value) { + return i; + } + } + return -1; + } + + /** + * Searches the list for value + * + * @param value an short value + * @return true if value is in the list. + */ + public boolean contains(short value) { + return lastIndexOf(value) >= 0; + } + + /** + * Searches the list for values satisfying condition in + * the manner of the *nix grep utility. 
+ * + * @param condition a condition to apply to each element in the list + * @return a list of values which match the condition. + */ + public TShortArrayList grep(TShortProcedure condition) { + TShortArrayList list = new TShortArrayList(); + for (int i = 0; i < _pos; i++) { + if (condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Searches the list for values which do not satisfy + * condition. This is akin to *nix grep -v. + * + * @param condition a condition to apply to each element in the list + * @return a list of values which do not match the condition. + */ + public TShortArrayList inverseGrep(TShortProcedure condition) { + TShortArrayList list = new TShortArrayList(); + for (int i = 0; i < _pos; i++) { + if (!condition.execute(_data[i])) { + list.add(_data[i]); + } + } + return list; + } + + /** + * Finds the maximum value in the list. + * + * @return the largest value in the list. + * @throws IllegalStateException if the list is empty + */ + public short max() { + if (size() == 0) { + throw new IllegalStateException("cannot find maximum of an empty list"); + } + short max = Short.MIN_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] > max) { + max = _data[i]; + } + } + return max; + } + + /** + * Finds the minimum value in the list. + * + * @return the smallest value in the list. + * @throws IllegalStateException if the list is empty + */ + public short min() { + if (size() == 0) { + throw new IllegalStateException("cannot find minimum of an empty list"); + } + short min = Short.MAX_VALUE; + for (int i = 0; i < _pos; i++) { + if (_data[i] < min) { + min = _data[i]; + } + } + return min; + } + + // stringification + + /** + * Returns a String representation of the list, front to back. 
+ * + * @return a String value + */ + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + for (int i = 0, end = _pos - 1; i < end; i++) { + buf.append(_data[i]); + buf.append(", "); + } + if (size() > 0) { + buf.append(_data[_pos - 1]); + } + buf.append("}"); + return buf.toString(); + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(1); + + // POSITION + out.writeInt(_pos); + + // ENTRIES + int len = _pos; + out.writeInt(_pos); // Written twice for backwards compatability with + // version 0 + for (int i = 0; i < len; i++) { + out.writeShort(_data[i]); + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // POSITION + _pos = in.readInt(); + + // ENTRIES + int len = in.readInt(); + _data = new short[len]; + for (int i = 0; i < len; i++) { + _data[i] = in.readShort(); + } + } +} // TShortArrayList diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteHashMap.java new file mode 100644 index 00000000000..947058ad61f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and byte values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortByteHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortByteProcedure PUT_ALL_PROC = new TShortByteProcedure() { + public boolean execute(short key, byte value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient byte[] _values; + + /** + * Creates a new TShortByteHashMap instance with the default + * capacity and load factor. + */ + public TShortByteHashMap() { + super(); + } + + /** + * Creates a new TShortByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortByteHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortByteHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortByteHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortByteHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortByteHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortByteHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortByteHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortByteHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortByteHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortByteHashMap m = (TShortByteHashMap) super.clone(); + m._values = (byte[]) this._values.clone(); + return m; + } + + /** + * @return a TShortByteIterator with access to this map's keys and values + */ + public TShortByteIterator iterator() { + return new TShortByteIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new byte[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an byte value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public byte put(short key, byte value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an short value + * @param value an byte value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public byte putIfAbsent(short key, byte value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private byte doPut(short key, byte value, int index) { + byte previousState; + byte previous = (byte) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortByteHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + byte oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = new byte[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public byte get(short key) { + int index = index(key); + return index < 0 ? (byte) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + byte[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, (byte) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an byte value, or (short)0 if no mapping for key exists + */ + public byte remove(short key) { + byte prev = (byte) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortByteHashMap)) { + return false; + } + TShortByteHashMap that = (TShortByteHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortByteProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, byte value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortByteProcedure { + private final TShortByteHashMap _otherMap; + + EqProcedure(TShortByteHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, byte value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two bytes for equality. 
+ */ + private final boolean eq(byte v1, byte v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (byte) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public byte[] getValues() { + byte[] vals = new byte[size()]; + byte[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an byte value + * @return a boolean value + */ + public boolean containsValue(byte val) { + byte[] states = _states; + byte[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TByteProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TByteProcedure procedure) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortByteProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortByteProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + byte[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortByteProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + byte[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TByteFunction value + */ + public void transformValues(TByteFunction function) { + byte[] states = _states; + byte[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(short key) { + return adjustValue(key, (byte) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(short key, byte amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public byte adjustOrPutValue(final short key, final byte adjust_amount, final byte put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final byte newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + byte val = in.readByte(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortByteProcedure() { + private boolean first = true; + + public boolean execute(short key, byte value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortByteHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteIterator.java new file mode 100644 index 00000000000..696a6267754 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and byte. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortByteIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortByteIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortByteIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortByteHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortByteIterator(TShortByteHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public byte value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public byte setValue(byte val) { + byte old = value(); + _map._values[_index] = val; + return old; + } +}// TShortByteIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteProcedure.java new file mode 100644 index 00000000000..c53062c8a1b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortByteProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and byte. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortByteProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b a byte value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, byte b); +}// TShortByteProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleHashMap.java new file mode 100644 index 00000000000..91677f6401b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and double values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortDoubleHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortDoubleProcedure PUT_ALL_PROC = new TShortDoubleProcedure() { + public boolean execute(short key, double value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient double[] _values; + + /** + * Creates a new TShortDoubleHashMap instance with the default + * capacity and load factor. + */ + public TShortDoubleHashMap() { + super(); + } + + /** + * Creates a new TShortDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortDoubleHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortDoubleHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortDoubleHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortDoubleHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortDoubleHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortDoubleHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortDoubleHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortDoubleHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortDoubleHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortDoubleHashMap m = (TShortDoubleHashMap) super.clone(); + m._values = (double[]) this._values.clone(); + return m; + } + + /** + * @return a TShortDoubleIterator with access to this map's keys and values + */ + public TShortDoubleIterator iterator() { + return new TShortDoubleIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new double[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an double value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public double put(short key, double value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an short value + * @param value an double value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public double putIfAbsent(short key, double value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private double doPut(short key, double value, int index) { + byte previousState; + double previous = (double) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortDoubleHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + double oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = new double[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public double get(short key) { + int index = index(key); + return index < 0 ? (double) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + double[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, (double) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an double value, or (short)0 if no mapping for key exists + */ + public double remove(short key) { + double prev = (double) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortDoubleHashMap)) { + return false; + } + TShortDoubleHashMap that = (TShortDoubleHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortDoubleProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, double value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortDoubleProcedure { + private final TShortDoubleHashMap _otherMap; + + EqProcedure(TShortDoubleHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, double value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two doubles for equality. 
+ */ + private final boolean eq(double v1, double v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (double) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public double[] getValues() { + double[] vals = new double[size()]; + double[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an double value + * @return a boolean value + */ + public boolean containsValue(double val) { + byte[] states = _states; + double[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TDoubleProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TDoubleProcedure procedure) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortDoubleProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortDoubleProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + double[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortDoubleProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + double[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TDoubleFunction value + */ + public void transformValues(TDoubleFunction function) { + byte[] states = _states; + double[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(short key) { + return adjustValue(key, (double) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(short key, double amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public double adjustOrPutValue(final short key, final double adjust_amount, final double put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final double newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + double val = in.readDouble(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortDoubleProcedure() { + private boolean first = true; + + public boolean execute(short key, double value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortDoubleHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleIterator.java new file mode 100644 index 00000000000..6bb3331197a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and double. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortDoubleIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortDoubleIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortDoubleIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortDoubleHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortDoubleIterator(TShortDoubleHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public double value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public double setValue(double val) { + double old = value(); + _map._values[_index] = val; + return old; + } +}// TShortDoubleIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleProcedure.java new file mode 100644 index 00000000000..ca4ce24f8bf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortDoubleProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and double. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortDoubleProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b a double value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, double b); +}// TShortDoubleProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatHashMap.java new file mode 100644 index 00000000000..de590752663 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and float values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortFloatHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortFloatProcedure PUT_ALL_PROC = new TShortFloatProcedure() { + public boolean execute(short key, float value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient float[] _values; + + /** + * Creates a new TShortFloatHashMap instance with the default + * capacity and load factor. + */ + public TShortFloatHashMap() { + super(); + } + + /** + * Creates a new TShortFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortFloatHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortFloatHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortFloatHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortFloatHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortFloatHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortFloatHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortFloatHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortFloatHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortFloatHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortFloatHashMap m = (TShortFloatHashMap) super.clone(); + m._values = (float[]) this._values.clone(); + return m; + } + + /** + * @return a TShortFloatIterator with access to this map's keys and values + */ + public TShortFloatIterator iterator() { + return new TShortFloatIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new float[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an float value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public float put(short key, float value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an short value + * @param value an float value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public float putIfAbsent(short key, float value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private float doPut(short key, float value, int index) { + byte previousState; + float previous = (float) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortFloatHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + float oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = new float[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public float get(short key) { + int index = index(key); + return index < 0 ? (float) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + float[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, (float) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an float value, or (short)0 if no mapping for key exists + */ + public float remove(short key) { + float prev = (float) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortFloatHashMap)) { + return false; + } + TShortFloatHashMap that = (TShortFloatHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortFloatProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, float value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortFloatProcedure { + private final TShortFloatHashMap _otherMap; + + EqProcedure(TShortFloatHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, float value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two floats for equality. 
+ */ + private final boolean eq(float v1, float v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (float) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public float[] getValues() { + float[] vals = new float[size()]; + float[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an float value + * @return a boolean value + */ + public boolean containsValue(float val) { + byte[] states = _states; + float[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TFloatProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TFloatProcedure procedure) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortFloatProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortFloatProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + float[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortFloatProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + float[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TFloatFunction value + */ + public void transformValues(TFloatFunction function) { + byte[] states = _states; + float[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(short key) { + return adjustValue(key, (float) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(short key, float amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public float adjustOrPutValue(final short key, final float adjust_amount, final float put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final float newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + float val = in.readFloat(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortFloatProcedure() { + private boolean first = true; + + public boolean execute(short key, float value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortFloatHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatIterator.java new file mode 100644 index 00000000000..42625b98d16 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and float. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortFloatIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortFloatIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortFloatIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortFloatHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortFloatIterator(TShortFloatHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public float value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public float setValue(float val) { + float old = value(); + _map._values[_index] = val; + return old; + } +}// TShortFloatIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatProcedure.java new file mode 100644 index 00000000000..ba515d87090 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFloatProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and float. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortFloatProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b a float value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, float b); +}// TShortFloatProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFunction.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFunction.java new file mode 100644 index 00000000000..c4bec475628 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortFunction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for functions that accept and return one short primitive. + *

    + * Created: Mon Nov 5 22:19:36 2001 + * + * @author Eric D. Friedman + * @version $Id: PFunction.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortFunction { + /** + * Execute this function with value + * + * @param value a short input + * @return a short result + */ + public short execute(short value); +}// TShortFunction diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHash.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHash.java new file mode 100644 index 00000000000..82736851eb4 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHash.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed hashing implementation for short primitives. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHash.template,v 1.2 2007/06/29 22:39:46 robeden Exp $ + */ + +abstract public class TShortHash extends TPrimitiveHash implements TShortHashingStrategy { + + /** + * the set of shorts + */ + protected transient short[] _set; + + /** + * strategy used to hash values in this collection + */ + protected TShortHashingStrategy _hashingStrategy; + + /** + * Creates a new TShortHash instance with the default + * capacity and load factor. + */ + public TShortHash() { + super(); + this._hashingStrategy = this; + } + + /** + * Creates a new TShortHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + */ + public TShortHash(int initialCapacity) { + super(initialCapacity); + this._hashingStrategy = this; + } + + /** + * Creates a new TShortHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + */ + public TShortHash(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + this._hashingStrategy = this; + } + + /** + * Creates a new TShortHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortHash(TShortHashingStrategy strategy) { + super(); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TShortHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortHash(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity); + this._hashingStrategy = strategy; + } + + /** + * Creates a new TShortHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortHash(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor); + this._hashingStrategy = strategy; + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortHash h = (TShortHash) super.clone(); + h._set = (short[]) this._set.clone(); + return h; + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _set = new short[capacity]; + return capacity; + } + + /** + * Searches the set for val + * + * @param val an short value + * @return a boolean value + */ + public boolean contains(short val) { + return index(val) >= 0; + } + + /** + * Executes procedure for each element in the set. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the set terminated because + * the procedure returned false for some value. + */ + public boolean forEach(TShortProcedure procedure) { + byte[] states = _states; + short[] set = _set; + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(set[i])) { + return false; + } + } + return true; + } + + /** + * Releases the element currently stored at index. 
+ * + * @param index an int value + */ + protected void removeAt(int index) { + _set[index] = (short) 0; + super.removeAt(index); + } + + /** + * Locates the index of val. + * + * @param val an short value + * @return the index of val or -1 if it isn't in the set. + */ + protected int index(short val) { + int hash, probe, index, length; + + final byte[] states = _states; + final short[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + // see Knuth, p. 529 + probe = 1 + (hash % (length - 2)); + + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)); + } + + return states[index] == FREE ? -1 : index; + } + + /** + * Locates the index at which val can be inserted. if + * there is already a value equal()ing val in the set, + * returns that value as a negative integer. + * + * @param val an short value + * @return an int value + */ + protected int insertionIndex(short val) { + int hash, probe, index, length; + + final byte[] states = _states; + final short[] set = _set; + length = states.length; + hash = _hashingStrategy.computeHashCode(val) & 0x7fffffff; + index = hash % length; + + if (states[index] == FREE) { + return index; // empty, all done + } else if (states[index] == FULL && set[index] == val) { + return -index - 1; // already stored + } else { // already FULL or REMOVED, must probe + // compute the double hash + probe = 1 + (hash % (length - 2)); + + // if the slot we landed on is FULL (but not removed), probe + // until we find an empty slot, a REMOVED slot, or an element + // equal to the one we are trying to insert. 
+ // finding an empty slot means that the value is not present + // and that we should use that slot as the insertion point; + // finding a REMOVED slot means that we need to keep searching, + // however we want to remember the offset of that REMOVED slot + // so we can reuse it in case a "new" insertion (i.e. not an update) + // is possible. + // finding a matching value means that we've found that our desired + // key is already in the table + + if (states[index] != REMOVED) { + // starting at the natural offset, probe until we find an + // offset that isn't full. + do { + index -= probe; + if (index < 0) { + index += length; + } + } while (states[index] == FULL && set[index] != val); + } + + // if the index we found was removed: continue probing until we + // locate a free location or an element which equal()s the + // one we have. + if (states[index] == REMOVED) { + int firstRemoved = index; + while (states[index] != FREE && + (states[index] == REMOVED || set[index] != val)) { + index -= probe; + if (index < 0) { + index += length; + } + } + return states[index] == FULL ? -index - 1 : firstRemoved; + } + // if it's full, the key is already stored + return states[index] == FULL ? -index - 1 : index; + } + } + + /** + * Default implementation of TShortHashingStrategy: + * delegates hashing to HashFunctions.hash(short). + * + * @param val the value to hash + * @return the hashcode. + */ + public final int computeHashCode(short val) { + return HashFunctions.hash(val); + } +} // TShortHash diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHashSet.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHashSet.java new file mode 100644 index 00000000000..a7b8c0e8a14 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHashSet.java @@ -0,0 +1,373 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed set implementation for short primitives. + * + * @author Eric D. Friedman + * @author Rob Eden + */ + +public class TShortHashSet extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + /** + * Creates a new TShortHashSet instance with the default + * capacity and load factor. + */ + public TShortHashSet() { + super(); + } + + /** + * Creates a new TShortHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortHashSet(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortHashSet instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. 
+ * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortHashSet(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortHashSet instance containing the + * elements of array. + * + * @param array an array of short primitives + */ + public TShortHashSet(short[] array) { + this(array.length); + addAll(array); + } + + /** + * Creates a new TShortHash instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortHashSet(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortHash instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortHashSet(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortHash instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortHashSet(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * Creates a new TShortHashSet instance containing the + * elements of array. + * + * @param array an array of short primitives + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortHashSet(short[] array, TShortHashingStrategy strategy) { + this(array.length, strategy); + addAll(array); + } + + /** + * @return a TShortIterator with access to the values in this set + */ + public TShortIterator iterator() { + return new TShortIterator(this); + } + + /** + * Inserts a value into the set. + * + * @param val an short value + * @return true if the set was modified by the add operation + */ + public boolean add(short val) { + int index = insertionIndex(val); + + if (index < 0) { + return false; // already present in set, nothing to add + } + + byte previousState = _states[index]; + _set[index] = val; + _states[index] = FULL; + postInsertHook(previousState == FREE); + + return true; // yes, we added something + } + + /** + * Expands the set to accommodate new values. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldSet[] = _set; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldSet[i]; + int index = insertionIndex(o); + _set[index] = o; + _states[index] = FULL; + } + } + } + + /** + * Returns a new array containing the values in the set. + * + * @return an short[] value + */ + public short[] toArray() { + short[] result = new short[size()]; + short[] set = _set; + byte[] states = _states; + + for (int i = states.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + result[j++] = set[i]; + } + } + return result; + } + + /** + * Empties the set. + */ + public void clear() { + super.clear(); + short[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + set[i] = (short) 0; + states[i] = FREE; + } + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortHashSet)) { + return false; + } + final TShortHashSet that = (TShortHashSet) other; + if (that.size() != this.size()) { + return false; + } + return forEach(new TShortProcedure() { + public final boolean execute(short value) { + return that.contains(value); + } + }); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEach(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key) { + h += _hashingStrategy.computeHashCode(key); + return true; + } + } + + /** + * Removes val from the set. + * + * @param val an short value + * @return true if the set was modified by the remove operation. + */ + public boolean remove(short val) { + int index = index(val); + if (index >= 0) { + removeAt(index); + return true; + } + return false; + } + + /** + * Tests the set to determine if all of the elements in + * array are present. + * + * @param array an array of short primitives. + * @return true if all elements were present in the set. + */ + public boolean containsAll(short[] array) { + for (int i = array.length; i-- > 0;) { + if (!contains(array[i])) { + return false; + } + } + return true; + } + + /** + * Adds all of the elements in array to the set. + * + * @param array an array of short primitives. + * @return true if the set was modified by the add all operation. + */ + public boolean addAll(short[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (add(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes all of the elements in array from the set. + * + * @param array an array of short primitives. + * @return true if the set was modified by the remove all operation. 
+ */ + public boolean removeAll(short[] array) { + boolean changed = false; + for (int i = array.length; i-- > 0;) { + if (remove(array[i])) { + changed = true; + } + } + return changed; + } + + /** + * Removes any values in the set which are not contained in + * array. + * + * @param array an array of short primitives. + * @return true if the set was modified by the retain all operation + */ + public boolean retainAll(short[] array) { + boolean changed = false; + Arrays.sort(array); + short[] set = _set; + byte[] states = _states; + + for (int i = set.length; i-- > 0;) { + if (states[i] == FULL && (Arrays.binarySearch(array, set[i]) < 0)) { + remove(set[i]); + changed = true; + } + } + return changed; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEach(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + + // ENTRIES + setUp(size); + while (size-- > 0) { + short val = in.readShort(); + add(val); + } + } +} // TShortHashSet diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHashingStrategy.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHashingStrategy.java new file mode 100644 index 00000000000..54dbc620f8a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortHashingStrategy.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Serializable; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface to support pluggable hashing strategies in maps and sets. + * Implementors can use this interface to make the trove hashing + * algorithms use an optimal strategy when computing hashcodes. + *

    + * Created: Sun Nov 4 08:56:06 2001 + * + * @author Eric D. Friedman + * @version $Id: PHashingStrategy.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortHashingStrategy extends Serializable { + /** + * Computes a hash code for the specified short. Implementors + * can use the short's own value or a custom scheme designed to + * minimize collisions for a known set of input. + * + * @param val short for which the hashcode is to be computed + * @return the hashCode + */ + public int computeHashCode(short val); +} // TShortHashingStrategy diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntHashMap.java new file mode 100644 index 00000000000..f29b84567eb --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and int values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortIntHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortIntProcedure PUT_ALL_PROC = new TShortIntProcedure() { + public boolean execute(short key, int value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient int[] _values; + + /** + * Creates a new TShortIntHashMap instance with the default + * capacity and load factor. + */ + public TShortIntHashMap() { + super(); + } + + /** + * Creates a new TShortIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortIntHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortIntHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortIntHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortIntHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortIntHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortIntHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortIntHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortIntHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortIntHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortIntHashMap m = (TShortIntHashMap) super.clone(); + m._values = (int[]) this._values.clone(); + return m; + } + + /** + * @return a TShortIntIterator with access to this map's keys and values + */ + public TShortIntIterator iterator() { + return new TShortIntIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new int[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an int value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public int put(short key, int value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an short value + * @param value an int value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public int putIfAbsent(short key, int value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private int doPut(short key, int value, int index) { + byte previousState; + int previous = (int) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortIntHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + int oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = new int[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public int get(short key) { + int index = index(key); + return index < 0 ? (int) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + int[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, (int) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an int value, or (short)0 if no mapping for key exists + */ + public int remove(short key) { + int prev = (int) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortIntHashMap)) { + return false; + } + TShortIntHashMap that = (TShortIntHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortIntProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, int value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortIntProcedure { + private final TShortIntHashMap _otherMap; + + EqProcedure(TShortIntHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, int value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two ints for equality. 
+ */ + private final boolean eq(int v1, int v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (int) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public int[] getValues() { + int[] vals = new int[size()]; + int[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an int value + * @return a boolean value + */ + public boolean containsValue(int val) { + byte[] states = _states; + int[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TIntProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TIntProcedure procedure) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortIntProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortIntProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + int[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortIntProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + int[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TIntFunction value + */ + public void transformValues(TIntFunction function) { + byte[] states = _states; + int[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(short key) { + return adjustValue(key, (int) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(short key, int amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public int adjustOrPutValue(final short key, final int adjust_amount, final int put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final int newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + int val = in.readInt(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortIntProcedure() { + private boolean first = true; + + public boolean execute(short key, int value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortIntHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntIterator.java new file mode 100644 index 00000000000..aad56061aab --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and int. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortIntIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortIntIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortIntIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortIntHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortIntIterator(TShortIntHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public int value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public int setValue(int val) { + int old = value(); + _map._values[_index] = val; + return old; + } +}// TShortIntIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntProcedure.java new file mode 100644 index 00000000000..eb2333bb2b8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIntProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and int. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortIntProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b a int value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, int b); +}// TShortIntProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIterator.java new file mode 100644 index 00000000000..7c954d8ae28 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortIterator.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Iterator for short collections. + * + * @author Eric D. Friedman + * @version $Id: PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortIterator extends TPrimitiveIterator { + /** + * the collection on which the iterator operates + */ + private final TShortHash _hash; + + /** + * Creates a TShortIterator for the elements in the specified collection. + */ + public TShortIterator(TShortHash hash) { + super(hash); + this._hash = hash; + } + + /** + * Advances the iterator to the next element in the underlying collection + * and returns it. + * + * @return the next short in the collection + * @throws NoSuchElementException if the iterator is already exhausted + */ + public short next() { + moveToNextIndex(); + return _hash._set[_index]; + } +}// TShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongHashMap.java new file mode 100644 index 00000000000..9ebcdd78d7d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and long values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortLongHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortLongProcedure PUT_ALL_PROC = new TShortLongProcedure() { + public boolean execute(short key, long value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient long[] _values; + + /** + * Creates a new TShortLongHashMap instance with the default + * capacity and load factor. + */ + public TShortLongHashMap() { + super(); + } + + /** + * Creates a new TShortLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortLongHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortLongHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortLongHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortLongHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortLongHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortLongHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortLongHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortLongHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortLongHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortLongHashMap m = (TShortLongHashMap) super.clone(); + m._values = (long[]) this._values.clone(); + return m; + } + + /** + * @return a TShortLongIterator with access to this map's keys and values + */ + public TShortLongIterator iterator() { + return new TShortLongIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new long[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an long value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public long put(short key, long value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an short value + * @param value an long value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public long putIfAbsent(short key, long value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private long doPut(short key, long value, int index) { + byte previousState; + long previous = (long) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortLongHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + long oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = new long[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public long get(short key) { + int index = index(key); + return index < 0 ? (long) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + long[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, (long) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an long value, or (short)0 if no mapping for key exists + */ + public long remove(short key) { + long prev = (long) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortLongHashMap)) { + return false; + } + TShortLongHashMap that = (TShortLongHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortLongProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, long value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortLongProcedure { + private final TShortLongHashMap _otherMap; + + EqProcedure(TShortLongHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, long value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two longs for equality. 
+ */ + private final boolean eq(long v1, long v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (long) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public long[] getValues() { + long[] vals = new long[size()]; + long[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an long value + * @return a boolean value + */ + public boolean containsValue(long val) { + byte[] states = _states; + long[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TLongProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TLongProcedure procedure) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortLongProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortLongProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + long[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortLongProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + long[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TLongFunction value + */ + public void transformValues(TLongFunction function) { + byte[] states = _states; + long[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(short key) { + return adjustValue(key, (long) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(short key, long amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public long adjustOrPutValue(final short key, final long adjust_amount, final long put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final long newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + long val = in.readLong(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortLongProcedure() { + private boolean first = true; + + public boolean execute(short key, long value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortLongHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongIterator.java new file mode 100644 index 00000000000..92b4dfc01a8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and long. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortLongIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortLongIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortLongIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortLongHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortLongIterator(TShortLongHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public long value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public long setValue(long val) { + long old = value(); + _map._values[_index] = val; + return old; + } +}// TShortLongIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongProcedure.java new file mode 100644 index 00000000000..3b6e072ee02 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortLongProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and long. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortLongProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b a long value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, long b); +}// TShortLongProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectHashMap.java new file mode 100644 index 00000000000..69ba02a60b7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectHashMap.java @@ -0,0 +1,632 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and Object values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortObjectHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortObjectProcedure PUT_ALL_PROC = new TShortObjectProcedure() { + public boolean execute(short key, V value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient V[] _values; + + /** + * Creates a new TShortObjectHashMap instance with the default + * capacity and load factor. + */ + public TShortObjectHashMap() { + super(); + } + + /** + * Creates a new TShortObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortObjectHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortObjectHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortObjectHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortObjectHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortObjectHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortObjectHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortObjectHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortObjectHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortObjectHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public TShortObjectHashMap clone() { + TShortObjectHashMap m = (TShortObjectHashMap) super.clone(); + m._values = (V[]) this._values.clone(); + return m; + } + + /** + * @return a TShortObjectIterator with access to this map's keys and values + */ + public TShortObjectIterator iterator() { + return new TShortObjectIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = (V[]) new Object[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V put(short key, V value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. 
+ * + * @param key an short value + * @param value an Object value + * @return the previous value associated with key, + * or {@code null} if none was found. + */ + public V putIfAbsent(short key, V value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private V doPut(short key, V value, int index) { + byte previousState; + V previous = null; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortObjectHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + V oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = (V[]) new Object[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public V get(short key) { + int index = index(key); + return index < 0 ? null : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + Object[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, null); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an Object value or (short)0 if no such mapping exists. + */ + public V remove(short key) { + V prev = null; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortObjectHashMap)) { + return false; + } + TShortObjectHashMap that = (TShortObjectHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortObjectProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, Object value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortObjectProcedure { + private final TShortObjectHashMap _otherMap; + + EqProcedure(TShortObjectHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, Object value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two objects for equality. 
+ */ + private final boolean eq(Object o1, Object o2) { + return o1 == o2 || ((o1 != null) && o1.equals(o2)); + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = null; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + * @see #getValues(Object[]) + */ + public Object[] getValues() { + Object[] vals = new Object[size()]; + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * Return the values of the map; the runtime type of the returned array is that of + * the specified array. + * + * @param a the array into which the elements of this collection are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing the elements of this collection + * @throws ArrayStoreException the runtime type of the specified array is + * not a supertype of the runtime type of every element in this + * collection. + * @throws NullPointerException if the specified array is null. + * @see #getValues() + */ + public T[] getValues(T[] a) { + if (a.length < _size) { + a = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + _size); + } + + V[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = (T) v[i]; + } + } + return a; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. 
+ * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(V val) { + byte[] states = _states; + V[] vals = _values; + + // special case null values so that we don't have to + // perform null checks before every call to equals() + if (null == val) { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + val == vals[i]) { + return true; + } + } + } else { + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && + (val == vals[i] || val.equals(vals[i]))) { + return true; + } + } + } // end of else + return false; + } + + + /** + * checks for the present of key in the keys of the map. + * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TObjectProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. 
+ */ + public boolean forEachValue(TObjectProcedure procedure) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortObjectProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortObjectProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + V[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortObjectProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + V[] values = _values; + + // Temporarily disable compaction. This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. 
+ * + * @param function a TObjectFunction value + */ + public void transformValues(TObjectFunction function) { + byte[] states = _states; + V[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + V val = (V) in.readObject(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortObjectProcedure() { + private boolean first = true; + + public boolean execute(short key, Object value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortObjectHashMap diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectIterator.java new file mode 100644 index 00000000000..eb3b539b482 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and Object. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortObjectIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortObjectIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2OIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortObjectIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortObjectHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortObjectIterator(TShortObjectHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public V value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public V setValue(V val) { + V old = value(); + _map._values[_index] = val; + return old; + } +}// TShortObjectIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectProcedure.java new file mode 100644 index 00000000000..63b7c9d7881 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortObjectProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and Object. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2OProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortObjectProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b an Object value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, T b); +}// TShortObjectProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortProcedure.java new file mode 100644 index 00000000000..e5227769552 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortProcedure.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! 
// +////////////////////////////////////////////////// + + +/** + * Interface for procedures with one short parameter. + *

    + * Created: Mon Nov 5 21:45:49 2001 + * + * @author Eric D. Friedman + * @version $Id: PProcedure.template,v 1.2 2007/11/01 16:08:14 robeden Exp $ + */ + +public interface TShortProcedure { + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param value a value of type short + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short value); +}// TShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortHashMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortHashMap.java new file mode 100644 index 00000000000..1cfe5d70cf3 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortHashMap.java @@ -0,0 +1,650 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Arrays; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * An open addressed Map implementation for short keys and short values. + *

    + * Created: Sun Nov 4 08:52:45 2001 + * + * @author Eric D. Friedman + */ +public class TShortShortHashMap extends TShortHash implements Externalizable { + static final long serialVersionUID = 1L; + + private final TShortShortProcedure PUT_ALL_PROC = new TShortShortProcedure() { + public boolean execute(short key, short value) { + put(key, value); + return true; + } + }; + + + /** + * the values of the map + */ + protected transient short[] _values; + + /** + * Creates a new TShortShortHashMap instance with the default + * capacity and load factor. + */ + public TShortShortHashMap() { + super(); + } + + /** + * Creates a new TShortShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the default load factor. + * + * @param initialCapacity an int value + */ + public TShortShortHashMap(int initialCapacity) { + super(initialCapacity); + } + + /** + * Creates a new TShortShortHashMap instance with a prime + * capacity equal to or greater than initialCapacity and + * with the specified load factor. + * + * @param initialCapacity an int value + * @param loadFactor a float value + */ + public TShortShortHashMap(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor); + } + + /** + * Creates a new TShortShortHashMap instance with the default + * capacity and load factor. + * + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortShortHashMap(TShortHashingStrategy strategy) { + super(strategy); + } + + /** + * Creates a new TShortShortHashMap instance whose capacity + * is the next highest prime above initialCapacity + 1 + * unless that value is already prime. + * + * @param initialCapacity an int value + * @param strategy used to compute hash codes and to compare keys. 
+ */ + public TShortShortHashMap(int initialCapacity, TShortHashingStrategy strategy) { + super(initialCapacity, strategy); + } + + /** + * Creates a new TShortShortHashMap instance with a prime + * value at or near the specified capacity and load factor. + * + * @param initialCapacity used to find a prime capacity for the table. + * @param loadFactor used to calculate the threshold over which + * rehashing takes place. + * @param strategy used to compute hash codes and to compare keys. + */ + public TShortShortHashMap(int initialCapacity, float loadFactor, TShortHashingStrategy strategy) { + super(initialCapacity, loadFactor, strategy); + } + + /** + * @return a deep clone of this collection + */ + public Object clone() { + TShortShortHashMap m = (TShortShortHashMap) super.clone(); + m._values = (short[]) this._values.clone(); + return m; + } + + /** + * @return a TShortShortIterator with access to this map's keys and values + */ + public TShortShortIterator iterator() { + return new TShortShortIterator(this); + } + + /** + * initializes the hashtable to a prime capacity which is at least + * initialCapacity + 1. + * + * @param initialCapacity an int value + * @return the actual capacity chosen + */ + protected int setUp(int initialCapacity) { + int capacity; + + capacity = super.setUp(initialCapacity); + _values = new short[capacity]; + return capacity; + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an short value + * @param value an short value + * @return the previous value associated with key, + * or (short)0 if none was found. + */ + public short put(short key, short value) { + int index = insertionIndex(key); + return doPut(key, value, index); + } + + /** + * Inserts a key/value pair into the map if the specified key is not already + * associated with a value. + * + * @param key an short value + * @param value an short value + * @return the previous value associated with key, + * or (short)0 if none was found. 
+ */ + public short putIfAbsent(short key, short value) { + int index = insertionIndex(key); + if (index < 0) + return _values[-index - 1]; + return doPut(key, value, index); + } + + private short doPut(short key, short value, int index) { + byte previousState; + short previous = (short) 0; + boolean isNewMapping = true; + if (index < 0) { + index = -index - 1; + previous = _values[index]; + isNewMapping = false; + } + previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + _values[index] = value; + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return previous; + } + + + /** + * Put all the entries from the given map into this map. + * + * @param map The map from which entries will be obtained to put into this map. + */ + public void putAll(TShortShortHashMap map) { + map.forEachEntry(PUT_ALL_PROC); + } + + + /** + * rehashes the map to the new capacity. + * + * @param newCapacity an int value + */ + protected void rehash(int newCapacity) { + int oldCapacity = _set.length; + short oldKeys[] = _set; + short oldVals[] = _values; + byte oldStates[] = _states; + + _set = new short[newCapacity]; + _values = new short[newCapacity]; + _states = new byte[newCapacity]; + + for (int i = oldCapacity; i-- > 0;) { + if (oldStates[i] == FULL) { + short o = oldKeys[i]; + int index = insertionIndex(o); + _set[index] = o; + _values[index] = oldVals[i]; + _states[index] = FULL; + } + } + } + + /** + * retrieves the value for key + * + * @param key an short value + * @return the value of key or (short)0 if no such mapping exists. + */ + public short get(short key) { + int index = index(key); + return index < 0 ? (short) 0 : _values[index]; + } + + /** + * Empties the map. 
+ */ + public void clear() { + super.clear(); + short[] keys = _set; + short[] vals = _values; + byte[] states = _states; + + Arrays.fill(_set, 0, _set.length, (short) 0); + Arrays.fill(_values, 0, _values.length, (short) 0); + Arrays.fill(_states, 0, _states.length, FREE); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an short value + * @return an short value, or (short)0 if no mapping for key exists + */ + public short remove(short key) { + short prev = (short) 0; + int index = index(key); + if (index >= 0) { + prev = _values[index]; + removeAt(index); // clear key,state; adjust size + } + return prev; + } + + /** + * Compares this map with another map for equality of their stored + * entries. + * + * @param other an Object value + * @return a boolean value + */ + public boolean equals(Object other) { + if (!(other instanceof TShortShortHashMap)) { + return false; + } + TShortShortHashMap that = (TShortShortHashMap) other; + if (that.size() != this.size()) { + return false; + } + return forEachEntry(new EqProcedure(that)); + } + + public int hashCode() { + HashProcedure p = new HashProcedure(); + forEachEntry(p); + return p.getHashCode(); + } + + private final class HashProcedure implements TShortShortProcedure { + private int h = 0; + + public int getHashCode() { + return h; + } + + public final boolean execute(short key, short value) { + h += (_hashingStrategy.computeHashCode(key) ^ HashFunctions.hash(value)); + return true; + } + } + + private static final class EqProcedure implements TShortShortProcedure { + private final TShortShortHashMap _otherMap; + + EqProcedure(TShortShortHashMap otherMap) { + _otherMap = otherMap; + } + + public final boolean execute(short key, short value) { + int index = _otherMap.index(key); + if (index >= 0 && eq(value, _otherMap.get(key))) { + return true; + } + return false; + } + + /** + * Compare two shorts for equality. 
+ */ + private final boolean eq(short v1, short v2) { + return v1 == v2; + } + + } + + /** + * removes the mapping at index from the map. + * + * @param index an int value + */ + protected void removeAt(int index) { + _values[index] = (short) 0; + super.removeAt(index); // clear key, state; adjust size + } + + /** + * Returns the values of the map. + * + * @return a Collection value + */ + public short[] getValues() { + short[] vals = new short[size()]; + short[] v = _values; + byte[] states = _states; + + for (int i = v.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + vals[j++] = v[i]; + } + } + return vals; + } + + /** + * returns the keys of the map. + * + * @return a Set value + */ + public short[] keys() { + short[] keys = new short[size()]; + short[] k = _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + keys[j++] = k[i]; + } + } + return keys; + } + + /** + * returns the keys of the map. + * + * @param a the array into which the elements of the list are to + * be stored, if it is big enough; otherwise, a new array of the + * same type is allocated for this purpose. + * @return a Set value + */ + public short[] keys(short[] a) { + int size = size(); + if (a.length < size) { + a = (short[]) java.lang.reflect.Array.newInstance( + a.getClass().getComponentType(), size); + } + + short[] k = (short[]) _set; + byte[] states = _states; + + for (int i = k.length, j = 0; i-- > 0;) { + if (states[i] == FULL) { + a[j++] = k[i]; + } + } + return a; + } + + /** + * checks for the presence of val in the values of the map. + * + * @param val an short value + * @return a boolean value + */ + public boolean containsValue(short val) { + byte[] states = _states; + short[] vals = _values; + + for (int i = vals.length; i-- > 0;) { + if (states[i] == FULL && val == vals[i]) { + return true; + } + } + return false; + } + + + /** + * checks for the present of key in the keys of the map. 
+ * + * @param key an short value + * @return a boolean value + */ + public boolean containsKey(short key) { + return contains(key); + } + + /** + * Executes procedure for each key in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the keys terminated because + * the procedure returned false for some key. + */ + public boolean forEachKey(TShortProcedure procedure) { + return forEach(procedure); + } + + /** + * Executes procedure for each value in the map. + * + * @param procedure a TShortProcedure value + * @return false if the loop over the values terminated because + * the procedure returned false for some value. + */ + public boolean forEachValue(TShortProcedure procedure) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(values[i])) { + return false; + } + } + return true; + } + + /** + * Executes procedure for each key/value entry in the + * map. + * + * @param procedure a TOShortShortProcedure value + * @return false if the loop over the entries terminated because + * the procedure returned false for some entry. + */ + public boolean forEachEntry(TShortShortProcedure procedure) { + byte[] states = _states; + short[] keys = _set; + short[] values = _values; + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + return false; + } + } + return true; + } + + /** + * Retains only those entries in the map for which the procedure + * returns a true value. + * + * @param procedure determines which entries to keep + * @return true if the map was modified. + */ + public boolean retainEntries(TShortShortProcedure procedure) { + boolean modified = false; + byte[] states = _states; + short[] keys = _set; + short[] values = _values; + + + // Temporarily disable compaction. 
This is a fix for bug #1738760 + tempDisableAutoCompaction(); + try { + for (int i = keys.length; i-- > 0;) { + if (states[i] == FULL && !procedure.execute(keys[i], values[i])) { + removeAt(i); + modified = true; + } + } + } + finally { + reenableAutoCompaction(true); + } + + return modified; + } + + /** + * Transform the values in this map using function. + * + * @param function a TShortFunction value + */ + public void transformValues(TShortFunction function) { + byte[] states = _states; + short[] values = _values; + for (int i = values.length; i-- > 0;) { + if (states[i] == FULL) { + values[i] = function.execute(values[i]); + } + } + } + + /** + * Increments the primitive value mapped to key by 1 + * + * @param key the key of the value to increment + * @return true if a mapping was found and modified. + */ + public boolean increment(short key) { + return adjustValue(key, (short) 1); + } + + /** + * Adjusts the primitive value mapped to key. + * + * @param key the key of the value to increment + * @param amount the amount to adjust the value by. + * @return true if a mapping was found and modified. + */ + public boolean adjustValue(short key, short amount) { + int index = index(key); + if (index < 0) { + return false; + } else { + _values[index] += amount; + return true; + } + } + + /** + * Adjusts the primitive value mapped to the key if the key is present in the map. + * Otherwise, the initial_value is put in the map. 
+ * + * @param key the key of the value to increment + * @param adjust_amount the amount to adjust the value by + * @param put_amount the value put into the map if the key is not initial present + * @return the value present in the map after the adjustment or put operation + * @since 2.0b1 + */ + public short adjustOrPutValue(final short key, final short adjust_amount, final short put_amount) { + int index = insertionIndex(key); + final boolean isNewMapping; + final short newValue; + if (index < 0) { + index = -index - 1; + newValue = (_values[index] += adjust_amount); + isNewMapping = false; + } else { + newValue = (_values[index] = put_amount); + isNewMapping = true; + } + + byte previousState = _states[index]; + _set[index] = key; + _states[index] = FULL; + + if (isNewMapping) { + postInsertHook(previousState == FREE); + } + + return newValue; + } + + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // NUMBER OF ENTRIES + out.writeInt(_size); + + // ENTRIES + SerializationProcedure writeProcedure = new SerializationProcedure(out); + if (!forEachEntry(writeProcedure)) { + throw writeProcedure.exception; + } + } + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // NUMBER OF ENTRIES + int size = in.readInt(); + setUp(size); + + // ENTRIES + while (size-- > 0) { + short key = in.readShort(); + short val = in.readShort(); + put(key, val); + } + } + + public String toString() { + final StringBuilder buf = new StringBuilder("{"); + forEachEntry(new TShortShortProcedure() { + private boolean first = true; + + public boolean execute(short key, short value) { + if (first) first = false; + else buf.append(","); + + buf.append(key); + buf.append("="); + buf.append(value); + return true; + } + }); + buf.append("}"); + return buf.toString(); + } +} // TShortShortHashMap diff --git 
a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortIterator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortIterator.java new file mode 100644 index 00000000000..8e1a2e77a77 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortIterator.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Iterator for maps of type short and short. + *

    + *

    The iterator semantics for Trove's primitive maps is slightly different + * from those defined in java.util.Iterator, but still well within + * the scope of the pattern, as defined by Gamma, et al.

    + *

    + *

    This iterator does not implicitly advance to the next entry when + * the value at the current position is retrieved. Rather, you must explicitly + * ask the iterator to advance() and then retrieve either the key(), + * the value() or both. This is done so that you have the option, but not + * the obligation, to retrieve keys and/or values as your application requires, and + * without introducing wrapper objects that would carry both. As the iteration is + * stateful, access to the key/value parts of the current map entry happens in + * constant time.

    + *

    + *

    In practice, the iterator is akin to a "search finger" that you move from + * position to position. Read or write operations affect the current entry only and + * do not assume responsibility for moving the finger.

    + *

    + *

    Here are some sample scenarios for this class of iterator:

    + *

    + *

    + * // accessing keys/values through an iterator:
    + * for (TShortShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     doSomethingWithValue(it.value());
    + *   }
    + * }
    + * 
    + *

    + *

    + * // modifying values in-place through iteration:
    + * for (TShortShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.setValue(newValueForKey(it.key()));
    + *   }
    + * }
    + * 
    + *

    + *

    + * // deleting entries during iteration:
    + * for (TShortShortIterator it = map.iterator();
    + *      it.hasNext();) {
    + *   it.advance();
    + *   if (satisfiesCondition(it.key()) {
    + *     it.remove();
    + *   }
    + * }
    + * 
    + *

    + *

    + * // faster iteration by avoiding hasNext():
    + * TShortShortIterator iterator = map.iterator();
    + * for (int i = map.size(); i-- > 0;) {
    + *   iterator.advance();
    + *   doSomethingWithKeyAndValue(iterator.key(), iterator.value());
    + * }
    + * 
    + * + * @author Eric D. Friedman + * @version $Id: P2PIterator.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public class TShortShortIterator extends TPrimitiveIterator { + /** + * the collection being iterated over + */ + private final TShortShortHashMap _map; + + /** + * Creates an iterator over the specified map + */ + public TShortShortIterator(TShortShortHashMap map) { + super(map); + this._map = map; + } + + /** + * Moves the iterator forward to the next entry in the underlying map. + * + * @throws java.util.NoSuchElementException + * if the iterator is already exhausted + */ + public void advance() { + moveToNextIndex(); + } + + /** + * Provides access to the key of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the key of the entry at the iterator's current position. + */ + public short key() { + return _map._set[_index]; + } + + /** + * Provides access to the value of the mapping at the iterator's position. + * Note that you must advance() the iterator at least once + * before invoking this method. + * + * @return the value of the entry at the iterator's current position. + */ + public short value() { + return _map._values[_index]; + } + + /** + * Replace the value of the mapping at the iterator's position with the + * specified value. Note that you must advance() the iterator at + * least once before invoking this method. + * + * @param val the value to set in the current entry + * @return the old value of the entry. 
+ */ + public short setValue(short val) { + short old = value(); + _map._values[_index] = val; + return old; + } +}// TShortShortIterator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortProcedure.java new file mode 100644 index 00000000000..2b59d0640ed --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortShortProcedure.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Interface for procedures that take two parameters of type short and short. + *

    + * Created: Mon Nov 5 22:03:30 2001 + * + * @author Eric D. Friedman + * @version $Id: P2PProcedure.template,v 1.1 2006/11/10 23:28:00 robeden Exp $ + */ + +public interface TShortShortProcedure { + + /** + * Executes this procedure. A false return value indicates that + * the application executing this procedure should not invoke this + * procedure again. + * + * @param a a short value + * @param b a short value + * @return true if additional invocations of the procedure are + * allowed. + */ + public boolean execute(short a, short b); +}// TShortShortProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortStack.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortStack.java new file mode 100644 index 00000000000..a73ec56db8b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/TShortStack.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.util.gnu.trove; + +/** + * A stack of short primitives, backed by a TShortArrayList. + * + * @author Eric D. 
Friedman, Rob Eden + * @version $Id: PStack.template,v 1.2 2007/02/28 23:03:57 robeden Exp $ + */ + +public class TShortStack { + + /** + * the list used to hold the stack values. + */ + protected TShortArrayList _list; + + public static final int DEFAULT_CAPACITY = TShortArrayList.DEFAULT_CAPACITY; + + /** + * Creates a new TShortStack instance with the default + * capacity. + */ + public TShortStack() { + this(DEFAULT_CAPACITY); + } + + /** + * Creates a new TShortStack instance with the + * specified capacity. + * + * @param capacity the initial depth of the stack + */ + public TShortStack(int capacity) { + _list = new TShortArrayList(capacity); + } + + /** + * Pushes the value onto the top of the stack. + * + * @param val an short value + */ + public void push(short val) { + _list.add(val); + } + + /** + * Removes and returns the value at the top of the stack. + * + * @return an short value + */ + public short pop() { + return _list.remove(_list.size() - 1); + } + + /** + * Returns the value at the top of the stack. + * + * @return an short value + */ + public short peek() { + return _list.get(_list.size() - 1); + } + + /** + * Returns the current depth of the stack. + */ + public int size() { + return _list.size(); + } + + /** + * Clears the stack, reseting its capacity to the default. + */ + public void clear() { + _list.clear(DEFAULT_CAPACITY); + } + + /** + * Clears the stack without releasing its internal capacity allocation. + */ + public void reset() { + _list.reset(); + } + + /** + * Copies the contents of the stack into a native array. Note that this will NOT + * pop them out of the stack. + * + * @return an short[] value + */ + public short[] toNativeArray() { + return _list.toNativeArray(); + } + + /** + * Copies a slice of the list into a native array. Note that this will NOT + * pop them out of the stack. + * + * @param dest the array to copy into. 
+ */ + public void toNativeArray(short[] dest) { + _list.toNativeArray(dest, 0, size()); + } +} // TShortStack diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/ToObjectArrayProcedure.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/ToObjectArrayProcedure.java new file mode 100644 index 00000000000..12f28b3c517 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/ToObjectArrayProcedure.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove; + +/** + * A procedure which stores each value it receives into a target array. + *

    + * Created: Sat Jan 12 10:13:42 2002 + * + * @author Eric D. Friedman + * @version $Id: ToObjectArrayProcedure.java,v 1.2 2006/11/10 23:27:57 robeden Exp $ + */ + +final class ToObjectArrayProcedure implements TObjectProcedure { + private final T[] target; + private int pos = 0; + + public ToObjectArrayProcedure(final T[] target) { + this.target = target; + } + + public final boolean execute(T value) { + target[pos++] = value; + return true; + } +} // ToObjectArrayProcedure diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteByteHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteByteHashMapDecorator.java new file mode 100644 index 00000000000..2f505387fbf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteByteHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteByteHashMap; +import org.elasticsearch.util.gnu.trove.TByteByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TByteByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteByteHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteByteHashMapDecorator(TByteByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteByteHashMapDecorator clone() { + try { + TByteByteHashMapDecorator copy = (TByteByteHashMapDecorator) super.clone(); + copy._map = (TByteByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Byte(0) if none was found. + */ + public Byte put(Byte key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Byte key) { + byte k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Byte(0) if it was not found in the map + */ + public Byte remove(Byte key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteByteHashMapDecorator.this.containsKey(k) + && TByteByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteDoubleHashMapDecorator.java new file mode 100644 index 00000000000..7575ec1036d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TByteDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TByteDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteDoubleHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteDoubleHashMapDecorator(TByteDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteDoubleHashMapDecorator clone() { + try { + TByteDoubleHashMapDecorator copy = (TByteDoubleHashMapDecorator) super.clone(); + copy._map = (TByteDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Double(0) if none was found. + */ + public Double put(Byte key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Byte key) { + byte k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Double(0) if it was not found in the map + */ + public Double remove(Byte key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteDoubleHashMapDecorator.this.containsKey(k) + && TByteDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + 
public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteFloatHashMapDecorator.java new file mode 100644 index 00000000000..0da46db28c5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteFloatHashMap; +import org.elasticsearch.util.gnu.trove.TByteFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TByteFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteFloatHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteFloatHashMapDecorator(TByteFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteFloatHashMapDecorator clone() { + try { + TByteFloatHashMapDecorator copy = (TByteFloatHashMapDecorator) super.clone(); + copy._map = (TByteFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Float(0) if none was found. + */ + public Float put(Byte key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Byte key) { + byte k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Float(0) if it was not found in the map + */ + public Float remove(Byte key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteFloatHashMapDecorator.this.containsKey(k) + && TByteFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteHashSetDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteHashSetDecorator.java new file mode 100644 index 00000000000..5306cb7d021 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteHashSetDecorator.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteHashSet; +import org.elasticsearch.util.gnu.trove.TByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteHashSet conform to the java.util.Set API. + * This class simply decorates an underlying TByteHashSet and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Tue Sep 24 22:08:17 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TByteHashSetDecorator extends AbstractSet + implements Set, Externalizable { + + /** + * the wrapped primitive set + */ + protected TByteHashSet _set; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteHashSetDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive set. + */ + public TByteHashSetDecorator(TByteHashSet set) { + super(); + this._set = set; + } + + + /** + * Returns a reference to the set wrapped by this decorator. + */ + public TByteHashSet getSet() { + return _set; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteHashSetDecorator clone() { + try { + TByteHashSetDecorator copy = (TByteHashSetDecorator) super.clone(); + copy._set = (TByteHashSet) _set.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable + } + } + + /** + * Inserts a value into the set. + * + * @param value true if the set was modified by the insertion + */ + public boolean add(Byte value) { + return _set.add(unwrap(value)); + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return true if the sets are identical + */ + public boolean equals(Object other) { + if (_set.equals(other)) { + return true; // comparing two trove sets + } else if (other instanceof Set) { + Set that = (Set) other; + if (that.size() != _set.size()) { + return false; // different sizes, no need to compare + } else { // now we have to do it the hard way + Iterator it = that.iterator(); + for (int i = that.size(); i-- > 0;) { + Object val = it.next(); + if (val instanceof Byte) { + byte v = unwrap(val); + if (_set.contains(v)) { + // match, ok to continue + } else { + return false; // no match: we're done + } + } else { + return false; // different type in other set + } + } + return true; // all entries match + } + } else { + return false; + } + } + + /** + * Empties the set. + */ + public void clear() { + this._set.clear(); + } + + /** + * Deletes a value from the set. + * + * @param value an Object value + * @return true if the set was modified + */ + public boolean remove(Object value) { + return _set.remove(unwrap(value)); + } + + /** + * Creates an iterator over the values of the set. + * + * @return an iterator with support for removals in the underlying set + */ + public Iterator iterator() { + return new Iterator() { + private final TByteIterator it = _set.iterator(); + + public Byte next() { + return wrap(it.next()); + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + /** + * Returns the number of entries in the set. + * + * @return the set's size. + */ + public int size() { + return this._set.size(); + } + + /** + * Indicates whether set has any entries. 
+ * + * @return true if the set is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Wraps a value + * + * @param k value in the underlying set + * @return an Object representation of the value + */ + protected Byte wrap(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrap(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // SET + _set = (TByteHashSet) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // SET + out.writeObject(_set); + } +} // TByteHashSetDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteIntHashMapDecorator.java new file mode 100644 index 00000000000..17197077caf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteIntHashMap; +import org.elasticsearch.util.gnu.trove.TByteIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TByteIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteIntHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteIntHashMapDecorator(TByteIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteIntHashMapDecorator clone() { + try { + TByteIntHashMapDecorator copy = (TByteIntHashMapDecorator) super.clone(); + copy._map = (TByteIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(Byte key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Byte key) { + byte k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Byte key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteIntHashMapDecorator.this.containsKey(k) + && TByteIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + 
public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteLongHashMapDecorator.java new file mode 100644 index 00000000000..3dcf5c7d792 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteLongHashMap; +import org.elasticsearch.util.gnu.trove.TByteLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteLongHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteLongHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TByteLongHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteLongHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteLongHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteLongHashMapDecorator(TByteLongHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteLongHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteLongHashMapDecorator clone() { + try { + TByteLongHashMapDecorator copy = (TByteLongHashMapDecorator) super.clone(); + copy._map = (TByteLongHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Long(0) if none was found. + */ + public Long put(Byte key, Long value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Long get(Byte key) { + byte k = unwrapKey(key); + long v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Long(0) if it was not found in the map + */ + public Long remove(Byte key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteLongHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteLongHashMapDecorator.this.containsKey(k) + && TByteLongHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteLongIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final Long v = wrapValue(it.value()); + return new Map.Entry() { + private Long val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public Long getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Long setValue(Long value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Long o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteLongHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Long wrapValue(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrapValue(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteLongHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteLongHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteObjectHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteObjectHashMapDecorator.java new file mode 100644 index 00000000000..702887fd9dc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteObjectHashMapDecorator.java @@ -0,0 +1,356 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteObjectHashMap; +import org.elasticsearch.util.gnu.trove.TByteObjectIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteObjectHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteObjectHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TByteObjectHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteObjectHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteObjectHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteObjectHashMapDecorator(TByteObjectHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteObjectHashMap getMap() { + return _map; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteObjectHashMapDecorator clone() { + try { + TByteObjectHashMapDecorator copy = (TByteObjectHashMapDecorator) super.clone(); + copy._map = (TByteObjectHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public V put(Byte key, V value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public V get(Object key) { + return _map.get(unwrapKey(key)); + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public V remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteObjectHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteObjectHashMapDecorator.this.containsKey(k) && + TByteObjectHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteObjectIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final V v = wrapValue(it.value()); + return new Map.Entry() { + private V val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public V getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public V setValue(V value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Map.Entry o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) 
{ + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteObjectHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue((V) val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param o value in the underlying map + * @return an Object representation of the value + */ + protected final V wrapValue(V o) { + return o; + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected final V unwrapValue(V value) { + return value; + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteObjectHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteObjectHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteShortHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteShortHashMapDecorator.java new file mode 100644 index 00000000000..c6c1847e849 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TByteShortHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TByteShortHashMap; +import org.elasticsearch.util.gnu.trove.TByteShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TByteShortHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TByteShortHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TByteShortHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TByteShortHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TByteShortHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TByteShortHashMapDecorator(TByteShortHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TByteShortHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TByteShortHashMapDecorator clone() { + try { + TByteShortHashMapDecorator copy = (TByteShortHashMapDecorator) super.clone(); + copy._map = (TByteShortHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Short(0) if none was found. + */ + public Short put(Byte key, Short value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Short get(Byte key) { + byte k = unwrapKey(key); + short v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Short(0) if it was not found in the map + */ + public Short remove(Byte key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TByteShortHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TByteShortHashMapDecorator.this.containsKey(k) + && TByteShortHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TByteShortIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Byte key = wrapKey(it.key()); + final Short v = wrapValue(it.value()); + return new Map.Entry() { + private Short val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Byte getKey() { + return key; + } + + public Short getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Short setValue(Short value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Short o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TByteShortHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Byte wrapKey(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected byte unwrapKey(Object key) { + return ((Byte) key).byteValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Short wrapValue(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrapValue(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TByteShortHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TByteShortHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleByteHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleByteHashMapDecorator.java new file mode 100644 index 00000000000..6f896f953c2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleByteHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TDoubleByteHashMap; +import org.elasticsearch.util.gnu.trove.TDoubleByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TDoubleByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TDoubleByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TDoubleByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TDoubleByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TDoubleByteHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TDoubleByteHashMapDecorator(TDoubleByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TDoubleByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TDoubleByteHashMapDecorator clone() { + try { + TDoubleByteHashMapDecorator copy = (TDoubleByteHashMapDecorator) super.clone(); + copy._map = (TDoubleByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Byte(0) if none was found. + */ + public Byte put(Double key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Double key) { + double k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Byte(0) if it was not found in the map + */ + public Byte remove(Double key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TDoubleByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TDoubleByteHashMapDecorator.this.containsKey(k) + && TDoubleByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TDoubleByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Double key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Double getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TDoubleByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Double wrapKey(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected double unwrapKey(Object key) { + return ((Double) key).doubleValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TDoubleByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TDoubleByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleDoubleHashMapDecorator.java new file mode 100644 index 00000000000..92d6ac56596 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TDoubleDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TDoubleDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TDoubleDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TDoubleDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TDoubleDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TDoubleDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TDoubleDoubleHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TDoubleDoubleHashMapDecorator(TDoubleDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TDoubleDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TDoubleDoubleHashMapDecorator clone() { + try { + TDoubleDoubleHashMapDecorator copy = (TDoubleDoubleHashMapDecorator) super.clone(); + copy._map = (TDoubleDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Double(0) if none was found. + */ + public Double put(Double key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Double key) { + double k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. 
+ if (v == 0) { + return _map.containsKey(k) ? wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Double(0) if it was not found in the map + */ + public Double remove(Double key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TDoubleDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TDoubleDoubleHashMapDecorator.this.containsKey(k) + && TDoubleDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TDoubleDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Double key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Double getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean 
remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TDoubleDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Double wrapKey(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected double unwrapKey(Object key) { + return ((Double) key).doubleValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TDoubleDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TDoubleDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleFloatHashMapDecorator.java new file mode 100644 index 00000000000..66abb2ae499 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TDoubleFloatHashMap; +import org.elasticsearch.util.gnu.trove.TDoubleFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TDoubleFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TDoubleFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TDoubleFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TDoubleFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TDoubleFloatHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TDoubleFloatHashMapDecorator(TDoubleFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TDoubleFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TDoubleFloatHashMapDecorator clone() { + try { + TDoubleFloatHashMapDecorator copy = (TDoubleFloatHashMapDecorator) super.clone(); + copy._map = (TDoubleFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Float(0) if none was found. + */ + public Float put(Double key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Double key) { + double k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. 
+ if (v == 0) { + return _map.containsKey(k) ? wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Float(0) if it was not found in the map + */ + public Float remove(Double key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TDoubleFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TDoubleFloatHashMapDecorator.this.containsKey(k) + && TDoubleFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TDoubleFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Double key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Double getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + 
throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TDoubleFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Double wrapKey(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected double unwrapKey(Object key) { + return ((Double) key).doubleValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TDoubleFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TDoubleFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleHashSetDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleHashSetDecorator.java new file mode 100644 index 00000000000..291b43ea23f --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleHashSetDecorator.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TDoubleHashSet; +import org.elasticsearch.util.gnu.trove.TDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TDoubleHashSet conform to the java.util.Set API. + * This class simply decorates an underlying TDoubleHashSet and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Tue Sep 24 22:08:17 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TDoubleHashSetDecorator extends AbstractSet + implements Set, Externalizable { + + /** + * the wrapped primitive set + */ + protected TDoubleHashSet _set; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TDoubleHashSetDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive set. + */ + public TDoubleHashSetDecorator(TDoubleHashSet set) { + super(); + this._set = set; + } + + + /** + * Returns a reference to the set wrapped by this decorator. + */ + public TDoubleHashSet getSet() { + return _set; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TDoubleHashSetDecorator clone() { + try { + TDoubleHashSetDecorator copy = (TDoubleHashSetDecorator) super.clone(); + copy._set = (TDoubleHashSet) _set.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable + } + } + + /** + * Inserts a value into the set. + * + * @param value true if the set was modified by the insertion + */ + public boolean add(Double value) { + return _set.add(unwrap(value)); + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return true if the sets are identical + */ + public boolean equals(Object other) { + if (_set.equals(other)) { + return true; // comparing two trove sets + } else if (other instanceof Set) { + Set that = (Set) other; + if (that.size() != _set.size()) { + return false; // different sizes, no need to compare + } else { // now we have to do it the hard way + Iterator it = that.iterator(); + for (int i = that.size(); i-- > 0;) { + Object val = it.next(); + if (val instanceof Double) { + double v = unwrap(val); + if (_set.contains(v)) { + // match, ok to continue + } else { + return false; // no match: we're done + } + } else { + return false; // different type in other set + } + } + return true; // all entries match + } + } else { + return false; + } + } + + /** + * Empties the set. + */ + public void clear() { + this._set.clear(); + } + + /** + * Deletes a value from the set. + * + * @param value an Object value + * @return true if the set was modified + */ + public boolean remove(Object value) { + return _set.remove(unwrap(value)); + } + + /** + * Creates an iterator over the values of the set. + * + * @return an iterator with support for removals in the underlying set + */ + public Iterator iterator() { + return new Iterator() { + private final TDoubleIterator it = _set.iterator(); + + public Double next() { + return wrap(it.next()); + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + /** + * Returns the number of entries in the set. + * + * @return the set's size. + */ + public int size() { + return this._set.size(); + } + + /** + * Indicates whether set has any entries. 
+ * + * @return true if the set is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Wraps a value + * + * @param k value in the underlying set + * @return an Object representation of the value + */ + protected Double wrap(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrap(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // SET + _set = (TDoubleHashSet) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // SET + out.writeObject(_set); + } +} // TDoubleHashSetDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleIntHashMapDecorator.java new file mode 100644 index 00000000000..909b1d19493 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TDoubleIntHashMap; +import org.elasticsearch.util.gnu.trove.TDoubleIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TDoubleIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TDoubleIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TDoubleIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TDoubleIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TDoubleIntHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TDoubleIntHashMapDecorator(TDoubleIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TDoubleIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TDoubleIntHashMapDecorator clone() { + try { + TDoubleIntHashMapDecorator copy = (TDoubleIntHashMapDecorator) super.clone(); + copy._map = (TDoubleIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(Double key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Double key) { + double k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Double key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TDoubleIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TDoubleIntHashMapDecorator.this.containsKey(k) + && TDoubleIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TDoubleIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Double key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Double getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); 
+ } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TDoubleIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Double wrapKey(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected double unwrapKey(Object key) { + return ((Double) key).doubleValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TDoubleIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TDoubleIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleLongHashMapDecorator.java new file mode 100644 index 00000000000..e482cecfc10 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TDoubleLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.gnu.trove.decorator;

import org.elasticsearch.util.gnu.trove.TDoubleLongHashMap;
import org.elasticsearch.util.gnu.trove.TDoubleLongIterator;

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.*;


//////////////////////////////////////////////////
// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! //
//////////////////////////////////////////////////


/**
 * Wrapper class to make a {@link TDoubleLongHashMap} conform to the <tt>java.util.Map</tt> API.
 * This class simply decorates an underlying TDoubleLongHashMap and translates the Object-based
 * APIs into their Trove primitive analogs.
 * <p/>
 * Note that wrapping and unwrapping primitive values is extremely inefficient. If
 * possible, users of this class should override the appropriate methods in this class
 * and use a table of canonical values.
 * <p/>
 * Created: Mon Sep 23 22:07:40 PDT 2002
 *
 * @author Eric D. Friedman
 * @author Rob Eden
 */
public class TDoubleLongHashMapDecorator extends AbstractMap<Double, Long>
        implements Map<Double, Long>, Externalizable, Cloneable {

    /**
     * the wrapped primitive map
     */
    protected TDoubleLongHashMap _map;

    /**
     * FOR EXTERNALIZATION ONLY!!
     */
    public TDoubleLongHashMapDecorator() {
    }

    /**
     * Creates a wrapper that decorates the specified primitive map.
     */
    public TDoubleLongHashMapDecorator(TDoubleLongHashMap map) {
        super();
        this._map = map;
    }

    /**
     * Returns a reference to the map wrapped by this decorator.
     */
    public TDoubleLongHashMap getMap() {
        return _map;
    }

    /**
     * Clones the underlying trove collection and returns the clone wrapped in a new
     * decorator instance. This is a shallow clone except where primitives are
     * concerned.
     *
     * @return a copy of the receiver
     */
    public TDoubleLongHashMapDecorator clone() {
        try {
            TDoubleLongHashMapDecorator copy = (TDoubleLongHashMapDecorator) super.clone();
            copy._map = (TDoubleLongHashMap) _map.clone();
            return copy;
        } catch (CloneNotSupportedException e) {
            // assert(false);
            throw new InternalError(); // we are cloneable, so this does not happen
        }
    }

    /**
     * Inserts a key/value pair into the map.
     *
     * @param key   an <code>Object</code> value
     * @param value an <code>Object</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or Long(0) if none was found.
     */
    public Long put(Double key, Long value) {
        return wrapValue(_map.put(unwrapKey(key), unwrapValue(value)));
    }

    /**
     * Retrieves the value for <tt>key</tt>.
     *
     * @param key an <code>Object</code> value
     * @return the value of <tt>key</tt> or null if no such mapping exists.
     */
    public Long get(Double key) {
        double k = unwrapKey(key);
        long v = _map.get(k);
        // 0 may be a false positive since primitive maps
        // cannot return null, so we have to do an extra
        // check here.
        if (v == 0) {
            return _map.containsKey(k) ? wrapValue(v) : null;
        } else {
            return wrapValue(v);
        }
    }

    /**
     * Empties the map.
     */
    public void clear() {
        this._map.clear();
    }

    /**
     * Deletes a key/value pair from the map.
     *
     * @param key an <code>Object</code> value
     * @return the removed value, or Long(0) if it was not found in the map
     */
    public Long remove(Double key) {
        return wrapValue(_map.remove(unwrapKey(key)));
    }

    /**
     * Returns a Set view on the entries of the map.
     *
     * @return a <code>Set</code> value
     */
    public Set<Map.Entry<Double, Long>> entrySet() {
        return new AbstractSet<Map.Entry<Double, Long>>() {
            public int size() {
                return _map.size();
            }

            public boolean isEmpty() {
                return TDoubleLongHashMapDecorator.this.isEmpty();
            }

            public boolean contains(Object o) {
                if (o instanceof Map.Entry) {
                    Object k = ((Map.Entry) o).getKey();
                    Object v = ((Map.Entry) o).getValue();
                    return TDoubleLongHashMapDecorator.this.containsKey(k)
                            && TDoubleLongHashMapDecorator.this.get(k).equals(v);
                } else {
                    return false;
                }
            }

            public Iterator<Map.Entry<Double, Long>> iterator() {
                return new Iterator<Map.Entry<Double, Long>>() {
                    private final TDoubleLongIterator it = _map.iterator();

                    public Map.Entry<Double, Long> next() {
                        it.advance();
                        final Double key = wrapKey(it.key());
                        final Long v = wrapValue(it.value());
                        return new Map.Entry<Double, Long>() {
                            private Long val = v;

                            public boolean equals(Object o) {
                                return o instanceof Map.Entry
                                        && ((Map.Entry) o).getKey().equals(key)
                                        && ((Map.Entry) o).getValue().equals(val);
                            }

                            public Double getKey() {
                                return key;
                            }

                            public Long getValue() {
                                return val;
                            }

                            public int hashCode() {
                                return key.hashCode() + val.hashCode();
                            }

                            public Long setValue(Long value) {
                                val = value;
                                return put(key, value);
                            }
                        };
                    }

                    public boolean hasNext() {
                        return it.hasNext();
                    }

                    public void remove() {
                        it.remove();
                    }
                };
            }

            public boolean add(Map.Entry<Double, Long> o) {
                throw new UnsupportedOperationException();
            }

            public boolean remove(Object o) {
                throw new UnsupportedOperationException();
            }

            public boolean addAll(Collection<? extends Map.Entry<Double, Long>> c) {
                throw new UnsupportedOperationException();
            }

            public boolean retainAll(Collection<?> c) {
                throw new UnsupportedOperationException();
            }

            public boolean removeAll(Collection<?> c) {
                throw new UnsupportedOperationException();
            }

            public void clear() {
                TDoubleLongHashMapDecorator.this.clear();
            }
        };
    }

    /**
     * Checks for the presence of <tt>val</tt> in the values of the map.
     *
     * @param val an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsValue(Object val) {
        return _map.containsValue(unwrapValue(val));
    }

    /**
     * Checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsKey(Object key) {
        return _map.containsKey(unwrapKey(key));
    }

    /**
     * Returns the number of entries in the map.
     *
     * @return the map's size.
     */
    public int size() {
        return this._map.size();
    }

    /**
     * Indicates whether map has any entries.
     *
     * @return true if the map is empty
     */
    public boolean isEmpty() {
        return size() == 0;
    }

    /**
     * Copies the key/value mappings in <tt>map</tt> into this map.
     * Note that this will be a <b>deep</b> copy, as storage is by
     * primitive value.
     *
     * @param map a <code>Map</code> value
     */
    public void putAll(Map<? extends Double, ? extends Long> map) {
        Iterator<? extends Entry<? extends Double, ? extends Long>> it = map.entrySet().iterator();
        for (int i = map.size(); i-- > 0;) {
            Entry<? extends Double, ? extends Long> e = it.next();
            this.put(e.getKey(), e.getValue());
        }
    }

    /**
     * Wraps a key
     *
     * @param k key in the underlying map
     * @return an Object representation of the key
     */
    protected Double wrapKey(double k) {
        return Double.valueOf(k);
    }

    /**
     * Unwraps a key
     *
     * @param key wrapped key
     * @return an unwrapped representation of the key
     */
    protected double unwrapKey(Object key) {
        return ((Double) key).doubleValue();
    }

    /**
     * Wraps a value
     *
     * @param k value in the underlying map
     * @return an Object representation of the value
     */
    protected Long wrapValue(long k) {
        return Long.valueOf(k);
    }

    /**
     * Unwraps a value
     *
     * @param value wrapped value
     * @return an unwrapped representation of the value
     */
    protected long unwrapValue(Object value) {
        return ((Long) value).longValue();
    }

    // Implements Externalizable
    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // MAP
        _map = (TDoubleLongHashMap) in.readObject();
    }

    // Implements Externalizable
    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // MAP
        out.writeObject(_map);
    }

} // TDoubleLongHashMapDecorator
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.gnu.trove.decorator;

import org.elasticsearch.util.gnu.trove.TDoubleObjectHashMap;
import org.elasticsearch.util.gnu.trove.TDoubleObjectIterator;

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.*;


//////////////////////////////////////////////////
// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! //
//////////////////////////////////////////////////


/**
 * Wrapper class to make a {@link TDoubleObjectHashMap} conform to the <tt>java.util.Map</tt> API.
 * This class simply decorates an underlying TDoubleObjectHashMap and translates the Object-based
 * APIs into their Trove primitive analogs.
 * <p/>
 * Note that wrapping and unwrapping primitive values is extremely inefficient. If
 * possible, users of this class should override the appropriate methods in this class
 * and use a table of canonical values.
 * <p/>
 * Created: Mon Sep 23 22:07:40 PDT 2002
 *
 * @author Eric D. Friedman
 */
public class TDoubleObjectHashMapDecorator<V> extends AbstractMap<Double, V>
        implements Map<Double, V>, Externalizable, Cloneable {

    /**
     * the wrapped primitive map
     */
    protected TDoubleObjectHashMap<V> _map;

    /**
     * FOR EXTERNALIZATION ONLY!!
     */
    public TDoubleObjectHashMapDecorator() {
    }

    /**
     * Creates a wrapper that decorates the specified primitive map.
     */
    public TDoubleObjectHashMapDecorator(TDoubleObjectHashMap<V> map) {
        super();
        this._map = map;
    }

    /**
     * Returns a reference to the map wrapped by this decorator.
     */
    public TDoubleObjectHashMap<V> getMap() {
        return _map;
    }

    /**
     * Clones the underlying trove collection and returns the clone wrapped in a new
     * decorator instance. This is a shallow clone except where primitives are
     * concerned.
     *
     * @return a copy of the receiver
     */
    public TDoubleObjectHashMapDecorator<V> clone() {
        try {
            TDoubleObjectHashMapDecorator<V> copy = (TDoubleObjectHashMapDecorator<V>) super.clone();
            copy._map = (TDoubleObjectHashMap<V>) _map.clone();
            return copy;
        } catch (CloneNotSupportedException e) {
            // assert(false);
            throw new InternalError(); // we are cloneable, so this does not happen
        }
    }

    /**
     * Inserts a key/value pair into the map.
     *
     * @param key   an <code>Object</code> value
     * @param value an <code>Object</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or <tt>null</tt> if none was found.
     */
    public V put(Double key, V value) {
        return wrapValue(_map.put(unwrapKey(key), unwrapValue(value)));
    }

    /**
     * Retrieves the value for <tt>key</tt>.
     *
     * @param key an <code>Object</code> value
     * @return the value of <tt>key</tt> or null if no such mapping exists.
     */
    public V get(Object key) {
        return _map.get(unwrapKey(key));
    }

    /**
     * Empties the map.
     */
    public void clear() {
        this._map.clear();
    }

    /**
     * Deletes a key/value pair from the map.
     *
     * @param key an <code>Object</code> value
     * @return the removed value, or <tt>null</tt> if it was not found in the map
     */
    public V remove(Object key) {
        return wrapValue(_map.remove(unwrapKey(key)));
    }

    /**
     * Returns a Set view on the entries of the map.
     *
     * @return a <code>Set</code> value
     */
    public Set<Map.Entry<Double, V>> entrySet() {
        return new AbstractSet<Map.Entry<Double, V>>() {
            public int size() {
                return _map.size();
            }

            public boolean isEmpty() {
                return TDoubleObjectHashMapDecorator.this.isEmpty();
            }

            public boolean contains(Object o) {
                if (o instanceof Map.Entry) {
                    Object k = ((Map.Entry) o).getKey();
                    Object v = ((Map.Entry) o).getValue();
                    return TDoubleObjectHashMapDecorator.this.containsKey(k) &&
                            TDoubleObjectHashMapDecorator.this.get(k).equals(v);
                } else {
                    return false;
                }
            }

            public Iterator<Map.Entry<Double, V>> iterator() {
                return new Iterator<Map.Entry<Double, V>>() {
                    private final TDoubleObjectIterator<V> it = _map.iterator();

                    public Map.Entry<Double, V> next() {
                        it.advance();
                        final Double key = wrapKey(it.key());
                        final V v = wrapValue(it.value());
                        return new Map.Entry<Double, V>() {
                            private V val = v;

                            public boolean equals(Object o) {
                                return o instanceof Map.Entry
                                        && ((Map.Entry) o).getKey().equals(key)
                                        && ((Map.Entry) o).getValue().equals(val);
                            }

                            public Double getKey() {
                                return key;
                            }

                            public V getValue() {
                                return val;
                            }

                            public int hashCode() {
                                return key.hashCode() + val.hashCode();
                            }

                            public V setValue(V value) {
                                val = value;
                                return put(key, value);
                            }
                        };
                    }

                    public boolean hasNext() {
                        return it.hasNext();
                    }

                    public void remove() {
                        it.remove();
                    }
                };
            }

            public boolean add(Map.Entry<Double, V> o) {
                throw new UnsupportedOperationException();
            }

            public boolean remove(Object o) {
                throw new UnsupportedOperationException();
            }

            public boolean addAll(Collection<? extends Map.Entry<Double, V>> c) {
                throw new UnsupportedOperationException();
            }

            public boolean retainAll(Collection<?> c) {
                throw new UnsupportedOperationException();
            }

            public boolean removeAll(Collection<?> c) {
                throw new UnsupportedOperationException();
            }

            public void clear() {
                TDoubleObjectHashMapDecorator.this.clear();
            }
        };
    }

    /**
     * Checks for the presence of <tt>val</tt> in the values of the map.
     *
     * @param val an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsValue(Object val) {
        return _map.containsValue(unwrapValue((V) val));
    }

    /**
     * Checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsKey(Object key) {
        return _map.containsKey(unwrapKey(key));
    }

    /**
     * Returns the number of entries in the map.
     *
     * @return the map's size.
     */
    public int size() {
        return this._map.size();
    }

    /**
     * Indicates whether map has any entries.
     *
     * @return true if the map is empty
     */
    public boolean isEmpty() {
        return size() == 0;
    }

    /**
     * Copies the key/value mappings in <tt>map</tt> into this map.
     * Note that this will be a <b>deep</b> copy, as storage is by
     * primitive value.
     *
     * @param map a <code>Map</code> value
     */
    public void putAll(Map<? extends Double, ? extends V> map) {
        Iterator<? extends Entry<? extends Double, ? extends V>> it = map.entrySet().iterator();
        for (int i = map.size(); i-- > 0;) {
            Entry<? extends Double, ? extends V> e = it.next();
            this.put(e.getKey(), e.getValue());
        }
    }

    /**
     * Wraps a key
     *
     * @param k key in the underlying map
     * @return an Object representation of the key
     */
    protected Double wrapKey(double k) {
        return Double.valueOf(k);
    }

    /**
     * Unwraps a key
     *
     * @param key wrapped key
     * @return an unwrapped representation of the key
     */
    protected double unwrapKey(Object key) {
        return ((Double) key).doubleValue();
    }

    /**
     * Wraps a value
     *
     * @param o value in the underlying map
     * @return an Object representation of the value
     */
    protected final V wrapValue(V o) {
        return o;
    }

    /**
     * Unwraps a value
     *
     * @param value wrapped value
     * @return an unwrapped representation of the value
     */
    protected final V unwrapValue(V value) {
        return value;
    }

    // Implements Externalizable
    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // MAP
        _map = (TDoubleObjectHashMap<V>) in.readObject();
    }

    // Implements Externalizable
    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // MAP
        out.writeObject(_map);
    }

} // TDoubleObjectHashMapDecorator
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.gnu.trove.decorator;

import org.elasticsearch.util.gnu.trove.TDoubleShortHashMap;
import org.elasticsearch.util.gnu.trove.TDoubleShortIterator;

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.*;


//////////////////////////////////////////////////
// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! //
//////////////////////////////////////////////////


/**
 * Wrapper class to make a {@link TDoubleShortHashMap} conform to the <tt>java.util.Map</tt> API.
 * This class simply decorates an underlying TDoubleShortHashMap and translates the Object-based
 * APIs into their Trove primitive analogs.
 * <p/>
 * Note that wrapping and unwrapping primitive values is extremely inefficient. If
 * possible, users of this class should override the appropriate methods in this class
 * and use a table of canonical values.
 * <p/>
 * Created: Mon Sep 23 22:07:40 PDT 2002
 *
 * @author Eric D. Friedman
 * @author Rob Eden
 */
public class TDoubleShortHashMapDecorator extends AbstractMap<Double, Short>
        implements Map<Double, Short>, Externalizable, Cloneable {

    /**
     * the wrapped primitive map
     */
    protected TDoubleShortHashMap _map;

    /**
     * FOR EXTERNALIZATION ONLY!!
     */
    public TDoubleShortHashMapDecorator() {
    }

    /**
     * Creates a wrapper that decorates the specified primitive map.
     */
    public TDoubleShortHashMapDecorator(TDoubleShortHashMap map) {
        super();
        this._map = map;
    }

    /**
     * Returns a reference to the map wrapped by this decorator.
     */
    public TDoubleShortHashMap getMap() {
        return _map;
    }

    /**
     * Clones the underlying trove collection and returns the clone wrapped in a new
     * decorator instance. This is a shallow clone except where primitives are
     * concerned.
     *
     * @return a copy of the receiver
     */
    public TDoubleShortHashMapDecorator clone() {
        try {
            TDoubleShortHashMapDecorator copy = (TDoubleShortHashMapDecorator) super.clone();
            copy._map = (TDoubleShortHashMap) _map.clone();
            return copy;
        } catch (CloneNotSupportedException e) {
            // assert(false);
            throw new InternalError(); // we are cloneable, so this does not happen
        }
    }

    /**
     * Inserts a key/value pair into the map.
     *
     * @param key   an <code>Object</code> value
     * @param value an <code>Object</code> value
     * @return the previous value associated with <tt>key</tt>,
     *         or Short(0) if none was found.
     */
    public Short put(Double key, Short value) {
        return wrapValue(_map.put(unwrapKey(key), unwrapValue(value)));
    }

    /**
     * Retrieves the value for <tt>key</tt>.
     *
     * @param key an <code>Object</code> value
     * @return the value of <tt>key</tt> or null if no such mapping exists.
     */
    public Short get(Double key) {
        double k = unwrapKey(key);
        short v = _map.get(k);
        // 0 may be a false positive since primitive maps
        // cannot return null, so we have to do an extra
        // check here.
        if (v == 0) {
            return _map.containsKey(k) ? wrapValue(v) : null;
        } else {
            return wrapValue(v);
        }
    }

    /**
     * Empties the map.
     */
    public void clear() {
        this._map.clear();
    }

    /**
     * Deletes a key/value pair from the map.
     *
     * @param key an <code>Object</code> value
     * @return the removed value, or Short(0) if it was not found in the map
     */
    public Short remove(Double key) {
        return wrapValue(_map.remove(unwrapKey(key)));
    }

    /**
     * Returns a Set view on the entries of the map.
     *
     * @return a <code>Set</code> value
     */
    public Set<Map.Entry<Double, Short>> entrySet() {
        return new AbstractSet<Map.Entry<Double, Short>>() {
            public int size() {
                return _map.size();
            }

            public boolean isEmpty() {
                return TDoubleShortHashMapDecorator.this.isEmpty();
            }

            public boolean contains(Object o) {
                if (o instanceof Map.Entry) {
                    Object k = ((Map.Entry) o).getKey();
                    Object v = ((Map.Entry) o).getValue();
                    return TDoubleShortHashMapDecorator.this.containsKey(k)
                            && TDoubleShortHashMapDecorator.this.get(k).equals(v);
                } else {
                    return false;
                }
            }

            public Iterator<Map.Entry<Double, Short>> iterator() {
                return new Iterator<Map.Entry<Double, Short>>() {
                    private final TDoubleShortIterator it = _map.iterator();

                    public Map.Entry<Double, Short> next() {
                        it.advance();
                        final Double key = wrapKey(it.key());
                        final Short v = wrapValue(it.value());
                        return new Map.Entry<Double, Short>() {
                            private Short val = v;

                            public boolean equals(Object o) {
                                return o instanceof Map.Entry
                                        && ((Map.Entry) o).getKey().equals(key)
                                        && ((Map.Entry) o).getValue().equals(val);
                            }

                            public Double getKey() {
                                return key;
                            }

                            public Short getValue() {
                                return val;
                            }

                            public int hashCode() {
                                return key.hashCode() + val.hashCode();
                            }

                            public Short setValue(Short value) {
                                val = value;
                                return put(key, value);
                            }
                        };
                    }

                    public boolean hasNext() {
                        return it.hasNext();
                    }

                    public void remove() {
                        it.remove();
                    }
                };
            }

            public boolean add(Map.Entry<Double, Short> o) {
                throw new UnsupportedOperationException();
            }

            public boolean remove(Object o) {
                throw new UnsupportedOperationException();
            }

            public boolean addAll(Collection<? extends Map.Entry<Double, Short>> c) {
                throw new UnsupportedOperationException();
            }

            public boolean retainAll(Collection<?> c) {
                throw new UnsupportedOperationException();
            }

            public boolean removeAll(Collection<?> c) {
                throw new UnsupportedOperationException();
            }

            public void clear() {
                TDoubleShortHashMapDecorator.this.clear();
            }
        };
    }

    /**
     * Checks for the presence of <tt>val</tt> in the values of the map.
     *
     * @param val an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsValue(Object val) {
        return _map.containsValue(unwrapValue(val));
    }

    /**
     * Checks for the presence of <tt>key</tt> in the keys of the map.
     *
     * @param key an <code>Object</code> value
     * @return a <code>boolean</code> value
     */
    public boolean containsKey(Object key) {
        return _map.containsKey(unwrapKey(key));
    }

    /**
     * Returns the number of entries in the map.
     *
     * @return the map's size.
     */
    public int size() {
        return this._map.size();
    }

    /**
     * Indicates whether map has any entries.
     *
     * @return true if the map is empty
     */
    public boolean isEmpty() {
        return size() == 0;
    }

    /**
     * Copies the key/value mappings in <tt>map</tt> into this map.
     * Note that this will be a <b>deep</b> copy, as storage is by
     * primitive value.
     *
     * @param map a <code>Map</code> value
     */
    public void putAll(Map<? extends Double, ? extends Short> map) {
        Iterator<? extends Entry<? extends Double, ? extends Short>> it = map.entrySet().iterator();
        for (int i = map.size(); i-- > 0;) {
            Entry<? extends Double, ? extends Short> e = it.next();
            this.put(e.getKey(), e.getValue());
        }
    }

    /**
     * Wraps a key
     *
     * @param k key in the underlying map
     * @return an Object representation of the key
     */
    protected Double wrapKey(double k) {
        return Double.valueOf(k);
    }

    /**
     * Unwraps a key
     *
     * @param key wrapped key
     * @return an unwrapped representation of the key
     */
    protected double unwrapKey(Object key) {
        return ((Double) key).doubleValue();
    }

    /**
     * Wraps a value
     *
     * @param k value in the underlying map
     * @return an Object representation of the value
     */
    protected Short wrapValue(short k) {
        return Short.valueOf(k);
    }

    /**
     * Unwraps a value
     *
     * @param value wrapped value
     * @return an unwrapped representation of the value
     */
    protected short unwrapValue(Object value) {
        return ((Short) value).shortValue();
    }

    // Implements Externalizable
    public void readExternal(ObjectInput in)
            throws IOException, ClassNotFoundException {

        // VERSION
        in.readByte();

        // MAP
        _map = (TDoubleShortHashMap) in.readObject();
    }

    // Implements Externalizable
    public void writeExternal(ObjectOutput out) throws IOException {
        // VERSION
        out.writeByte(0);

        // MAP
        out.writeObject(_map);
    }

} // TDoubleShortHashMapDecorator
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatByteHashMap; +import org.elasticsearch.util.gnu.trove.TFloatByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TFloatByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatByteHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatByteHashMapDecorator(TFloatByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatByteHashMapDecorator clone() { + try { + TFloatByteHashMapDecorator copy = (TFloatByteHashMapDecorator) super.clone(); + copy._map = (TFloatByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Byte(0) if none was found. + */ + public Byte put(Float key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Float key) { + float k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Byte(0) if it was not found in the map + */ + public Byte remove(Float key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatByteHashMapDecorator.this.containsKey(k) + && TFloatByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatDoubleHashMapDecorator.java new file mode 100644 index 00000000000..f07ca079e62 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TFloatDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TFloatDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatDoubleHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatDoubleHashMapDecorator(TFloatDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatDoubleHashMapDecorator clone() { + try { + TFloatDoubleHashMapDecorator copy = (TFloatDoubleHashMapDecorator) super.clone(); + copy._map = (TFloatDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Double(0) if none was found. + */ + public Double put(Float key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Float key) { + float k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. 
+ if (v == 0) { + return _map.containsKey(k) ? wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Double(0) if it was not found in the map + */ + public Double remove(Float key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatDoubleHashMapDecorator.this.containsKey(k) + && TFloatDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { 
+ throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatFloatHashMapDecorator.java new file mode 100644 index 00000000000..7a45c7f634e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatFloatHashMap; +import org.elasticsearch.util.gnu.trove.TFloatFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TFloatFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatFloatHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatFloatHashMapDecorator(TFloatFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatFloatHashMapDecorator clone() { + try { + TFloatFloatHashMapDecorator copy = (TFloatFloatHashMapDecorator) super.clone(); + copy._map = (TFloatFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Float(0) if none was found. + */ + public Float put(Float key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Float key) { + float k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Float(0) if it was not found in the map + */ + public Float remove(Float key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatFloatHashMapDecorator.this.containsKey(k) + && TFloatFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatHashSetDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatHashSetDecorator.java new file mode 100644 index 00000000000..3764f1028dc --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatHashSetDecorator.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatHashSet; +import org.elasticsearch.util.gnu.trove.TFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatHashSet conform to the java.util.Set API. + * This class simply decorates an underlying TFloatHashSet and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Tue Sep 24 22:08:17 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TFloatHashSetDecorator extends AbstractSet + implements Set, Externalizable { + + /** + * the wrapped primitive set + */ + protected TFloatHashSet _set; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatHashSetDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive set. + */ + public TFloatHashSetDecorator(TFloatHashSet set) { + super(); + this._set = set; + } + + + /** + * Returns a reference to the set wrapped by this decorator. + */ + public TFloatHashSet getSet() { + return _set; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatHashSetDecorator clone() { + try { + TFloatHashSetDecorator copy = (TFloatHashSetDecorator) super.clone(); + copy._set = (TFloatHashSet) _set.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable + } + } + + /** + * Inserts a value into the set. + * + * @param value true if the set was modified by the insertion + */ + public boolean add(Float value) { + return _set.add(unwrap(value)); + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return true if the sets are identical + */ + public boolean equals(Object other) { + if (_set.equals(other)) { + return true; // comparing two trove sets + } else if (other instanceof Set) { + Set that = (Set) other; + if (that.size() != _set.size()) { + return false; // different sizes, no need to compare + } else { // now we have to do it the hard way + Iterator it = that.iterator(); + for (int i = that.size(); i-- > 0;) { + Object val = it.next(); + if (val instanceof Float) { + float v = unwrap(val); + if (_set.contains(v)) { + // match, ok to continue + } else { + return false; // no match: we're done + } + } else { + return false; // different type in other set + } + } + return true; // all entries match + } + } else { + return false; + } + } + + /** + * Empties the set. + */ + public void clear() { + this._set.clear(); + } + + /** + * Deletes a value from the set. + * + * @param value an Object value + * @return true if the set was modified + */ + public boolean remove(Object value) { + return _set.remove(unwrap(value)); + } + + /** + * Creates an iterator over the values of the set. + * + * @return an iterator with support for removals in the underlying set + */ + public Iterator iterator() { + return new Iterator() { + private final TFloatIterator it = _set.iterator(); + + public Float next() { + return wrap(it.next()); + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + /** + * Returns the number of entries in the set. + * + * @return the set's size. + */ + public int size() { + return this._set.size(); + } + + /** + * Indicates whether set has any entries. 
+ * + * @return true if the set is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Wraps a value + * + * @param k value in the underlying set + * @return an Object representation of the value + */ + protected Float wrap(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrap(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // SET + _set = (TFloatHashSet) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // SET + out.writeObject(_set); + } +} // TFloatHashSetDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatIntHashMapDecorator.java new file mode 100644 index 00000000000..368006fcd22 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatIntHashMap; +import org.elasticsearch.util.gnu.trove.TFloatIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *
<p/>
 + * <p/>
    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *
<p/>
 + * <p/>
    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TFloatIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatIntHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatIntHashMapDecorator(TFloatIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatIntHashMapDecorator clone() { + try { + TFloatIntHashMapDecorator copy = (TFloatIntHashMapDecorator) super.clone(); + copy._map = (TFloatIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(Float key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Float key) { + float k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Float key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatIntHashMapDecorator.this.containsKey(k) + && TFloatIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + 
+ public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatLongHashMapDecorator.java new file mode 100644 index 00000000000..b0bb89849aa --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatLongHashMap; +import org.elasticsearch.util.gnu.trove.TFloatLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatLongHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatLongHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *
<p/>
 + * <p/>
    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *
<p/>
 + * <p/>
    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TFloatLongHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatLongHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatLongHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatLongHashMapDecorator(TFloatLongHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatLongHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatLongHashMapDecorator clone() { + try { + TFloatLongHashMapDecorator copy = (TFloatLongHashMapDecorator) super.clone(); + copy._map = (TFloatLongHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Long(0) if none was found. + */ + public Long put(Float key, Long value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Long get(Float key) { + float k = unwrapKey(key); + long v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Long(0) if it was not found in the map + */ + public Long remove(Float key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatLongHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatLongHashMapDecorator.this.containsKey(k) + && TFloatLongHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatLongIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final Long v = wrapValue(it.value()); + return new Map.Entry() { + private Long val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public Long getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Long setValue(Long value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Long o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatLongHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Long wrapValue(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrapValue(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatLongHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatLongHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatObjectHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatObjectHashMapDecorator.java new file mode 100644 index 00000000000..7060a831dfd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatObjectHashMapDecorator.java @@ -0,0 +1,356 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatObjectHashMap; +import org.elasticsearch.util.gnu.trove.TFloatObjectIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatObjectHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatObjectHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *
<p/>
 + * <p/>
    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *
<p/>
 + * <p/>
    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TFloatObjectHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatObjectHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatObjectHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatObjectHashMapDecorator(TFloatObjectHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatObjectHashMap getMap() { + return _map; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatObjectHashMapDecorator clone() { + try { + TFloatObjectHashMapDecorator copy = (TFloatObjectHashMapDecorator) super.clone(); + copy._map = (TFloatObjectHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public V put(Float key, V value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public V get(Object key) { + return _map.get(unwrapKey(key)); + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public V remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatObjectHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatObjectHashMapDecorator.this.containsKey(k) && + TFloatObjectHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatObjectIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final V v = wrapValue(it.value()); + return new Map.Entry() { + private V val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public V getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public V setValue(V value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Map.Entry o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean 
removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatObjectHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue((V) val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param o value in the underlying map + * @return an Object representation of the value + */ + protected final V wrapValue(V o) { + return o; + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected final V unwrapValue(V value) { + return value; + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatObjectHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatObjectHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatShortHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatShortHashMapDecorator.java new file mode 100644 index 00000000000..0dc76e4c61b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TFloatShortHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TFloatShortHashMap; +import org.elasticsearch.util.gnu.trove.TFloatShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TFloatShortHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TFloatShortHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *
<p/>
 + * <p/>
    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *
<p/>
 + * <p/>
    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TFloatShortHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TFloatShortHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TFloatShortHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TFloatShortHashMapDecorator(TFloatShortHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TFloatShortHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TFloatShortHashMapDecorator clone() { + try { + TFloatShortHashMapDecorator copy = (TFloatShortHashMapDecorator) super.clone(); + copy._map = (TFloatShortHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Short(0) if none was found. + */ + public Short put(Float key, Short value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Short get(Float key) { + float k = unwrapKey(key); + short v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Short(0) if it was not found in the map + */ + public Short remove(Float key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TFloatShortHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TFloatShortHashMapDecorator.this.containsKey(k) + && TFloatShortHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TFloatShortIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Float key = wrapKey(it.key()); + final Short v = wrapValue(it.value()); + return new Map.Entry() { + private Short val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Float getKey() { + return key; + } + + public Short getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Short setValue(Short value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Short o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TFloatShortHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Float wrapKey(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected float unwrapKey(Object key) { + return ((Float) key).floatValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Short wrapValue(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrapValue(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TFloatShortHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TFloatShortHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntByteHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntByteHashMapDecorator.java new file mode 100644 index 00000000000..d39303aefe9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntByteHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntByteHashMap; +import org.elasticsearch.util.gnu.trove.TIntByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TIntByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntByteHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntByteHashMapDecorator(TIntByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntByteHashMapDecorator clone() { + try { + TIntByteHashMapDecorator copy = (TIntByteHashMapDecorator) super.clone(); + copy._map = (TIntByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Byte(0) if none was found. + */ + public Byte put(Integer key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Integer key) { + int k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Byte(0) if it was not found in the map + */ + public Byte remove(Integer key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntByteHashMapDecorator.this.containsKey(k) + && TIntByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntDoubleHashMapDecorator.java new file mode 100644 index 00000000000..528cdeda43c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TIntDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TIntDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntDoubleHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntDoubleHashMapDecorator(TIntDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntDoubleHashMapDecorator clone() { + try { + TIntDoubleHashMapDecorator copy = (TIntDoubleHashMapDecorator) super.clone(); + copy._map = (TIntDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Double(0) if none was found. + */ + public Double put(Integer key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Integer key) { + int k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Double(0) if it was not found in the map + */ + public Double remove(Integer key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntDoubleHashMapDecorator.this.containsKey(k) + && TIntDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } 
+ + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntFloatHashMapDecorator.java new file mode 100644 index 00000000000..e88b03c04d7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntFloatHashMap; +import org.elasticsearch.util.gnu.trove.TIntFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TIntFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntFloatHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntFloatHashMapDecorator(TIntFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntFloatHashMapDecorator clone() { + try { + TIntFloatHashMapDecorator copy = (TIntFloatHashMapDecorator) super.clone(); + copy._map = (TIntFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Float(0) if none was found. + */ + public Float put(Integer key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Integer key) { + int k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Float(0) if it was not found in the map + */ + public Float remove(Integer key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntFloatHashMapDecorator.this.containsKey(k) + && TIntFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntHashSetDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntHashSetDecorator.java new file mode 100644 index 00000000000..9c9b1b3e552 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntHashSetDecorator.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntHashSet; +import org.elasticsearch.util.gnu.trove.TIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntHashSet conform to the java.util.Set API. + * This class simply decorates an underlying TIntHashSet and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Tue Sep 24 22:08:17 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TIntHashSetDecorator extends AbstractSet + implements Set, Externalizable { + + /** + * the wrapped primitive set + */ + protected TIntHashSet _set; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntHashSetDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive set. + */ + public TIntHashSetDecorator(TIntHashSet set) { + super(); + this._set = set; + } + + + /** + * Returns a reference to the set wrapped by this decorator. + */ + public TIntHashSet getSet() { + return _set; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntHashSetDecorator clone() { + try { + TIntHashSetDecorator copy = (TIntHashSetDecorator) super.clone(); + copy._set = (TIntHashSet) _set.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable + } + } + + /** + * Inserts a value into the set. + * + * @param value true if the set was modified by the insertion + */ + public boolean add(Integer value) { + return _set.add(unwrap(value)); + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return true if the sets are identical + */ + public boolean equals(Object other) { + if (_set.equals(other)) { + return true; // comparing two trove sets + } else if (other instanceof Set) { + Set that = (Set) other; + if (that.size() != _set.size()) { + return false; // different sizes, no need to compare + } else { // now we have to do it the hard way + Iterator it = that.iterator(); + for (int i = that.size(); i-- > 0;) { + Object val = it.next(); + if (val instanceof Integer) { + int v = unwrap(val); + if (_set.contains(v)) { + // match, ok to continue + } else { + return false; // no match: we're done + } + } else { + return false; // different type in other set + } + } + return true; // all entries match + } + } else { + return false; + } + } + + /** + * Empties the set. + */ + public void clear() { + this._set.clear(); + } + + /** + * Deletes a value from the set. + * + * @param value an Object value + * @return true if the set was modified + */ + public boolean remove(Object value) { + return _set.remove(unwrap(value)); + } + + /** + * Creates an iterator over the values of the set. + * + * @return an iterator with support for removals in the underlying set + */ + public Iterator iterator() { + return new Iterator() { + private final TIntIterator it = _set.iterator(); + + public Integer next() { + return wrap(it.next()); + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + /** + * Returns the number of entries in the set. + * + * @return the set's size. + */ + public int size() { + return this._set.size(); + } + + /** + * Indicates whether set has any entries. 
+ * + * @return true if the set is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Wraps a value + * + * @param k value in the underlying set + * @return an Object representation of the value + */ + protected Integer wrap(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrap(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // SET + _set = (TIntHashSet) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // SET + out.writeObject(_set); + } +} // TIntHashSetDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntIntHashMapDecorator.java new file mode 100644 index 00000000000..40e8602d40b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntIntHashMap; +import org.elasticsearch.util.gnu.trove.TIntIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TIntIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntIntHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntIntHashMapDecorator(TIntIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntIntHashMapDecorator clone() { + try { + TIntIntHashMapDecorator copy = (TIntIntHashMapDecorator) super.clone(); + copy._map = (TIntIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(Integer key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Integer key) { + int k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Integer key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntIntHashMapDecorator.this.containsKey(k) + && TIntIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + 
public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntLongHashMapDecorator.java new file mode 100644 index 00000000000..3df8cb41f06 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntLongHashMap; +import org.elasticsearch.util.gnu.trove.TIntLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntLongHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntLongHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TIntLongHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntLongHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntLongHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntLongHashMapDecorator(TIntLongHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntLongHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntLongHashMapDecorator clone() { + try { + TIntLongHashMapDecorator copy = (TIntLongHashMapDecorator) super.clone(); + copy._map = (TIntLongHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Long(0) if none was found. + */ + public Long put(Integer key, Long value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Long get(Integer key) { + int k = unwrapKey(key); + long v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Long(0) if it was not found in the map + */ + public Long remove(Integer key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntLongHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntLongHashMapDecorator.this.containsKey(k) + && TIntLongHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntLongIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final Long v = wrapValue(it.value()); + return new Map.Entry() { + private Long val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public Long getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Long setValue(Long value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Long o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntLongHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Long wrapValue(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrapValue(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntLongHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntLongHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntObjectHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntObjectHashMapDecorator.java new file mode 100644 index 00000000000..f8d13acd898 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntObjectHashMapDecorator.java @@ -0,0 +1,356 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntObjectHashMap; +import org.elasticsearch.util.gnu.trove.TIntObjectIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntObjectHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntObjectHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TIntObjectHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntObjectHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntObjectHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntObjectHashMapDecorator(TIntObjectHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntObjectHashMap getMap() { + return _map; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntObjectHashMapDecorator clone() { + try { + TIntObjectHashMapDecorator copy = (TIntObjectHashMapDecorator) super.clone(); + copy._map = (TIntObjectHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public V put(Integer key, V value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public V get(Object key) { + return _map.get(unwrapKey(key)); + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public V remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntObjectHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntObjectHashMapDecorator.this.containsKey(k) && + TIntObjectHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntObjectIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final V v = wrapValue(it.value()); + return new Map.Entry() { + private V val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public V getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public V setValue(V value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Map.Entry o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection 
c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntObjectHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue((V) val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param o value in the underlying map + * @return an Object representation of the value + */ + protected final V wrapValue(V o) { + return o; + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected final V unwrapValue(V value) { + return value; + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntObjectHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntObjectHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntShortHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntShortHashMapDecorator.java new file mode 100644 index 00000000000..726e7ab82e2 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TIntShortHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TIntShortHashMap; +import org.elasticsearch.util.gnu.trove.TIntShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TIntShortHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TIntShortHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TIntShortHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TIntShortHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TIntShortHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TIntShortHashMapDecorator(TIntShortHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TIntShortHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TIntShortHashMapDecorator clone() { + try { + TIntShortHashMapDecorator copy = (TIntShortHashMapDecorator) super.clone(); + copy._map = (TIntShortHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Short(0) if none was found. + */ + public Short put(Integer key, Short value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Short get(Integer key) { + int k = unwrapKey(key); + short v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Short(0) if it was not found in the map + */ + public Short remove(Integer key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TIntShortHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TIntShortHashMapDecorator.this.containsKey(k) + && TIntShortHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TIntShortIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Integer key = wrapKey(it.key()); + final Short v = wrapValue(it.value()); + return new Map.Entry() { + private Short val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Integer getKey() { + return key; + } + + public Short getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Short setValue(Short value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Short o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TIntShortHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Integer wrapKey(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected int unwrapKey(Object key) { + return ((Integer) key).intValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Short wrapValue(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrapValue(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TIntShortHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TIntShortHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongByteHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongByteHashMapDecorator.java new file mode 100644 index 00000000000..dee248eb2ab --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongByteHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongByteHashMap; +import org.elasticsearch.util.gnu.trove.TLongByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TLongByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongByteHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongByteHashMapDecorator(TLongByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongByteHashMapDecorator clone() { + try { + TLongByteHashMapDecorator copy = (TLongByteHashMapDecorator) super.clone(); + copy._map = (TLongByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Byte(0) if none was found. + */ + public Byte put(Long key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Long key) { + long k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Byte(0) if it was not found in the map + */ + public Byte remove(Long key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongByteHashMapDecorator.this.containsKey(k) + && TLongByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongDoubleHashMapDecorator.java new file mode 100644 index 00000000000..f0b13f5fa81 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TLongDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TLongDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongDoubleHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongDoubleHashMapDecorator(TLongDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongDoubleHashMapDecorator clone() { + try { + TLongDoubleHashMapDecorator copy = (TLongDoubleHashMapDecorator) super.clone(); + copy._map = (TLongDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Double(0) if none was found. + */ + public Double put(Long key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Long key) { + long k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Double(0) if it was not found in the map + */ + public Double remove(Long key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongDoubleHashMapDecorator.this.containsKey(k) + && TLongDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + 
public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongFloatHashMapDecorator.java new file mode 100644 index 00000000000..922d5549e2b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongFloatHashMap; +import org.elasticsearch.util.gnu.trove.TLongFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TLongFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongFloatHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongFloatHashMapDecorator(TLongFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongFloatHashMapDecorator clone() { + try { + TLongFloatHashMapDecorator copy = (TLongFloatHashMapDecorator) super.clone(); + copy._map = (TLongFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Float(0) if none was found. + */ + public Float put(Long key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Long key) { + long k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Float(0) if it was not found in the map + */ + public Float remove(Long key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongFloatHashMapDecorator.this.containsKey(k) + && TLongFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongHashSetDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongHashSetDecorator.java new file mode 100644 index 00000000000..27b8aea889a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongHashSetDecorator.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongHashSet; +import org.elasticsearch.util.gnu.trove.TLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongHashSet conform to the java.util.Set API. + * This class simply decorates an underlying TLongHashSet and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Tue Sep 24 22:08:17 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TLongHashSetDecorator extends AbstractSet + implements Set, Externalizable { + + /** + * the wrapped primitive set + */ + protected TLongHashSet _set; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongHashSetDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive set. + */ + public TLongHashSetDecorator(TLongHashSet set) { + super(); + this._set = set; + } + + + /** + * Returns a reference to the set wrapped by this decorator. + */ + public TLongHashSet getSet() { + return _set; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongHashSetDecorator clone() { + try { + TLongHashSetDecorator copy = (TLongHashSetDecorator) super.clone(); + copy._set = (TLongHashSet) _set.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable + } + } + + /** + * Inserts a value into the set. + * + * @param value true if the set was modified by the insertion + */ + public boolean add(Long value) { + return _set.add(unwrap(value)); + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return true if the sets are identical + */ + public boolean equals(Object other) { + if (_set.equals(other)) { + return true; // comparing two trove sets + } else if (other instanceof Set) { + Set that = (Set) other; + if (that.size() != _set.size()) { + return false; // different sizes, no need to compare + } else { // now we have to do it the hard way + Iterator it = that.iterator(); + for (int i = that.size(); i-- > 0;) { + Object val = it.next(); + if (val instanceof Long) { + long v = unwrap(val); + if (_set.contains(v)) { + // match, ok to continue + } else { + return false; // no match: we're done + } + } else { + return false; // different type in other set + } + } + return true; // all entries match + } + } else { + return false; + } + } + + /** + * Empties the set. + */ + public void clear() { + this._set.clear(); + } + + /** + * Deletes a value from the set. + * + * @param value an Object value + * @return true if the set was modified + */ + public boolean remove(Object value) { + return _set.remove(unwrap(value)); + } + + /** + * Creates an iterator over the values of the set. + * + * @return an iterator with support for removals in the underlying set + */ + public Iterator iterator() { + return new Iterator() { + private final TLongIterator it = _set.iterator(); + + public Long next() { + return wrap(it.next()); + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + /** + * Returns the number of entries in the set. + * + * @return the set's size. + */ + public int size() { + return this._set.size(); + } + + /** + * Indicates whether set has any entries. 
+ * + * @return true if the set is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Wraps a value + * + * @param k value in the underlying set + * @return an Object representation of the value + */ + protected Long wrap(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrap(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // SET + _set = (TLongHashSet) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // SET + out.writeObject(_set); + } +} // TLongHashSetDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongIntHashMapDecorator.java new file mode 100644 index 00000000000..714970b870a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongIntHashMap; +import org.elasticsearch.util.gnu.trove.TLongIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TLongIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongIntHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongIntHashMapDecorator(TLongIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongIntHashMapDecorator clone() { + try { + TLongIntHashMapDecorator copy = (TLongIntHashMapDecorator) super.clone(); + copy._map = (TLongIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(Long key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Long key) { + long k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Long key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongIntHashMapDecorator.this.containsKey(k) + && TLongIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + 
public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongLongHashMapDecorator.java new file mode 100644 index 00000000000..fb98fcdd854 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongLongHashMap; +import org.elasticsearch.util.gnu.trove.TLongLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongLongHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongLongHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TLongLongHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongLongHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongLongHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongLongHashMapDecorator(TLongLongHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongLongHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongLongHashMapDecorator clone() { + try { + TLongLongHashMapDecorator copy = (TLongLongHashMapDecorator) super.clone(); + copy._map = (TLongLongHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Long(0) if none was found. + */ + public Long put(Long key, Long value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Long get(Long key) { + long k = unwrapKey(key); + long v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Long(0) if it was not found in the map + */ + public Long remove(Long key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongLongHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongLongHashMapDecorator.this.containsKey(k) + && TLongLongHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongLongIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final Long v = wrapValue(it.value()); + return new Map.Entry() { + private Long val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public Long getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Long setValue(Long value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Long o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongLongHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Long wrapValue(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrapValue(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongLongHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongLongHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongObjectHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongObjectHashMapDecorator.java new file mode 100644 index 00000000000..6cf00022843 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongObjectHashMapDecorator.java @@ -0,0 +1,356 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongObjectHashMap; +import org.elasticsearch.util.gnu.trove.TLongObjectIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongObjectHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongObjectHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TLongObjectHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongObjectHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongObjectHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongObjectHashMapDecorator(TLongObjectHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongObjectHashMap getMap() { + return _map; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongObjectHashMapDecorator clone() { + try { + TLongObjectHashMapDecorator copy = (TLongObjectHashMapDecorator) super.clone(); + copy._map = (TLongObjectHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public V put(Long key, V value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public V get(Object key) { + return _map.get(unwrapKey(key)); + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public V remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongObjectHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongObjectHashMapDecorator.this.containsKey(k) && + TLongObjectHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongObjectIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final V v = wrapValue(it.value()); + return new Map.Entry() { + private V val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public V getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public V setValue(V value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Map.Entry o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) 
{ + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongObjectHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue((V) val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param o value in the underlying map + * @return an Object representation of the value + */ + protected final V wrapValue(V o) { + return o; + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected final V unwrapValue(V value) { + return value; + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongObjectHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongObjectHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongShortHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongShortHashMapDecorator.java new file mode 100644 index 00000000000..7e3bc28b7b7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TLongShortHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TLongShortHashMap; +import org.elasticsearch.util.gnu.trove.TLongShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TLongShortHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TLongShortHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TLongShortHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TLongShortHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TLongShortHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TLongShortHashMapDecorator(TLongShortHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TLongShortHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TLongShortHashMapDecorator clone() { + try { + TLongShortHashMapDecorator copy = (TLongShortHashMapDecorator) super.clone(); + copy._map = (TLongShortHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Short(0) if none was found. + */ + public Short put(Long key, Short value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Short get(Long key) { + long k = unwrapKey(key); + short v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Short(0) if it was not found in the map + */ + public Short remove(Long key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TLongShortHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TLongShortHashMapDecorator.this.containsKey(k) + && TLongShortHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TLongShortIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Long key = wrapKey(it.key()); + final Short v = wrapValue(it.value()); + return new Map.Entry() { + private Short val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Long getKey() { + return key; + } + + public Short getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Short setValue(Short value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Short o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TLongShortHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Long wrapKey(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected long unwrapKey(Object key) { + return ((Long) key).longValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Short wrapValue(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrapValue(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TLongShortHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TLongShortHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectByteHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectByteHashMapDecorator.java new file mode 100644 index 00000000000..c3ad7fbf3d9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectByteHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TObjectByteHashMap; +import org.elasticsearch.util.gnu.trove.TObjectByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TObjectByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TObjectByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TObjectByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TObjectByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TObjectByteHashMapDecorator() { + } + + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TObjectByteHashMapDecorator(TObjectByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TObjectByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TObjectByteHashMapDecorator clone() { + try { + TObjectByteHashMapDecorator copy = (TObjectByteHashMapDecorator) super.clone(); + copy._map = (TObjectByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Byte put(V key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Object key) { + V k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Byte remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TObjectByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TObjectByteHashMapDecorator.this.containsKey(k) && + TObjectByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TObjectByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final V key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry && + ((Map.Entry) o).getKey().equals(key) && + ((Map.Entry) o).getValue().equals(val); + } + + public V getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TObjectByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param o key in the underlying map + * @return an Object representation of the key + */ + protected final V wrapKey(Object o) { + return (V) o; + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected final V unwrapKey(Object key) { + return (V) key; + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TObjectByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TObjectByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectDoubleHashMapDecorator.java new file mode 100644 index 00000000000..93cd1352054 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TObjectDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TObjectDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TObjectDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TObjectDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TObjectDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TObjectDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TObjectDoubleHashMapDecorator() { + } + + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TObjectDoubleHashMapDecorator(TObjectDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TObjectDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TObjectDoubleHashMapDecorator clone() { + try { + TObjectDoubleHashMapDecorator copy = (TObjectDoubleHashMapDecorator) super.clone(); + copy._map = (TObjectDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Double put(V key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Object key) { + V k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Double remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TObjectDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TObjectDoubleHashMapDecorator.this.containsKey(k) && + TObjectDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TObjectDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final V key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry && + ((Map.Entry) o).getKey().equals(key) && + ((Map.Entry) o).getValue().equals(val); + } + + public V getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } 
+ + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TObjectDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param o key in the underlying map + * @return an Object representation of the key + */ + protected final V wrapKey(Object o) { + return (V) o; + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected final V unwrapKey(Object key) { + return (V) key; + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TObjectDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TObjectDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectFloatHashMapDecorator.java new file mode 100644 index 00000000000..a0fe440c4d0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TObjectFloatHashMap; +import org.elasticsearch.util.gnu.trove.TObjectFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TObjectFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TObjectFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TObjectFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TObjectFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TObjectFloatHashMapDecorator() { + } + + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TObjectFloatHashMapDecorator(TObjectFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TObjectFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TObjectFloatHashMapDecorator clone() { + try { + TObjectFloatHashMapDecorator copy = (TObjectFloatHashMapDecorator) super.clone(); + copy._map = (TObjectFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Float put(V key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Object key) { + V k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Float remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TObjectFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TObjectFloatHashMapDecorator.this.containsKey(k) && + TObjectFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TObjectFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final V key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry && + ((Map.Entry) o).getKey().equals(key) && + ((Map.Entry) o).getValue().equals(val); + } + + public V getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TObjectFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param o key in the underlying map + * @return an Object representation of the key + */ + protected final V wrapKey(Object o) { + return (V) o; + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected final V unwrapKey(Object key) { + return (V) key; + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TObjectFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TObjectFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectIntHashMapDecorator.java new file mode 100644 index 00000000000..03ceb8bbcc7 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TObjectIntHashMap; +import org.elasticsearch.util.gnu.trove.TObjectIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TObjectIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TObjectIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TObjectIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TObjectIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TObjectIntHashMapDecorator() { + } + + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TObjectIntHashMapDecorator(TObjectIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TObjectIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TObjectIntHashMapDecorator clone() { + try { + TObjectIntHashMapDecorator copy = (TObjectIntHashMapDecorator) super.clone(); + copy._map = (TObjectIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(V key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Object key) { + V k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TObjectIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TObjectIntHashMapDecorator.this.containsKey(k) && + TObjectIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TObjectIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final V key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry && + ((Map.Entry) o).getKey().equals(key) && + ((Map.Entry) o).getValue().equals(val); + } + + public V getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + 
public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TObjectIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param o key in the underlying map + * @return an Object representation of the key + */ + protected final V wrapKey(Object o) { + return (V) o; + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected final V unwrapKey(Object key) { + return (V) key; + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TObjectIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TObjectIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectLongHashMapDecorator.java new file mode 100644 index 00000000000..d8fa772994d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TObjectLongHashMap; +import org.elasticsearch.util.gnu.trove.TObjectLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TObjectLongHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TObjectLongHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TObjectLongHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TObjectLongHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TObjectLongHashMapDecorator() { + } + + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TObjectLongHashMapDecorator(TObjectLongHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TObjectLongHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TObjectLongHashMapDecorator clone() { + try { + TObjectLongHashMapDecorator copy = (TObjectLongHashMapDecorator) super.clone(); + copy._map = (TObjectLongHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Long put(V key, Long value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Long get(Object key) { + V k = unwrapKey(key); + long v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Long remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TObjectLongHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TObjectLongHashMapDecorator.this.containsKey(k) && + TObjectLongHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TObjectLongIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final V key = wrapKey(it.key()); + final Long v = wrapValue(it.value()); + return new Map.Entry() { + private Long val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry && + ((Map.Entry) o).getKey().equals(key) && + ((Map.Entry) o).getValue().equals(val); + } + + public V getKey() { + return key; + } + + public Long getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Long setValue(Long value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Long o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TObjectLongHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param o key in the underlying map + * @return an Object representation of the key + */ + protected final V wrapKey(Object o) { + return (V) o; + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected final V unwrapKey(Object key) { + return (V) key; + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Long wrapValue(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrapValue(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TObjectLongHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TObjectLongHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectShortHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectShortHashMapDecorator.java new file mode 100644 index 00000000000..63b4c09029a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TObjectShortHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TObjectShortHashMap; +import org.elasticsearch.util.gnu.trove.TObjectShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TObjectShortHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TObjectShortHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TObjectShortHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TObjectShortHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TObjectShortHashMapDecorator() { + } + + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TObjectShortHashMapDecorator(TObjectShortHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TObjectShortHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TObjectShortHashMapDecorator clone() { + try { + TObjectShortHashMapDecorator copy = (TObjectShortHashMapDecorator) super.clone(); + copy._map = (TObjectShortHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Short put(V key, Short value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Short get(Object key) { + V k = unwrapKey(key); + short v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Short remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TObjectShortHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TObjectShortHashMapDecorator.this.containsKey(k) && + TObjectShortHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TObjectShortIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final V key = wrapKey(it.key()); + final Short v = wrapValue(it.value()); + return new Map.Entry() { + private Short val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry && + ((Map.Entry) o).getKey().equals(key) && + ((Map.Entry) o).getValue().equals(val); + } + + public V getKey() { + return key; + } + + public Short getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Short setValue(Short value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Short o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TObjectShortHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param o key in the underlying map + * @return an Object representation of the key + */ + protected final V wrapKey(Object o) { + return (V) o; + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected final V unwrapKey(Object key) { + return (V) key; + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Short wrapValue(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrapValue(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TObjectShortHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TObjectShortHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortByteHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortByteHashMapDecorator.java new file mode 100644 index 00000000000..c8eed2c1600 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortByteHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortByteHashMap; +import org.elasticsearch.util.gnu.trove.TShortByteIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortByteHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortByteHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TShortByteHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortByteHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortByteHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortByteHashMapDecorator(TShortByteHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortByteHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortByteHashMapDecorator clone() { + try { + TShortByteHashMapDecorator copy = (TShortByteHashMapDecorator) super.clone(); + copy._map = (TShortByteHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Byte(0) if none was found. + */ + public Byte put(Short key, Byte value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Byte get(Short key) { + short k = unwrapKey(key); + byte v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Byte(0) if it was not found in the map + */ + public Byte remove(Short key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortByteHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortByteHashMapDecorator.this.containsKey(k) + && TShortByteHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortByteIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final Byte v = wrapValue(it.value()); + return new Map.Entry() { + private Byte val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public Byte getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Byte setValue(Byte value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Byte o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortByteHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Byte wrapValue(byte k) { + return Byte.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected byte unwrapValue(Object value) { + return ((Byte) value).byteValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortByteHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortByteHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortDoubleHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortDoubleHashMapDecorator.java new file mode 100644 index 00000000000..612dab08918 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortDoubleHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortDoubleHashMap; +import org.elasticsearch.util.gnu.trove.TShortDoubleIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortDoubleHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortDoubleHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TShortDoubleHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortDoubleHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortDoubleHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortDoubleHashMapDecorator(TShortDoubleHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortDoubleHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortDoubleHashMapDecorator clone() { + try { + TShortDoubleHashMapDecorator copy = (TShortDoubleHashMapDecorator) super.clone(); + copy._map = (TShortDoubleHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Double(0) if none was found. + */ + public Double put(Short key, Double value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Double get(Short key) { + short k = unwrapKey(key); + double v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. 
+ if (v == 0) { + return _map.containsKey(k) ? wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Double(0) if it was not found in the map + */ + public Double remove(Short key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortDoubleHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortDoubleHashMapDecorator.this.containsKey(k) + && TShortDoubleHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortDoubleIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final Double v = wrapValue(it.value()); + return new Map.Entry() { + private Double val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public Double getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Double setValue(Double value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Double o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { 
+ throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortDoubleHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Double wrapValue(double k) { + return Double.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected double unwrapValue(Object value) { + return ((Double) value).doubleValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortDoubleHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortDoubleHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortFloatHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortFloatHashMapDecorator.java new file mode 100644 index 00000000000..eb636fa4900 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortFloatHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortFloatHashMap; +import org.elasticsearch.util.gnu.trove.TShortFloatIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortFloatHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortFloatHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TShortFloatHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortFloatHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortFloatHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortFloatHashMapDecorator(TShortFloatHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortFloatHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortFloatHashMapDecorator clone() { + try { + TShortFloatHashMapDecorator copy = (TShortFloatHashMapDecorator) super.clone(); + copy._map = (TShortFloatHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Float(0) if none was found. + */ + public Float put(Short key, Float value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Float get(Short key) { + short k = unwrapKey(key); + float v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Float(0) if it was not found in the map + */ + public Float remove(Short key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortFloatHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortFloatHashMapDecorator.this.containsKey(k) + && TShortFloatHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortFloatIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final Float v = wrapValue(it.value()); + return new Map.Entry() { + private Float val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public Float getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Float setValue(Float value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Float o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortFloatHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Float wrapValue(float k) { + return Float.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected float unwrapValue(Object value) { + return ((Float) value).floatValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortFloatHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortFloatHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortHashSetDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortHashSetDecorator.java new file mode 100644 index 00000000000..267c1ee26f6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortHashSetDecorator.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortHashSet; +import org.elasticsearch.util.gnu.trove.TShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Set; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortHashSet conform to the java.util.Set API. + * This class simply decorates an underlying TShortHashSet and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Tue Sep 24 22:08:17 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TShortHashSetDecorator extends AbstractSet + implements Set, Externalizable { + + /** + * the wrapped primitive set + */ + protected TShortHashSet _set; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortHashSetDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive set. + */ + public TShortHashSetDecorator(TShortHashSet set) { + super(); + this._set = set; + } + + + /** + * Returns a reference to the set wrapped by this decorator. + */ + public TShortHashSet getSet() { + return _set; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortHashSetDecorator clone() { + try { + TShortHashSetDecorator copy = (TShortHashSetDecorator) super.clone(); + copy._set = (TShortHashSet) _set.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable + } + } + + /** + * Inserts a value into the set. + * + * @param value true if the set was modified by the insertion + */ + public boolean add(Short value) { + return _set.add(unwrap(value)); + } + + /** + * Compares this set with another set for equality of their stored + * entries. 
+ * + * @param other an Object value + * @return true if the sets are identical + */ + public boolean equals(Object other) { + if (_set.equals(other)) { + return true; // comparing two trove sets + } else if (other instanceof Set) { + Set that = (Set) other; + if (that.size() != _set.size()) { + return false; // different sizes, no need to compare + } else { // now we have to do it the hard way + Iterator it = that.iterator(); + for (int i = that.size(); i-- > 0;) { + Object val = it.next(); + if (val instanceof Short) { + short v = unwrap(val); + if (_set.contains(v)) { + // match, ok to continue + } else { + return false; // no match: we're done + } + } else { + return false; // different type in other set + } + } + return true; // all entries match + } + } else { + return false; + } + } + + /** + * Empties the set. + */ + public void clear() { + this._set.clear(); + } + + /** + * Deletes a value from the set. + * + * @param value an Object value + * @return true if the set was modified + */ + public boolean remove(Object value) { + return _set.remove(unwrap(value)); + } + + /** + * Creates an iterator over the values of the set. + * + * @return an iterator with support for removals in the underlying set + */ + public Iterator iterator() { + return new Iterator() { + private final TShortIterator it = _set.iterator(); + + public Short next() { + return wrap(it.next()); + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + /** + * Returns the number of entries in the set. + * + * @return the set's size. + */ + public int size() { + return this._set.size(); + } + + /** + * Indicates whether set has any entries. 
+ * + * @return true if the set is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Wraps a value + * + * @param k value in the underlying set + * @return an Object representation of the value + */ + protected Short wrap(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrap(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // SET + _set = (TShortHashSet) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // SET + out.writeObject(_set); + } +} // TShortHashSetDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortIntHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortIntHashMapDecorator.java new file mode 100644 index 00000000000..66e9bd1c477 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortIntHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortIntHashMap; +import org.elasticsearch.util.gnu.trove.TShortIntIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortIntHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortIntHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TShortIntHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortIntHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortIntHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortIntHashMapDecorator(TShortIntHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortIntHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortIntHashMapDecorator clone() { + try { + TShortIntHashMapDecorator copy = (TShortIntHashMapDecorator) super.clone(); + copy._map = (TShortIntHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public Integer put(Short key, Integer value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Integer get(Short key) { + short k = unwrapKey(key); + int v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public Integer remove(Short key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortIntHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortIntHashMapDecorator.this.containsKey(k) + && TShortIntHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortIntIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final Integer v = wrapValue(it.value()); + return new Map.Entry() { + private Integer val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public Integer getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Integer setValue(Integer value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Integer o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + 
+ public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortIntHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Integer wrapValue(int k) { + return Integer.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected int unwrapValue(Object value) { + return ((Integer) value).intValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortIntHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortIntHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortLongHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortLongHashMapDecorator.java new file mode 100644 index 00000000000..eaec110af35 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortLongHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortLongHashMap; +import org.elasticsearch.util.gnu.trove.TShortLongIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortLongHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortLongHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TShortLongHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortLongHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortLongHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortLongHashMapDecorator(TShortLongHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortLongHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortLongHashMapDecorator clone() { + try { + TShortLongHashMapDecorator copy = (TShortLongHashMapDecorator) super.clone(); + copy._map = (TShortLongHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Long(0) if none was found. + */ + public Long put(Short key, Long value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Long get(Short key) { + short k = unwrapKey(key); + long v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Long(0) if it was not found in the map + */ + public Long remove(Short key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortLongHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortLongHashMapDecorator.this.containsKey(k) + && TShortLongHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortLongIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final Long v = wrapValue(it.value()); + return new Map.Entry() { + private Long val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public Long getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Long setValue(Long value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Long o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean 
addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortLongHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Long wrapValue(long k) { + return Long.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected long unwrapValue(Object value) { + return ((Long) value).longValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortLongHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortLongHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortObjectHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortObjectHashMapDecorator.java new file mode 100644 index 00000000000..9ae384cdc25 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortObjectHashMapDecorator.java @@ -0,0 +1,356 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortObjectHashMap; +import org.elasticsearch.util.gnu.trove.TShortObjectIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortObjectHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortObjectHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + */ +public class TShortObjectHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortObjectHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortObjectHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortObjectHashMapDecorator(TShortObjectHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortObjectHashMap getMap() { + return _map; + } + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortObjectHashMapDecorator clone() { + try { + TShortObjectHashMapDecorator copy = (TShortObjectHashMapDecorator) super.clone(); + copy._map = (TShortObjectHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Integer(0) if none was found. + */ + public V put(Short key, V value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public V get(Object key) { + return _map.get(unwrapKey(key)); + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. 
+ * + * @param key an Object value + * @return the removed value, or Integer(0) if it was not found in the map + */ + public V remove(Object key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortObjectHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortObjectHashMapDecorator.this.containsKey(k) && + TShortObjectHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortObjectIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final V v = wrapValue(it.value()); + return new Map.Entry() { + private V val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public V getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public V setValue(V value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Map.Entry o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean 
removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortObjectHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue((V) val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param o value in the underlying map + * @return an Object representation of the value + */ + protected final V wrapValue(V o) { + return o; + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected final V unwrapValue(V value) { + return value; + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortObjectHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortObjectHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortShortHashMapDecorator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortShortHashMapDecorator.java new file mode 100644 index 00000000000..2bb7e12d348 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/TShortShortHashMapDecorator.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.gnu.trove.decorator; + +import org.elasticsearch.util.gnu.trove.TShortShortHashMap; +import org.elasticsearch.util.gnu.trove.TShortShortIterator; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.*; + + +////////////////////////////////////////////////// +// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! // +////////////////////////////////////////////////// + + +/** + * Wrapper class to make a TShortShortHashMap conform to the java.util.Map API. + * This class simply decorates an underlying TShortShortHashMap and translates the Object-based + * APIs into their Trove primitive analogs. + *

    + *

    + * Note that wrapping and unwrapping primitive values is extremely inefficient. If + * possible, users of this class should override the appropriate methods in this class + * and use a table of canonical values. + *

    + *

    + * Created: Mon Sep 23 22:07:40 PDT 2002 + * + * @author Eric D. Friedman + * @author Rob Eden + */ +public class TShortShortHashMapDecorator extends AbstractMap + implements Map, Externalizable, Cloneable { + + /** + * the wrapped primitive map + */ + protected TShortShortHashMap _map; + + + /** + * FOR EXTERNALIZATION ONLY!! + */ + public TShortShortHashMapDecorator() { + } + + /** + * Creates a wrapper that decorates the specified primitive map. + */ + public TShortShortHashMapDecorator(TShortShortHashMap map) { + super(); + this._map = map; + } + + + /** + * Returns a reference to the map wrapped by this decorator. + */ + public TShortShortHashMap getMap() { + return _map; + } + + + /** + * Clones the underlying trove collection and returns the clone wrapped in a new + * decorator instance. This is a shallow clone except where primitives are + * concerned. + * + * @return a copy of the receiver + */ + public TShortShortHashMapDecorator clone() { + try { + TShortShortHashMapDecorator copy = (TShortShortHashMapDecorator) super.clone(); + copy._map = (TShortShortHashMap) _map.clone(); + return copy; + } catch (CloneNotSupportedException e) { + // assert(false); + throw new InternalError(); // we are cloneable, so this does not happen + } + } + + /** + * Inserts a key/value pair into the map. + * + * @param key an Object value + * @param value an Object value + * @return the previous value associated with key, + * or Short(0) if none was found. + */ + public Short put(Short key, Short value) { + return wrapValue(_map.put(unwrapKey(key), unwrapValue(value))); + } + + /** + * Retrieves the value for key + * + * @param key an Object value + * @return the value of key or null if no such mapping exists. + */ + public Short get(Short key) { + short k = unwrapKey(key); + short v = _map.get(k); + // 0 may be a false positive since primitive maps + // cannot return null, so we have to do an extra + // check here. + if (v == 0) { + return _map.containsKey(k) ? 
wrapValue(v) : null; + } else { + return wrapValue(v); + } + } + + + /** + * Empties the map. + */ + public void clear() { + this._map.clear(); + } + + /** + * Deletes a key/value pair from the map. + * + * @param key an Object value + * @return the removed value, or Short(0) if it was not found in the map + */ + public Short remove(Short key) { + return wrapValue(_map.remove(unwrapKey(key))); + } + + /** + * Returns a Set view on the entries of the map. + * + * @return a Set value + */ + public Set> entrySet() { + return new AbstractSet>() { + public int size() { + return _map.size(); + } + + public boolean isEmpty() { + return TShortShortHashMapDecorator.this.isEmpty(); + } + + public boolean contains(Object o) { + if (o instanceof Map.Entry) { + Object k = ((Map.Entry) o).getKey(); + Object v = ((Map.Entry) o).getValue(); + return TShortShortHashMapDecorator.this.containsKey(k) + && TShortShortHashMapDecorator.this.get(k).equals(v); + } else { + return false; + } + } + + public Iterator> iterator() { + return new Iterator>() { + private final TShortShortIterator it = _map.iterator(); + + public Map.Entry next() { + it.advance(); + final Short key = wrapKey(it.key()); + final Short v = wrapValue(it.value()); + return new Map.Entry() { + private Short val = v; + + public boolean equals(Object o) { + return o instanceof Map.Entry + && ((Map.Entry) o).getKey().equals(key) + && ((Map.Entry) o).getValue().equals(val); + } + + public Short getKey() { + return key; + } + + public Short getValue() { + return val; + } + + public int hashCode() { + return key.hashCode() + val.hashCode(); + } + + public Short setValue(Short value) { + val = value; + return put(key, value); + } + }; + } + + public boolean hasNext() { + return it.hasNext(); + } + + public void remove() { + it.remove(); + } + }; + } + + public boolean add(Short o) { + throw new UnsupportedOperationException(); + } + + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + public 
boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public void clear() { + TShortShortHashMapDecorator.this.clear(); + } + }; + } + + /** + * Checks for the presence of val in the values of the map. + * + * @param val an Object value + * @return a boolean value + */ + public boolean containsValue(Object val) { + return _map.containsValue(unwrapValue(val)); + } + + /** + * Checks for the present of key in the keys of the map. + * + * @param key an Object value + * @return a boolean value + */ + public boolean containsKey(Object key) { + return _map.containsKey(unwrapKey(key)); + } + + /** + * Returns the number of entries in the map. + * + * @return the map's size. + */ + public int size() { + return this._map.size(); + } + + /** + * Indicates whether map has any entries. + * + * @return true if the map is empty + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Copies the key/value mappings in map into this map. + * Note that this will be a deep copy, as storage is by + * primitive value. 
+ * + * @param map a Map value + */ + public void putAll(Map map) { + Iterator> it = map.entrySet().iterator(); + for (int i = map.size(); i-- > 0;) { + Entry e = it.next(); + this.put(e.getKey(), e.getValue()); + } + } + + /** + * Wraps a key + * + * @param k key in the underlying map + * @return an Object representation of the key + */ + protected Short wrapKey(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a key + * + * @param key wrapped key + * @return an unwrapped representation of the key + */ + protected short unwrapKey(Object key) { + return ((Short) key).shortValue(); + } + + /** + * Wraps a value + * + * @param k value in the underlying map + * @return an Object representation of the value + */ + protected Short wrapValue(short k) { + return Short.valueOf(k); + } + + /** + * Unwraps a value + * + * @param value wrapped value + * @return an unwrapped representation of the value + */ + protected short unwrapValue(Object value) { + return ((Short) value).shortValue(); + } + + + // Implements Externalizable + + public void readExternal(ObjectInput in) + throws IOException, ClassNotFoundException { + + // VERSION + in.readByte(); + + // MAP + _map = (TShortShortHashMap) in.readObject(); + } + + + // Implements Externalizable + + public void writeExternal(ObjectOutput out) throws IOException { + // VERSION + out.writeByte(0); + + // MAP + out.writeObject(_map); + } + +} // TShortShortHashMapDecorator diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/package.html b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/package.html new file mode 100644 index 00000000000..2b9296cfdf0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/decorator/package.html @@ -0,0 +1,58 @@ + + + + + + GNU Trove API Documentation + + + +

    GNU Trove: Decorators for java.util.{Map,Set} compatibility

    + +

    The classes in this package serve to wrap the Trove primitive + collections so that they can be used in operations that require a + java.util.Map or java.util.Set.

    + +

    This form of adaptation is extremely inefficient + so should only be undertaken as a last resort or when you don't + care about performance (in which case Trove is probably not + appropriate anyway).

    + +

    The general pattern here is that you "wrap" a Trove collection + with the appropriate decorator object to obtain a java.util.Map or + Set. The implementations do not retain references to the Objects + they accept/return (all calls are delegated to the underlying + trove collection), so you should not rely on object identity + within those collections.

    + +

    You may extend the decorator classes to use canonical values if + your dataset permits. For some applications, this will help + reduce the cost of (un)wrapping primitive values. Note, however, + that such datasets are probably small/restricted enough that you + should again ask yourself whether Trove is appropriate in the + first place. Caveat programmer. + + +Last modified: Mon Sep 23 22:55:32 PDT 2002 + + + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/package.html b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/package.html new file mode 100644 index 00000000000..258186920a5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/gnu/trove/package.html @@ -0,0 +1,160 @@ + + + + + + GNU Trove API Documentation + + + +

    Taken AS IS to use in Elastic Search under Trove License, version 2.1.0

    + +

    GNU Trove: High performance collections for Java.

    + +

    Objectives

    + +

    The GNU Trove library has two objectives: +

      + +
    1. Provide "free" (as in "free speech" + and "free beer"), fast, lightweight + implementations of the java.util Collections API. + These implementations are designed to be pluggable + replacements for their JDK equivalents. +
    2. + +
    3. Whenever possible, provide the same collections support + for primitive types. This gap in the JDK is + often addressed by using the "wrapper" classes + (java.lang.Integer, java.lang.Float, etc.) + with Object-based collections. For most applications, + however, collections which store primitives directly will + require less space and yield significant performance gains. +
    +

    + +

    Hashtable techniques

    + +

    The Trove maps/sets use open addressing instead of the chaining + approach taken by the JDK hashtables. This eliminates the need + to create Map.Entry wrapper objects for every item in a + table and so reduces the O (big-oh) in the performance of + the hashtable algorithm. The size of the tables used in Trove's maps/sets is + always a prime number, improving the probability of an optimal distribution + of entries across the table, and so reducing the likelihood + of performance-degrading collisions. Trove sets are not + backed by maps, and so using a THashSet does not result in + the allocation of an unused "values" array. +

    + +

    Hashing strategies

    + +

    Trove's maps/sets support the use of custom hashing + strategies, allowing you to tune collections based on + characteristics of the input data. This feature also allows you + to define hash functions when it is not feasible to override + Object.hashCode(). For example, the java.lang.String class is + final, and its implementation of hashCode() takes O(n) + time to complete. In some applications, however, it may be + possible for a custom hashing function to save time by skipping + portions of the string that are invariant.

    + +

    Using java.util.HashMap, it is not possible to use Java + language arrays as keys. For example, this code: +

    +    char[] foo, bar;
    +    foo = new char[] {'a','b','c'};
    +    bar = new char[] {'a','b','c'};
    +    System.out.println(foo.hashCode() == bar.hashCode() ? "equal" : "not equal");
    +    System.out.println(foo.equals(bar) ? "equal" : "not equal");
    +    
    + +produces this output: + +
    +    not equal
    +    not equal
    +    
    + +And so an entry stored in a java.util.HashMap with foo as a +key could not be retrieved with bar, since there is no way +to override hashCode() or equals() on language array objects. +

    + +

    In a gnu.trove.THashMap, however, you can implement a TObjectHashingStrategy + to enable hashing on arrays: + +

    +    class CharArrayStrategy implements TObjectHashingStrategy {
    +        public int computeHashCode(Object o) {
    +            char[] c = (char[])o;
    +            // use the shift-add-xor class of string hashing functions
    +            // cf. Ramakrishna and Zobel, "Performance in Practice of String Hashing Functions"
    +            int h = 31; // seed chosen at random
    +            for (int i = 0; i < c.length; i++) { // could skip invariants
    +                h = h ^ ((h << 5) + (h >> 2) + c[i]); // L=5, R=2 works well for ASCII input
    +            }
    +            return h;
    +        }
    +
    +        public boolean equals(Object o1, Object o2) {
    +            char[] c1 = (char[])o1;
    +            char[] c2 = (char[])o2;
    +            if (c1.length != c2.length) { // could drop this check for fixed-length keys
    +                return false;
    +            }
    +            for (int i = 0, len = c1.length; i < len; i++) { // could skip invariants
    +                if (c1[i] != c2[i]) {
    +                    return false;
    +                }
    +            }
    +            return true;
    +        }
    +    }
    +    
    +

    + +

    Iterators in primitive collections

    + +

    As of release 0.1.7, Trove's primitive mappings include access through Iterators + as well as procedures and functions. The API documentation on those classes + contains several examples showing how these can be used effectively and + explaining why their semantics differ from those of java.util.Iterator.

    + +

    Miscellaneous

    + +

    N.B. using Map.entrySet on a Trove Map is supported, but + not encouraged. The reason is that this API requires the creation of + the Map.Entry Objects that all other parts of Trove manage to avoid. + An alternative is to implement the appropriate Procedure + interface and use it to invoke the Map's forEachEntry + API. Map.keySet and Map.values are not + similarly encumbered; nevertheless, the forEachKey, + forEachValue, and transformValues APIs will yield + slightly better performance at the cost of compatibility with the + interface of java.util.Map.

    + +
    + + +Last modified: Mon Sep 23 18:22:39 PDT 2002 + + + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/guice/Injectors.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/guice/Injectors.java new file mode 100644 index 00000000000..a7e38c4dbb5 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/guice/Injectors.java @@ -0,0 +1,224 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.guice; + +import com.google.inject.*; +import com.google.inject.internal.Sets; +import com.google.inject.matcher.Matcher; +import com.google.inject.name.Names; + +import java.lang.reflect.Type; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +/** + * @author kimchy (Shay Banon) + */ +public class Injectors { + + /** + * Returns an instance of the given type with the {@link com.google.inject.name.Named} + * annotation value. + *

    /**
     * Returns the instance bound to the given type under the given binding-annotation name.
     * This is the concise equivalent of
     * {@code injector.getInstance(Key.get(type, Names.named(name)))}.
     *
     * @param injector the injector to query
     * @param type     the bound type
     * @param name     the {@code @Named} binding annotation value
     * @return the bound instance
     */
    public static <T> T getInstance(Injector injector, Class<T> type, String name) {
        return injector.getInstance(Key.get(type, Names.named(name)));
    }

    /**
     * Returns a collection of all instances of the given base type.
     *
     * @param baseClass the base type of objects required
     * @param <T>       the base type
     * @return a set of objects returned from this injector
     */
    public static <T> Set<T> getInstancesOf(Injector injector, Class<T> baseClass) {
        Set<T> answer = Sets.newHashSet();
        Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
        for (Entry<Key<?>, Binding<?>> entry : entries) {
            Key<?> key = entry.getKey();
            Class<?> keyType = getKeyType(key);
            if (keyType != null && baseClass.isAssignableFrom(keyType)) {
                Binding<?> binding = entry.getValue();
                Object value = binding.getProvider().get();
                if (value != null) {
                    // baseClass.cast gives a checked cast, unlike a raw (T) cast
                    T castValue = baseClass.cast(value);
                    answer.add(castValue);
                }
            }
        }
        return answer;
    }

    /**
     * Returns a collection of all instances whose binding key type satisfies the matcher.
     *
     * @param matcher matches the types to return instances for
     * @return a set of objects returned from this injector
     */
    @SuppressWarnings("unchecked")
    public static <T> Set<T> getInstancesOf(Injector injector, Matcher<Class> matcher) {
        Set<T> answer = Sets.newHashSet();
        Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
        for (Entry<Key<?>, Binding<?>> entry : entries) {
            Key<?> key = entry.getKey();
            Class<?> keyType = getKeyType(key);
            if (keyType != null && matcher.matches(keyType)) {
                Binding<?> binding = entry.getValue();
                Object value = binding.getProvider().get();
                answer.add((T) value);
            }
        }
        return answer;
    }

    /**
     * Returns a collection of all providers whose binding key type satisfies the matcher.
     *
     * @param matcher matches the types to return providers for
     * @return a set of providers returned from this injector
     */
    @SuppressWarnings("unchecked")
    public static <T> Set<Provider<T>> getProvidersOf(Injector injector, Matcher<Class> matcher) {
        Set<Provider<T>> answer = Sets.newHashSet();
        Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
        for (Entry<Key<?>, Binding<?>> entry : entries) {
            Key<?> key = entry.getKey();
            Class<?> keyType = getKeyType(key);
            if (keyType != null && matcher.matches(keyType)) {
                Binding<?> binding = entry.getValue();
                answer.add((Provider<T>) binding.getProvider());
            }
        }
        return answer;
    }

    /**
     * Returns a collection of all providers of the given base type.
     *
     * @param baseClass the base type of objects required
     * @param <T>       the base type
     * @return a set of providers returned from this injector
     */
    @SuppressWarnings("unchecked")
    public static <T> Set<Provider<T>> getProvidersOf(Injector injector, Class<T> baseClass) {
        Set<Provider<T>> answer = Sets.newHashSet();
        Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
        for (Entry<Key<?>, Binding<?>> entry : entries) {
            Key<?> key = entry.getKey();
            Class<?> keyType = getKeyType(key);
            if (keyType != null && baseClass.isAssignableFrom(keyType)) {
                Binding<?> binding = entry.getValue();
                answer.add((Provider<T>) binding.getProvider());
            }
        }
        return answer;
    }

    /**
     * Returns true if a binding exists whose key type satisfies the given matcher.
     */
    public static boolean hasBinding(Injector injector, Matcher<Class> matcher) {
        return !getBindingsOf(injector, matcher).isEmpty();
    }

    /**
     * Returns true if a binding exists for the given base class.
     */
    public static boolean hasBinding(Injector injector, Class<?> baseClass) {
        return !getBindingsOf(injector, baseClass).isEmpty();
    }

    /**
     * Returns true if a binding exists for the given key.
     */
    public static boolean hasBinding(Injector injector, Key<?> key) {
        Binding<?> binding = getBinding(injector, key);
        return binding != null;
    }

    /**
     * Returns the binding for the given key, or null if there is no such binding.
     */
    public static Binding<?> getBinding(Injector injector, Key<?> key) {
        Map<Key<?>, Binding<?>> bindings = injector.getBindings();
        return bindings.get(key);
    }

    /**
     * Returns a collection of all bindings whose key type satisfies the given matcher.
     *
     * @param matcher matches the types to return bindings for
     * @return a set of bindings returned from this injector
     */
    public static Set<Binding<?>> getBindingsOf(Injector injector, Matcher<Class> matcher) {
        Set<Binding<?>> answer = Sets.newHashSet();
        Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
        for (Entry<Key<?>, Binding<?>> entry : entries) {
            Key<?> key = entry.getKey();
            Class<?> keyType = getKeyType(key);
            if (keyType != null && matcher.matches(keyType)) {
                answer.add(entry.getValue());
            }
        }
        return answer;
    }

    /**
     * Returns a collection of all bindings of the given base type.
     *
     * @param baseClass the base type of objects required
     * @return a set of bindings returned from this injector
     */
    public static Set<Binding<?>> getBindingsOf(Injector injector, Class<?> baseClass) {
        Set<Binding<?>> answer = Sets.newHashSet();
        Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
        for (Entry<Key<?>, Binding<?>> entry : entries) {
            Key<?> key = entry.getKey();
            Class<?> keyType = getKeyType(key);
            if (keyType != null && baseClass.isAssignableFrom(keyType)) {
                answer.add(entry.getValue());
            }
        }
        return answer;
    }

    /**
     * Returns the raw class of the given key's type literal, or null when the
     * key's type is not a plain class (e.g. a parameterized type).
     */
    public static Class<?> getKeyType(Key<?> key) {
        Class<?> keyType = null;
        TypeLiteral<?> typeLiteral = key.getTypeLiteral();
        Type type = typeLiteral.getType();
        if (type instanceof Class) {
            keyType = (Class<?>) type;
        }
        return keyType;
    }

    /**
     * Placeholder for shutting down an injector.
     * NOTE(review): intentionally empty in the original — confirm whether
     * singleton cleanup should eventually happen here.
     */
    public static void close(Injector injector) {
    }
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.guice; + +import com.google.inject.Module; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.util.Nullable; +import org.elasticsearch.util.settings.Settings; + +import java.lang.reflect.Constructor; + +/** + * @author kimchy (Shay Banon) + */ +public class ModulesFactory { + + public static Module createModule(String moduleClass, Settings settings) throws ClassNotFoundException { + return createModule((Class) settings.getClassLoader().loadClass(moduleClass), settings); + } + + public static Module createModule(Class moduleClass, @Nullable Settings settings) { + Constructor constructor; + try { + constructor = moduleClass.getConstructor(Settings.class); + try { + return constructor.newInstance(settings); + } catch (Exception e) { + throw new ElasticSearchException("Failed to create module [" + moduleClass + "]", e); + } + } catch (NoSuchMethodException e) { + try { + constructor = moduleClass.getConstructor(); + try { + return constructor.newInstance(); + } catch (Exception e1) { + throw new ElasticSearchException("Failed to create module [" + moduleClass + "]", e); + } + } catch (NoSuchMethodException e1) { + throw new ElasticSearchException("No constructor for [" + moduleClass + "]"); + } + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/BooleanStreamable.java 
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/BooleanStreamable.java new file mode 100644 index 00000000000..da9ce074de0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/BooleanStreamable.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class BooleanStreamable implements Streamable { + + private boolean value; + + public BooleanStreamable() { + } + + public BooleanStreamable(boolean value) { + this.value = value; + } + + public void set(boolean newValue) { + value = newValue; + } + + public boolean get() { + return this.value; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + value = in.readBoolean(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeBoolean(value); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ByteArrayDataInputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ByteArrayDataInputStream.java new file mode 100644 index 00000000000..823064c3f0a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ByteArrayDataInputStream.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.concurrent.NotThreadSafe; + +import java.io.DataInputStream; + +/** + * @author kimchy (Shay Banon) + */ +@NotThreadSafe +public class ByteArrayDataInputStream extends DataInputStream { + + /** + * Creates a DataInputStream that uses the specified + * underlying InputStream. + * + * @param source the specified source + */ + public ByteArrayDataInputStream(byte[] source) { + super(new FastByteArrayInputStream(source)); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ByteArrayDataOutputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ByteArrayDataOutputStream.java new file mode 100644 index 00000000000..3d84a6369be --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ByteArrayDataOutputStream.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.concurrent.NotThreadSafe; + +import java.io.DataOutputStream; + +/** + * @author kimchy (Shay Banon) + */ +@NotThreadSafe +public class ByteArrayDataOutputStream extends DataOutputStream { + + /** + * A thread local based cache of {@link ByteArrayDataOutputStream}. + */ + public static class Cached { + + private static final ThreadLocal cache = new ThreadLocal() { + @Override protected ByteArrayDataOutputStream initialValue() { + return new ByteArrayDataOutputStream(); + } + }; + + /** + * Returns the cached thread local byte strean, with its internal stream cleared. + */ + public static ByteArrayDataOutputStream cached() { + ByteArrayDataOutputStream os = cache.get(); + ((FastByteArrayOutputStream) os.out).reset(); + return os; + } + } + + + public ByteArrayDataOutputStream() { + super(new FastByteArrayOutputStream()); + } + + public byte[] copiedByteArray() { + return outputStream().copiedByteArray(); + } + + public byte[] unsafeByteArray() { + return outputStream().unsafeByteArray(); + } + + private FastByteArrayOutputStream outputStream() { + return (FastByteArrayOutputStream) out; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/CompactObjectInputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/CompactObjectInputStream.java new file mode 100644 index 00000000000..88d8dfce77b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/CompactObjectInputStream.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.Classes; + +import java.io.*; + +/** + * @author kimchy (Shay Banon) + */ +class CompactObjectInputStream extends ObjectInputStream { + + private final ClassLoader classLoader; + + CompactObjectInputStream(InputStream in) throws IOException { + this(in, null); + } + + CompactObjectInputStream(InputStream in, ClassLoader classLoader) throws IOException { + super(in); + this.classLoader = classLoader; + } + + @Override protected void readStreamHeader() throws IOException, + StreamCorruptedException { + int version = readByte() & 0xFF; + if (version != STREAM_VERSION) { + throw new StreamCorruptedException( + "Unsupported version: " + version); + } + } + + @Override protected ObjectStreamClass readClassDescriptor() + throws IOException, ClassNotFoundException { + int type = read(); + if (type < 0) { + throw new EOFException(); + } + switch (type) { + case CompactObjectOutputStream.TYPE_FAT_DESCRIPTOR: + return super.readClassDescriptor(); + case CompactObjectOutputStream.TYPE_THIN_DESCRIPTOR: + String className = readUTF(); + Class clazz = loadClass(className); + return ObjectStreamClass.lookup(clazz); + default: + throw new StreamCorruptedException( + "Unexpected class descriptor type: " + type); + } + } + + @Override protected Class resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { + String className = desc.getName(); + try { + return loadClass(className); + } catch (ClassNotFoundException ex) { + return super.resolveClass(desc); + } + } + + 
protected Class loadClass(String className) throws ClassNotFoundException { + Class clazz; + ClassLoader classLoader = this.classLoader; + if (classLoader == null) { + classLoader = Classes.getDefaultClassLoader(); + } + + if (classLoader != null) { + clazz = classLoader.loadClass(className); + } else { + clazz = Class.forName(className); + } + return clazz; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/CompactObjectOutputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/CompactObjectOutputStream.java new file mode 100644 index 00000000000..24d4f3d0c53 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/CompactObjectOutputStream.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.OutputStream; + +/** + * @author kimchy (Shay Banon) + */ +class CompactObjectOutputStream extends ObjectOutputStream { + + static final int TYPE_FAT_DESCRIPTOR = 0; + static final int TYPE_THIN_DESCRIPTOR = 1; + + CompactObjectOutputStream(OutputStream out) throws IOException { + super(out); + } + + @Override protected void writeStreamHeader() throws IOException { + writeByte(STREAM_VERSION); + } + + @Override protected void writeClassDescriptor(ObjectStreamClass desc) throws IOException { + Class clazz = desc.forClass(); + if (clazz.isPrimitive() || clazz.isArray()) { + write(TYPE_FAT_DESCRIPTOR); + super.writeClassDescriptor(desc); + } else { + write(TYPE_THIN_DESCRIPTOR); + writeUTF(desc.getName()); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/DataInputInputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/DataInputInputStream.java new file mode 100644 index 00000000000..f1361939a72 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/DataInputInputStream.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import java.io.DataInput; +import java.io.IOException; +import java.io.InputStream; + +/** + * A wrapper {@link java.io.InputStream} around {@link java.io.DataInput}. + * + * @author kimchy (Shay Banon) + */ +public class DataInputInputStream extends InputStream { + + private final DataInput dataInput; + + public DataInputInputStream(DataInput dataInput) { + this.dataInput = dataInput; + } + + @Override public int read() throws IOException { + return dataInput.readByte(); + } + + @Override public long skip(long n) throws IOException { + return dataInput.skipBytes((int) n); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastByteArrayInputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastByteArrayInputStream.java new file mode 100644 index 00000000000..637a254ac40 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastByteArrayInputStream.java @@ -0,0 +1,264 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * An unsynchronized drop-in replacement for {@link java.io.ByteArrayInputStream}:
 * same contract, but no method-level locking.
 *
 * @author kimchy (Shay Banon)
 */
public class FastByteArrayInputStream extends InputStream {

    /** The backing array; only {@code buf[0..count-1]} is ever readable. Not copied. */
    protected byte buf[];

    /** Index of the next byte to read; always in {@code [0, count]}. */
    protected int pos;

    /**
     * Position remembered by {@link #mark(int)}. Defaults to the constructor
     * offset (or 0 when no offset was supplied).
     */
    protected int mark = 0;

    /** One past the last readable index in {@link #buf}. */
    protected int count;

    /**
     * Creates a stream over the whole of {@code buf}. The array is not copied.
     */
    public FastByteArrayInputStream(byte buf[]) {
        this.buf = buf;
        this.pos = 0;
        this.count = buf.length;
    }

    /**
     * Creates a stream over {@code buf[offset .. min(offset+length, buf.length))}.
     * The array is not copied; the mark starts at {@code offset}.
     */
    public FastByteArrayInputStream(byte buf[], int offset, int length) {
        this.buf = buf;
        this.pos = offset;
        this.count = Math.min(offset + length, buf.length);
        this.mark = offset;
    }

    /**
     * Reads one byte as an unsigned value in 0-255, or -1 at end of stream.
     * Never blocks.
     */
    public int read() {
        if (pos >= count) {
            return -1;
        }
        return buf[pos++] & 0xff;
    }

    /**
     * Reads up to {@code len} bytes into {@code b[off..]}. Returns the number
     * of bytes copied, or -1 when the stream is exhausted. Never blocks.
     *
     * @throws NullPointerException      if {@code b} is null
     * @throws IndexOutOfBoundsException if {@code off}/{@code len} are out of range
     */
    public int read(byte b[], int off, int len) {
        if (b == null) {
            throw new NullPointerException();
        }
        if (off < 0 || len < 0 || len > b.length - off) {
            throw new IndexOutOfBoundsException();
        }
        if (pos >= count) {
            return -1;
        }
        int toCopy = Math.min(len, count - pos);
        if (toCopy <= 0) {
            return 0;
        }
        System.arraycopy(buf, pos, b, off, toCopy);
        pos += toCopy;
        return toCopy;
    }

    /**
     * Skips up to {@code n} bytes, clamped to what remains; returns the number
     * actually skipped (0 for non-positive {@code n}).
     */
    public long skip(long n) {
        long stepped = n;
        if (pos + stepped > count) {
            stepped = count - pos;
        }
        if (stepped < 0) {
            return 0;
        }
        pos += stepped;
        return stepped;
    }

    /** Returns the number of bytes remaining ({@code count - pos}). */
    public int available() {
        return count - pos;
    }

    /** Mark/reset is always supported. */
    public boolean markSupported() {
        return true;
    }

    /**
     * Remembers the current position for {@link #reset()}. The
     * {@code readAheadLimit} is meaningless here since the whole buffer is
     * always retained.
     */
    public void mark(int readAheadLimit) {
        mark = pos;
    }

    /** Rewinds to the marked position (the constructor offset by default). */
    public void reset() {
        pos = mark;
    }

    /** Closing has no effect; all methods remain usable afterwards. */
    public void close() throws IOException {
    }
}
/**
 * Similar to {@link java.io.ByteArrayOutputStream}, just not synchronized.
 *
 * @author kimchy (Shay Banon)
 */
public class FastByteArrayOutputStream extends OutputStream {

    /** The buffer where data is stored. */
    protected byte buf[];

    /** The number of valid bytes in the buffer. */
    protected int count;

    /**
     * Creates a stream with an initial capacity of 32 bytes; the buffer grows
     * as needed.
     */
    public FastByteArrayOutputStream() {
        this(32);
    }

    /**
     * Creates a stream with the given initial capacity, in bytes.
     *
     * @throws IllegalArgumentException if {@code size} is negative
     */
    public FastByteArrayOutputStream(int size) {
        if (size < 0) {
            throw new IllegalArgumentException("Negative initial size: " + size);
        }
        buf = new byte[size];
    }

    /**
     * Writes the specified byte to this stream, growing the buffer (at least
     * doubling) when full.
     */
    public void write(int b) {
        int newcount = count + 1;
        if (newcount > buf.length) {
            buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount));
        }
        buf[count] = (byte) b;
        count = newcount;
    }

    /**
     * Writes {@code len} bytes from {@code b} starting at {@code off}.
     *
     * @throws IndexOutOfBoundsException if the range is invalid
     */
    public void write(byte b[], int off, int len) {
        if ((off < 0) || (off > b.length) || (len < 0) ||
                ((off + len) > b.length) || ((off + len) < 0)) {
            throw new IndexOutOfBoundsException();
        } else if (len == 0) {
            return;
        }
        int newcount = count + len;
        if (newcount > buf.length) {
            buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount));
        }
        System.arraycopy(b, off, buf, count, len);
        count = newcount;
    }

    /**
     * Writes the complete contents of this stream to {@code out}, as if by
     * {@code out.write(buf, 0, count)}.
     *
     * @throws IOException if an I/O error occurs
     */
    public void writeTo(OutputStream out) throws IOException {
        out.write(buf, 0, count);
    }

    /**
     * Discards all accumulated output while keeping the allocated buffer, so
     * the stream can be reused.
     */
    public void reset() {
        count = 0;
    }

    /**
     * Returns a newly allocated copy of exactly the bytes written so far.
     */
    public byte[] copiedByteArray() {
        return Arrays.copyOf(buf, count);
    }

    /**
     * Returns the underlying byte array without copying. Note: use
     * {@link #size()} to know the valid length; trailing bytes are garbage.
     */
    public byte[] unsafeByteArray() {
        return buf;
    }

    /**
     * Returns the number of valid bytes written so far.
     */
    public int size() {
        return count;
    }

    /**
     * Decodes the buffer's contents using the platform's default charset.
     */
    public String toString() {
        return new String(buf, 0, count);
    }

    /**
     * Decodes the buffer's contents using the named charset.
     * RESTORED: this method's body was missing in the mangled source, but its
     * javadoc and the {@code UnsupportedEncodingException} import were present.
     *
     * @throws UnsupportedEncodingException if the named charset is not supported
     */
    public String toString(String charsetName) throws UnsupportedEncodingException {
        return new String(buf, 0, count, charsetName);
    }

    /**
     * Closing has no effect; all methods remain usable afterwards.
     */
    public void close() throws IOException {
    }
}
+ * + * @param initialSize an int specifying the initial buffer size. + * @throws IllegalArgumentException if initialSize is negative + */ + public FastCharArrayWriter(int initialSize) { + if (initialSize < 0) { + throw new IllegalArgumentException("Negative initial size: " + + initialSize); + } + buf = new char[initialSize]; + } + + /** + * Writes a character to the buffer. + */ + public void write(int c) { + int newcount = count + 1; + if (newcount > buf.length) { + buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount)); + } + buf[count] = (char) c; + count = newcount; + } + + /** + * Writes characters to the buffer. + * + * @param c the data to be written + * @param off the start offset in the data + * @param len the number of chars that are written + */ + public void write(char c[], int off, int len) { + if ((off < 0) || (off > c.length) || (len < 0) || + ((off + len) > c.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return; + } + int newcount = count + len; + if (newcount > buf.length) { + buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount)); + } + System.arraycopy(c, off, buf, count, len); + count = newcount; + } + + /** + * Write a portion of a string to the buffer. + * + * @param str String to be written from + * @param off Offset from which to start reading characters + * @param len Number of characters to be written + */ + public void write(String str, int off, int len) { + int newcount = count + len; + if (newcount > buf.length) { + buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount)); + } + str.getChars(off, off + len, buf, count); + count = newcount; + } + + /** + * Writes the contents of the buffer to another character stream. + * + * @param out the output stream to write to + * @throws java.io.IOException If an I/O error occurs. 
+ */ + public void writeTo(Writer out) throws IOException { + out.write(buf, 0, count); + } + + /** + * Appends the specified character sequence to this writer. + *

    + *

    An invocation of this method of the form out.append(csq) + * behaves in exactly the same way as the invocation + *

    + *

    +     *     out.write(csq.toString()) 
    + *

    + *

    Depending on the specification of toString for the + * character sequence csq, the entire sequence may not be + * appended. For instance, invoking the toString method of a + * character buffer will return a subsequence whose content depends upon + * the buffer's position and limit. + * + * @param csq The character sequence to append. If csq is + * null, then the four characters "null" are + * appended to this writer. + * @return This writer + * @since 1.5 + */ + public FastCharArrayWriter append(CharSequence csq) { + String s = (csq == null ? "null" : csq.toString()); + write(s, 0, s.length()); + return this; + } + + /** + * Appends a subsequence of the specified character sequence to this writer. + *

    + *

    An invocation of this method of the form out.append(csq, start, + * end) when csq is not null, behaves in + * exactly the same way as the invocation + *

    + *

    +     *     out.write(csq.subSequence(start, end).toString()) 
    + * + * @param csq The character sequence from which a subsequence will be + * appended. If csq is null, then characters + * will be appended as if csq contained the four + * characters "null". + * @param start The index of the first character in the subsequence + * @param end The index of the character following the last character in the + * subsequence + * @return This writer + * @throws IndexOutOfBoundsException If start or end are negative, start + * is greater than end, or end is greater than + * csq.length() + * @since 1.5 + */ + public FastCharArrayWriter append(CharSequence csq, int start, int end) { + String s = (csq == null ? "null" : csq).subSequence(start, end).toString(); + write(s, 0, s.length()); + return this; + } + + /** + * Appends the specified character to this writer. + *

    + *

    An invocation of this method of the form out.append(c) + * behaves in exactly the same way as the invocation + *

    + *

    +     *     out.write(c) 
    + * + * @param c The 16-bit character to append + * @return This writer + * @since 1.5 + */ + public FastCharArrayWriter append(char c) { + write(c); + return this; + } + + /** + * Resets the buffer so that you can use it again without + * throwing away the already allocated buffer. + */ + public void reset() { + count = 0; + } + + /** + * Returns a copy of the input data. + * + * @return an array of chars copied from the input data. + */ + public char toCharArray()[] { + return Arrays.copyOf(buf, count); + } + + /** + * Returns the underlying char array. Note, use {@link #size()} in order to know the size of + * of the actual content within the array. + */ + public char[] unsafeCharArray() { + return buf; + } + + /** + * Returns the current size of the buffer. + * + * @return an int representing the current size of the buffer. + */ + public int size() { + return count; + } + + /** + * Converts input data to a string. + * + * @return the string. + */ + public String toString() { + return new String(buf, 0, count); + } + + /** + * Converts the input data to a string with trimmed whitespaces. + */ + public String toStringTrim() { + int st = 0; + int len = count; + char[] val = buf; /* avoid getfield opcode */ + + while ((st < len) && (val[st] <= ' ')) { + st++; + len--; + } + while ((st < len) && (val[len - 1] <= ' ')) { + len--; + } + return new String(buf, st, len); + } + + /** + * Flush the stream. + */ + public void flush() { + } + + /** + * Close the stream. This method does not release the buffer, since its + * contents might still be required. Note: Invoking this method in this class + * will have no effect. 
+ */ + public void close() { + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastDataOutputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastDataOutputStream.java new file mode 100644 index 00000000000..f590774334d --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastDataOutputStream.java @@ -0,0 +1,405 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import java.io.*; + +/** + * @author kimchy (Shay Banon) + */ +public class FastDataOutputStream extends FilterOutputStream implements DataOutput { + /** + * The number of bytes written to the data output stream so far. + * If this counter overflows, it will be wrapped to Integer.MAX_VALUE. + */ + protected int written; + + /** + * bytearr is initialized on demand by writeUTF + */ + private byte[] bytearr = null; + + /** + * Creates a new data output stream to write data to the specified + * underlying output stream. The counter written is + * set to zero. + * + * @param out the underlying output stream, to be saved for later + * use. 
+ * @see java.io.FilterOutputStream#out + */ + public FastDataOutputStream(OutputStream out) { + super(out); + } + + /** + * Increases the written counter by the specified value + * until it reaches Integer.MAX_VALUE. + */ + private void incCount(int value) { + int temp = written + value; + if (temp < 0) { + temp = Integer.MAX_VALUE; + } + written = temp; + } + + /** + * Writes the specified byte (the low eight bits of the argument + * b) to the underlying output stream. If no exception + * is thrown, the counter written is incremented by + * 1. + *

    + * Implements the write method of OutputStream. + * + * @param b the byte to be written. + * @throws java.io.IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public void write(int b) throws IOException { + out.write(b); + incCount(1); + } + + /** + * Writes len bytes from the specified byte array + * starting at offset off to the underlying output stream. + * If no exception is thrown, the counter written is + * incremented by len. + * + * @param b the data. + * @param off the start offset in the data. + * @param len the number of bytes to write. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public void write(byte b[], int off, int len) + throws IOException { + out.write(b, off, len); + incCount(len); + } + + /** + * Flushes this data output stream. This forces any buffered output + * bytes to be written out to the stream. + *

    + * The flush method of DataOutputStream + * calls the flush method of its underlying output stream. + * + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + * @see java.io.OutputStream#flush() + */ + public void flush() throws IOException { + out.flush(); + } + + /** + * Writes a boolean to the underlying output stream as + * a 1-byte value. The value true is written out as the + * value (byte)1; the value false is + * written out as the value (byte)0. If no exception is + * thrown, the counter written is incremented by + * 1. + * + * @param v a boolean value to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public final void writeBoolean(boolean v) throws IOException { + out.write(v ? 1 : 0); + incCount(1); + } + + /** + * Writes out a byte to the underlying output stream as + * a 1-byte value. If no exception is thrown, the counter + * written is incremented by 1. + * + * @param v a byte value to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public final void writeByte(int v) throws IOException { + out.write(v); + incCount(1); + } + + /** + * Writes a short to the underlying output stream as two + * bytes, high byte first. If no exception is thrown, the counter + * written is incremented by 2. + * + * @param v a short to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public final void writeShort(int v) throws IOException { + out.write((v >>> 8) & 0xFF); + out.write((v >>> 0) & 0xFF); + incCount(2); + } + + /** + * Writes a char to the underlying output stream as a + * 2-byte value, high byte first. If no exception is thrown, the + * counter written is incremented by 2. + * + * @param v a char value to be written. + * @throws IOException if an I/O error occurs. 
+ * @see java.io.FilterOutputStream#out + */ + public final void writeChar(int v) throws IOException { + out.write((v >>> 8) & 0xFF); + out.write((v >>> 0) & 0xFF); + incCount(2); + } + + /** + * Writes an int to the underlying output stream as four + * bytes, high byte first. If no exception is thrown, the counter + * written is incremented by 4. + * + * @param v an int to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public final void writeInt(int v) throws IOException { + out.write((v >>> 24) & 0xFF); + out.write((v >>> 16) & 0xFF); + out.write((v >>> 8) & 0xFF); + out.write((v >>> 0) & 0xFF); + incCount(4); + } + + private byte writeBuffer[] = new byte[8]; + + /** + * Writes a long to the underlying output stream as eight + * bytes, high byte first. In no exception is thrown, the counter + * written is incremented by 8. + * + * @param v a long to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public final void writeLong(long v) throws IOException { + writeBuffer[0] = (byte) (v >>> 56); + writeBuffer[1] = (byte) (v >>> 48); + writeBuffer[2] = (byte) (v >>> 40); + writeBuffer[3] = (byte) (v >>> 32); + writeBuffer[4] = (byte) (v >>> 24); + writeBuffer[5] = (byte) (v >>> 16); + writeBuffer[6] = (byte) (v >>> 8); + writeBuffer[7] = (byte) (v >>> 0); + out.write(writeBuffer, 0, 8); + incCount(8); + } + + /** + * Converts the float argument to an int using the + * floatToIntBits method in class Float, + * and then writes that int value to the underlying + * output stream as a 4-byte quantity, high byte first. If no + * exception is thrown, the counter written is + * incremented by 4. + * + * @param v a float value to be written. + * @throws IOException if an I/O error occurs. 
+ * @see java.io.FilterOutputStream#out + * @see java.lang.Float#floatToIntBits(float) + */ + public final void writeFloat(float v) throws IOException { + writeInt(Float.floatToIntBits(v)); + } + + /** + * Converts the double argument to a long using the + * doubleToLongBits method in class Double, + * and then writes that long value to the underlying + * output stream as an 8-byte quantity, high byte first. If no + * exception is thrown, the counter written is + * incremented by 8. + * + * @param v a double value to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + * @see java.lang.Double#doubleToLongBits(double) + */ + public final void writeDouble(double v) throws IOException { + writeLong(Double.doubleToLongBits(v)); + } + + /** + * Writes out the string to the underlying output stream as a + * sequence of bytes. Each character in the string is written out, in + * sequence, by discarding its high eight bits. If no exception is + * thrown, the counter written is incremented by the + * length of s. + * + * @param s a string of bytes to be written. + * @throws IOException if an I/O error occurs. + * @see java.io.FilterOutputStream#out + */ + public final void writeBytes(String s) throws IOException { + int len = s.length(); + for (int i = 0; i < len; i++) { + out.write((byte) s.charAt(i)); + } + incCount(len); + } + + /** + * Writes a string to the underlying output stream as a sequence of + * characters. Each character is written to the data output stream as + * if by the writeChar method. If no exception is + * thrown, the counter written is incremented by twice + * the length of s. + * + * @param s a String value to be written. + * @throws IOException if an I/O error occurs. 
+ * @see java.io.DataOutputStream#writeChar(int) + * @see java.io.FilterOutputStream#out + */ + public final void writeChars(String s) throws IOException { + int len = s.length(); + for (int i = 0; i < len; i++) { + int v = s.charAt(i); + out.write((v >>> 8) & 0xFF); + out.write((v >>> 0) & 0xFF); + } + incCount(len * 2); + } + + /** + * Writes a string to the underlying output stream using + * modified UTF-8 + * encoding in a machine-independent manner. + *

    + * First, two bytes are written to the output stream as if by the + * writeShort method giving the number of bytes to + * follow. This value is the number of bytes actually written out, + * not the length of the string. Following the length, each character + * of the string is output, in sequence, using the modified UTF-8 encoding + * for the character. If no exception is thrown, the counter + * written is incremented by the total number of + * bytes written to the output stream. This will be at least two + * plus the length of str, and at most two plus + * thrice the length of str. + * + * @param str a string to be written. + * @throws IOException if an I/O error occurs. + */ + public final void writeUTF(String str) throws IOException { + writeUTF(str, this); + } + + /** + * Writes a string to the specified DataOutput using + * modified UTF-8 + * encoding in a machine-independent manner. + *

    + * First, two bytes are written to out as if by the writeShort + * method giving the number of bytes to follow. This value is the number of + * bytes actually written out, not the length of the string. Following the + * length, each character of the string is output, in sequence, using the + * modified UTF-8 encoding for the character. If no exception is thrown, the + * counter written is incremented by the total number of + * bytes written to the output stream. This will be at least two + * plus the length of str, and at most two plus + * thrice the length of str. + * + * @param str a string to be written. + * @param out destination to write to + * @return The number of bytes written out. + * @throws IOException if an I/O error occurs. + */ + static int writeUTF(String str, DataOutput out) throws IOException { + int strlen = str.length(); + int utflen = 0; + int c, count = 0; + + /* use charAt instead of copying String to char array */ + for (int i = 0; i < strlen; i++) { + c = str.charAt(i); + if ((c >= 0x0001) && (c <= 0x007F)) { + utflen++; + } else if (c > 0x07FF) { + utflen += 3; + } else { + utflen += 2; + } + } + + if (utflen > 65535) + throw new UTFDataFormatException( + "encoded string too long: " + utflen + " bytes"); + + byte[] bytearr = null; + if (out instanceof FastDataOutputStream) { + FastDataOutputStream dos = (FastDataOutputStream) out; + if (dos.bytearr == null || (dos.bytearr.length < (utflen + 2))) + dos.bytearr = new byte[(utflen * 2) + 2]; + bytearr = dos.bytearr; + } else { + bytearr = new byte[utflen + 2]; + } + + bytearr[count++] = (byte) ((utflen >>> 8) & 0xFF); + bytearr[count++] = (byte) ((utflen >>> 0) & 0xFF); + + int i = 0; + for (i = 0; i < strlen; i++) { + c = str.charAt(i); + if (!((c >= 0x0001) && (c <= 0x007F))) break; + bytearr[count++] = (byte) c; + } + + for (; i < strlen; i++) { + c = str.charAt(i); + if ((c >= 0x0001) && (c <= 0x007F)) { + bytearr[count++] = (byte) c; + + } else if (c > 0x07FF) { + bytearr[count++] = 
(byte) (0xE0 | ((c >> 12) & 0x0F)); + bytearr[count++] = (byte) (0x80 | ((c >> 6) & 0x3F)); + bytearr[count++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } else { + bytearr[count++] = (byte) (0xC0 | ((c >> 6) & 0x1F)); + bytearr[count++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } + } + out.write(bytearr, 0, utflen + 2); + return utflen + 2; + } + + /** + * Returns the current value of the counter written, + * the number of bytes written to this data output stream so far. + * If the counter overflows, it will be wrapped to Integer.MAX_VALUE. + * + * @return the value of the written field. + * @see java.io.DataOutputStream#written + */ + public final int size() { + return written; + } +} + diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastStringReader.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastStringReader.java new file mode 100644 index 00000000000..fb1d56809b9 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FastStringReader.java @@ -0,0 +1,187 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.concurrent.NotThreadSafe; + +import java.io.IOException; +import java.io.Reader; + +/** + * A character stream whose source is a string that is not thread safe + * + * @author kimchy (Shay Banon) + */ +@NotThreadSafe +public class FastStringReader extends Reader { + + private String str; + private int length; + private int next = 0; + private int mark = 0; + + /** + * Creates a new string reader. + * + * @param s String providing the character stream. + */ + public FastStringReader(String s) { + this.str = s; + this.length = s.length(); + } + + /** + * Check to make sure that the stream has not been closed + */ + private void ensureOpen() throws IOException { + if (length == -1) + throw new IOException("Stream closed"); + } + + /** + * Reads a single character. + * + * @return The character read, or -1 if the end of the stream has been + * reached + * @throws IOException If an I/O error occurs + */ + @Override public int read() throws IOException { + ensureOpen(); + if (next >= length) + return -1; + return str.charAt(next++); + } + + /** + * Reads characters into a portion of an array. + * + * @param cbuf Destination buffer + * @param off Offset at which to start writing characters + * @param len Maximum number of characters to read + * @return The number of characters read, or -1 if the end of the + * stream has been reached + * @throws IOException If an I/O error occurs + */ + @Override public int read(char cbuf[], int off, int len) throws IOException { + ensureOpen(); + if ((off < 0) || (off > cbuf.length) || (len < 0) || + ((off + len) > cbuf.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + if (next >= length) + return -1; + int n = Math.min(length - next, len); + str.getChars(next, next + n, cbuf, off); + next += n; + return n; + } + + /** + * Skips the specified number of characters in the stream. 
Returns + * the number of characters that were skipped. + *

    + *

    The ns parameter may be negative, even though the + * skip method of the {@link Reader} superclass throws + * an exception in this case. Negative values of ns cause the + * stream to skip backwards. Negative return values indicate a skip + * backwards. It is not possible to skip backwards past the beginning of + * the string. + *

    + *

    If the entire string has been read or skipped, then this method has + * no effect and always returns 0. + * + * @throws IOException If an I/O error occurs + */ + @Override public long skip(long ns) throws IOException { + ensureOpen(); + if (next >= length) + return 0; + // Bound skip by beginning and end of the source + long n = Math.min(length - next, ns); + n = Math.max(-next, n); + next += n; + return n; + } + + /** + * Tells whether this stream is ready to be read. + * + * @return True if the next read() is guaranteed not to block for input + * @throws IOException If the stream is closed + */ + @Override public boolean ready() throws IOException { + ensureOpen(); + return true; + } + + /** + * Tells whether this stream supports the mark() operation, which it does. + */ + @Override public boolean markSupported() { + return true; + } + + /** + * Marks the present position in the stream. Subsequent calls to reset() + * will reposition the stream to this point. + * + * @param readAheadLimit Limit on the number of characters that may be + * read while still preserving the mark. Because + * the stream's input comes from a string, there + * is no actual limit, so this argument must not + * be negative, but is otherwise ignored. + * @throws IllegalArgumentException If readAheadLimit is < 0 + * @throws IOException If an I/O error occurs + */ + @Override public void mark(int readAheadLimit) throws IOException { + if (readAheadLimit < 0) { + throw new IllegalArgumentException("Read-ahead limit < 0"); + } + ensureOpen(); + mark = next; + } + + /** + * Resets the stream to the most recent mark, or to the beginning of the + * string if it has never been marked. + * + * @throws IOException If an I/O error occurs + */ + @Override public void reset() throws IOException { + ensureOpen(); + next = mark; + } + + /** + * Closes the stream and releases any system resources associated with + * it. 
Once the stream has been closed, further read(), + * ready(), mark(), or reset() invocations will throw an IOException. + * Closing a previously closed stream has no effect. + */ + public void close() { + length = -1; + } + + @Override public String toString() { + return str; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FileSystemUtils.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FileSystemUtils.java new file mode 100644 index 00000000000..9fda7a3f3df --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/FileSystemUtils.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import java.io.File; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.io.RandomAccessFile; + +/** + * @author kimchy (Shay Banon) + */ +public class FileSystemUtils { + + public static boolean deleteRecursively(File root) { + return deleteRecursively(root, true); + } + + /** + * Delete the supplied {@link java.io.File} - for directories, + * recursively delete any nested directories or files as well. 
+ * + * @param root the root File to delete + * @param deleteRoot whether or not to delete the root itself or just the content of the root. + * @return true if the File was deleted, + * otherwise false + */ + public static boolean deleteRecursively(File root, boolean deleteRoot) { + if (root != null && root.exists()) { + if (root.isDirectory()) { + File[] children = root.listFiles(); + if (children != null) { + for (File aChildren : children) { + deleteRecursively(aChildren); + } + } + } + + if (deleteRoot) { + return root.delete(); + } else { + return true; + } + } + return false; + } + + public static void syncFile(File fileToSync) throws IOException { + boolean success = false; + int retryCount = 0; + IOException exc = null; + while (!success && retryCount < 5) { + retryCount++; + RandomAccessFile file = null; + try { + try { + file = new RandomAccessFile(fileToSync, "rw"); + file.getFD().sync(); + success = true; + } finally { + if (file != null) + file.close(); + } + } catch (IOException ioe) { + if (exc == null) + exc = ioe; + try { + // Pause 5 msec + Thread.sleep(5); + } catch (InterruptedException ie) { + throw new InterruptedIOException(ie.getMessage()); + } + } + } + } + + private FileSystemUtils() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/HostResolver.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/HostResolver.java new file mode 100644 index 00000000000..05409fbab28 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/HostResolver.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.SocketException; +import java.net.UnknownHostException; +import java.util.Enumeration; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class HostResolver { + + public static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bindHost"; + public static final String GLOBAL_NETWORK_PUBLISHHOST_SETTING = "network.publishHost"; + + public static final String LOCAL_IP = "#local:ip#"; + public static final String LOCAL_HOST = "#local:host#"; + public static final String LOCAL_CANONICALHOST = "#local:canonicalhost#"; + + public static boolean isIPv4() { + return System.getProperty("java.net.preferIPv4Stack") != null && System.getProperty("java.net.preferIPv4Stack").equals("true"); + } + + public static InetAddress resultBindHostAddress(String bindHost, Settings settings) throws IOException { + return resultBindHostAddress(bindHost, settings, null); + } + + public static InetAddress resultBindHostAddress(String bindHost, Settings settings, String defaultValue2) throws IOException { + return resolveInetAddress(bindHost, settings.get(GLOBAL_NETWORK_BINDHOST_SETTING), defaultValue2); + } + + public static InetAddress resultPublishHostAddress(String publishHost, Settings settings) throws IOException { + return resultPublishHostAddress(publishHost, settings, null); + } + + public static InetAddress resultPublishHostAddress(String 
publishHost, Settings settings, String defaultValue2) throws IOException { + return resolveInetAddress(publishHost, settings.get(GLOBAL_NETWORK_PUBLISHHOST_SETTING), defaultValue2); + } + + public static InetAddress resolveInetAddress(String host, String defaultValue1, String defaultValue2) throws UnknownHostException, IOException { + String resolvedHost = resolveHost(host, defaultValue1, defaultValue2); + if (resolvedHost == null) { + return null; + } + return InetAddress.getByName(resolvedHost); + } + + public static String resolveHost(String host, String defaultValue1, String defaultValue2) throws UnknownHostException, IOException { + if (host == null) { + host = defaultValue1; + } + if (host == null) { + host = defaultValue2; + } + if (host == null) { + return null; + } + if (host.startsWith("#") && host.endsWith("#")) { + host = host.substring(1, host.length() - 1); + if (host.equals("local:ip")) { + return InetAddress.getLocalHost().getHostAddress(); + } else if (host.equalsIgnoreCase("local:host")) { + return InetAddress.getLocalHost().getHostName(); + } else if (host.equalsIgnoreCase("local:canonicalhost")) { + return InetAddress.getLocalHost().getCanonicalHostName(); + } else { + String name = host.substring(0, host.indexOf(':')); + String type = host.substring(host.indexOf(':') + 1); + Enumeration niEnum; + try { + niEnum = NetworkInterface.getNetworkInterfaces(); + } catch (SocketException e) { + throw new IOException("Failed to get network interfaces", e); + } + while (niEnum.hasMoreElements()) { + NetworkInterface ni = niEnum.nextElement(); + if (name.equals(ni.getName()) || name.equals(ni.getDisplayName())) { + Enumeration inetEnum = ni.getInetAddresses(); + while (inetEnum.hasMoreElements()) { + InetAddress addr = inetEnum.nextElement(); + if (addr.getHostAddress().equals("127.0.0.1")) { + // ignore local host + continue; + } + if (addr.getHostAddress().indexOf(".") == -1) { + // ignore address like 0:0:0:0:0:0:0:1 + continue; + } + if 
("host".equalsIgnoreCase(type)) { + return addr.getHostName(); + } else if ("canonicalhost".equalsIgnoreCase(type)) { + return addr.getCanonicalHostName(); + } else { + return addr.getHostAddress(); + } + } + } + } + } + throw new IOException("Failed to find network interface for [" + host + "]"); + } + InetAddress inetAddress = java.net.InetAddress.getByName(host); + String hostAddress = inetAddress.getHostAddress(); + String hostName = inetAddress.getHostName(); + String canonicalHostName = inetAddress.getCanonicalHostName(); + if (host.equalsIgnoreCase(hostAddress)) { + return hostAddress; + } else if (host.equalsIgnoreCase(canonicalHostName)) { + return canonicalHostName; + } else { + return hostName; //resolve property into actual lower/upper case + } + } + + private HostResolver() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/Serializers.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/Serializers.java new file mode 100644 index 00000000000..0aa382f3148 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/Serializers.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; + +/** + * @author kimchy (Shay Banon) + */ +public final class Serializers { + + public static byte[] throwableToBytes(Throwable t) throws IOException { + FastByteArrayOutputStream os = new FastByteArrayOutputStream(); + ThrowableObjectOutputStream oos = new ThrowableObjectOutputStream(os); + oos.writeObject(t); + oos.close(); + return os.unsafeByteArray(); + } + + public static Throwable throwableFromBytes(byte[] bytes) throws IOException, ClassNotFoundException { + FastByteArrayInputStream is = new FastByteArrayInputStream(bytes); + ThrowableObjectInputStream ois = new ThrowableObjectInputStream(is); + Throwable t = (Throwable) ois.readObject(); + ois.close(); + return t; + } + + public static void objectToStream(Serializable obj, DataOutput out) throws IOException { + byte[] bytes = objectToBytes(obj); + out.writeInt(bytes.length); + out.write(bytes); + } + + public static Object objectFromStream(DataInput in) throws ClassNotFoundException, IOException { + byte[] bytes = new byte[in.readInt()]; + in.readFully(bytes); + return objectFromBytes(bytes); + } + + public static byte[] objectToBytes(Serializable obj) throws IOException { + FastByteArrayOutputStream os = new FastByteArrayOutputStream(); + CompactObjectOutputStream oos = new CompactObjectOutputStream(os); + oos.writeObject(obj); + oos.close(); + return os.unsafeByteArray(); + } + + public static Object objectFromBytes(byte[] bytes) throws IOException, ClassNotFoundException { + FastByteArrayInputStream is = new FastByteArrayInputStream(bytes); + CompactObjectInputStream ois = new CompactObjectInputStream(is); + Object obj = ois.readObject(); + ois.close(); + return obj; + } + + private Serializers() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/Streamable.java 
/**
 * A unit of serialization that writes itself to a {@link DataOutput} and
 * populates itself back from a {@link DataInput}. Implementations in this
 * package typically expose a no-arg constructor so the read side can create
 * an empty instance first and then call {@link #readFrom(DataInput)}.
 *
 * @author kimchy (Shay Banon)
 */
public interface Streamable {

    /**
     * Populates this instance from the given input.
     */
    void readFrom(DataInput in) throws IOException, ClassNotFoundException;

    /**
     * Writes this instance to the given output.
     */
    void writeTo(DataOutput out) throws IOException;
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.Preconditions; + +import java.io.*; + +/** + * Simple utility methods for file and stream copying. + * All copy methods use a block size of 4096 bytes, + * and close all affected streams when done. + *

    + *

    Mainly for use within the framework, + * but also useful for application code. + * + * @author kimchy (Shay Banon) + */ +public abstract class Streams { + + public static final int BUFFER_SIZE = 4096; + + + //--------------------------------------------------------------------- + // Copy methods for java.io.File + //--------------------------------------------------------------------- + + /** + * Copy the contents of the given input File to the given output File. + * + * @param in the file to copy from + * @param out the file to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + public static int copy(File in, File out) throws IOException { + Preconditions.checkNotNull(in, "No input File specified"); + Preconditions.checkNotNull(out, "No output File specified"); + return copy(new BufferedInputStream(new FileInputStream(in)), + new BufferedOutputStream(new FileOutputStream(out))); + } + + /** + * Copy the contents of the given byte array to the given output File. + * + * @param in the byte array to copy from + * @param out the file to copy to + * @throws IOException in case of I/O errors + */ + public static void copy(byte[] in, File out) throws IOException { + Preconditions.checkNotNull(in, "No input byte array specified"); + Preconditions.checkNotNull(out, "No output File specified"); + ByteArrayInputStream inStream = new ByteArrayInputStream(in); + OutputStream outStream = new BufferedOutputStream(new FileOutputStream(out)); + copy(inStream, outStream); + } + + /** + * Copy the contents of the given input File into a new byte array. 
+ * + * @param in the file to copy from + * @return the new byte array that has been copied to + * @throws IOException in case of I/O errors + */ + public static byte[] copyToByteArray(File in) throws IOException { + Preconditions.checkNotNull(in, "No input File specified"); + return copyToByteArray(new BufferedInputStream(new FileInputStream(in))); + } + + + //--------------------------------------------------------------------- + // Copy methods for java.io.InputStream / java.io.OutputStream + //--------------------------------------------------------------------- + + /** + * Copy the contents of the given InputStream to the given OutputStream. + * Closes both streams when done. + * + * @param in the stream to copy from + * @param out the stream to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + public static int copy(InputStream in, OutputStream out) throws IOException { + Preconditions.checkNotNull(in, "No InputStream specified"); + Preconditions.checkNotNull(out, "No OutputStream specified"); + try { + int byteCount = 0; + byte[] buffer = new byte[BUFFER_SIZE]; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + byteCount += bytesRead; + } + out.flush(); + return byteCount; + } + finally { + try { + in.close(); + } + catch (IOException ex) { + // do nothing + } + try { + out.close(); + } + catch (IOException ex) { + // do nothing + } + } + } + + /** + * Copy the contents of the given byte array to the given OutputStream. + * Closes the stream when done. 
+ * + * @param in the byte array to copy from + * @param out the OutputStream to copy to + * @throws IOException in case of I/O errors + */ + public static void copy(byte[] in, OutputStream out) throws IOException { + Preconditions.checkNotNull(in, "No input byte array specified"); + Preconditions.checkNotNull(out, "No OutputStream specified"); + try { + out.write(in); + } + finally { + try { + out.close(); + } + catch (IOException ex) { + // do nothing + } + } + } + + /** + * Copy the contents of the given InputStream into a new byte array. + * Closes the stream when done. + * + * @param in the stream to copy from + * @return the new byte array that has been copied to + * @throws IOException in case of I/O errors + */ + public static byte[] copyToByteArray(InputStream in) throws IOException { + ByteArrayOutputStream out = new ByteArrayOutputStream(BUFFER_SIZE); + copy(in, out); + return out.toByteArray(); + } + + + //--------------------------------------------------------------------- + // Copy methods for java.io.Reader / java.io.Writer + //--------------------------------------------------------------------- + + /** + * Copy the contents of the given Reader to the given Writer. + * Closes both when done. 
+ * + * @param in the Reader to copy from + * @param out the Writer to copy to + * @return the number of characters copied + * @throws IOException in case of I/O errors + */ + public static int copy(Reader in, Writer out) throws IOException { + Preconditions.checkNotNull(in, "No Reader specified"); + Preconditions.checkNotNull(out, "No Writer specified"); + try { + int byteCount = 0; + char[] buffer = new char[BUFFER_SIZE]; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + byteCount += bytesRead; + } + out.flush(); + return byteCount; + } + finally { + try { + in.close(); + } + catch (IOException ex) { + // do nothing + } + try { + out.close(); + } + catch (IOException ex) { + // do nothing + } + } + } + + /** + * Copy the contents of the given String to the given output Writer. + * Closes the write when done. + * + * @param in the String to copy from + * @param out the Writer to copy to + * @throws IOException in case of I/O errors + */ + public static void copy(String in, Writer out) throws IOException { + Preconditions.checkNotNull(in, "No input String specified"); + Preconditions.checkNotNull(out, "No Writer specified"); + try { + out.write(in); + } + finally { + try { + out.close(); + } + catch (IOException ex) { + // do nothing + } + } + } + + /** + * Copy the contents of the given Reader into a String. + * Closes the reader when done. 
+ * + * @param in the reader to copy from + * @return the String that has been copied to + * @throws IOException in case of I/O errors + */ + public static String copyToString(Reader in) throws IOException { + StringWriter out = new StringWriter(); + copy(in, out); + return out.toString(); + } + + public static String copyToStringFromClasspath(ClassLoader classLoader, String path) throws IOException { + InputStream is = classLoader.getResourceAsStream(path); + if (is == null) { + throw new FileNotFoundException("Resource [" + path + "] not found in classpath with class loader [" + classLoader + "]"); + } + return copyToString(new InputStreamReader(is)); + } + + public static String copyToStringFromClasspath(String path) throws IOException { + InputStream is = Streams.class.getResourceAsStream(path); + if (is == null) { + throw new FileNotFoundException("Resource [" + path + "] not found in classpath"); + } + return copyToString(new InputStreamReader(is)); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/StringBuilderWriter.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/StringBuilderWriter.java new file mode 100644 index 00000000000..073bf834659 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/StringBuilderWriter.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import org.elasticsearch.util.concurrent.NotThreadSafe; + +import java.io.Writer; + +/** + * A Writer based on {@link StringBuilder}. Also alows for thread local reuse of {@link StringBuilder} + * by using: StringBuilderWriter.Cached.cached() in order to obtain the cached writer. Note, + * in such cases, the {@link #getBuilder()} should be called and used (usually toString it) + * before another usage of the writer. + * + * @author kimchy (Shay Banon) + */ +@NotThreadSafe +public class StringBuilderWriter extends Writer { + + /** + * A thread local based cache of {@link StringBuilderWriter}. + */ + public static class Cached { + + private static final ThreadLocal cache = new ThreadLocal() { + @Override protected StringBuilderWriter initialValue() { + return new StringBuilderWriter(); + } + }; + + /** + * Returns the cached thread local writer, with its internal {@link StringBuilder} cleared. + */ + public static StringBuilderWriter cached() { + StringBuilderWriter writer = cache.get(); + writer.getBuilder().setLength(0); + return writer; + } + } + + private final StringBuilder builder; + + /** + * Construct a new {@link StringBuilder} instance with default capacity. + */ + public StringBuilderWriter() { + this.builder = new StringBuilder(); + } + + /** + * Construct a new {@link StringBuilder} instance with the specified capacity. 
+ * + * @param capacity The initial capacity of the underlying {@link StringBuilder} + */ + public StringBuilderWriter(int capacity) { + this.builder = new StringBuilder(capacity); + } + + /** + * Construct a new instance with the specified {@link StringBuilder}. + * + * @param builder The String builder + */ + public StringBuilderWriter(StringBuilder builder) { + this.builder = (builder != null ? builder : new StringBuilder()); + } + + /** + * Append a single character to this Writer. + * + * @param value The character to append + * @return This writer instance + */ + public Writer append(char value) { + builder.append(value); + return this; + } + + /** + * Append a character sequence to this Writer. + * + * @param value The character to append + * @return This writer instance + */ + public Writer append(CharSequence value) { + builder.append(value); + return this; + } + + /** + * Append a portion of a character sequence to the {@link StringBuilder}. + * + * @param value The character to append + * @param start The index of the first character + * @param end The index of the last character + 1 + * @return This writer instance + */ + public Writer append(CharSequence value, int start, int end) { + builder.append(value, start, end); + return this; + } + + /** + * Closing this writer has no effect. + */ + public void close() { + } + + /** + * Flushing this writer has no effect. + */ + public void flush() { + } + + + /** + * Write a String to the {@link StringBuilder}. + * + * @param value The value to write + */ + public void write(String value) { + if (value != null) { + builder.append(value); + } + } + + /** + * Write a portion of a character array to the {@link StringBuilder}. 
+ * + * @param value The value to write + * @param offset The index of the first character + * @param length The number of characters to write + */ + public void write(char[] value, int offset, int length) { + if (value != null) { + builder.append(value, offset, length); + } + } + + /** + * Return the underlying builder. + * + * @return The underlying builder + */ + public StringBuilder getBuilder() { + return builder; + } + + /** + * Returns {@link StringBuilder#toString()}. + * + * @return The contents of the String builder. + */ + public String toString() { + return builder.toString(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/StringStreamable.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/StringStreamable.java new file mode 100644 index 00000000000..aba3dffe29a --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/StringStreamable.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class StringStreamable implements Streamable { + + private String value; + + public StringStreamable() { + } + + public StringStreamable(String value) { + this.value = value; + } + + public void set(String newValue) { + value = newValue; + } + + public String get() { + return this.value; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + value = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(value); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ThrowableObjectInputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ThrowableObjectInputStream.java new file mode 100644 index 00000000000..ad3ce1a7746 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ThrowableObjectInputStream.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io; + +import java.io.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ThrowableObjectInputStream extends ObjectInputStream { + + private final ClassLoader classLoader; + + public ThrowableObjectInputStream(InputStream in) throws IOException { + this(in, null); + } + + public ThrowableObjectInputStream(InputStream in, ClassLoader classLoader) throws IOException { + super(in); + this.classLoader = classLoader; + } + + @Override + protected void readStreamHeader() throws IOException, StreamCorruptedException { + int version = readByte() & 0xFF; + if (version != STREAM_VERSION) { + throw new StreamCorruptedException( + "Unsupported version: " + version); + } + } + + @Override + protected ObjectStreamClass readClassDescriptor() + throws IOException, ClassNotFoundException { + int type = read(); + if (type < 0) { + throw new EOFException(); + } + switch (type) { + case ThrowableObjectOutputStream.TYPE_EXCEPTION: + return ObjectStreamClass.lookup(Exception.class); + case ThrowableObjectOutputStream.TYPE_STACKTRACEELEMENT: + return ObjectStreamClass.lookup(StackTraceElement.class); + case ThrowableObjectOutputStream.TYPE_FAT_DESCRIPTOR: + return super.readClassDescriptor(); + case ThrowableObjectOutputStream.TYPE_THIN_DESCRIPTOR: + String className = readUTF(); + Class clazz = loadClass(className); + return ObjectStreamClass.lookup(clazz); + default: + throw new StreamCorruptedException( + "Unexpected class descriptor type: " + type); + } + } + + @Override + protected Class resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { + String className = desc.getName(); + try { + return loadClass(className); + } catch (ClassNotFoundException ex) { + return super.resolveClass(desc); + } + } + + protected Class loadClass(String className) throws ClassNotFoundException { + Class clazz; + ClassLoader classLoader = this.classLoader; + if (classLoader == null) { + classLoader = 
Thread.currentThread().getContextClassLoader(); + } + + if (classLoader != null) { + clazz = classLoader.loadClass(className); + } else { + clazz = Class.forName(className); + } + return clazz; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ThrowableObjectOutputStream.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ThrowableObjectOutputStream.java new file mode 100644 index 00000000000..b0039ad8451 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/ThrowableObjectOutputStream.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * An {@link ObjectOutputStream} specialized for writing throwables compactly:
 * the standard stream header is replaced by a single version byte, and class
 * descriptors are encoded as one type byte plus (for ordinary classes) just
 * the class name. Must be read back with {@link ThrowableObjectInputStream}.
 *
 * @author kimchy (Shay Banon)
 */
public class ThrowableObjectOutputStream extends ObjectOutputStream {

    static final int TYPE_FAT_DESCRIPTOR = 0;
    static final int TYPE_THIN_DESCRIPTOR = 1;

    private static final String EXCEPTION_CLASSNAME = Exception.class.getName();
    static final int TYPE_EXCEPTION = 2;

    private static final String STACKTRACEELEMENT_CLASSNAME = StackTraceElement.class.getName();
    static final int TYPE_STACKTRACEELEMENT = 3;


    public ThrowableObjectOutputStream(OutputStream out) throws IOException {
        super(out);
    }

    @Override protected void writeStreamHeader() throws IOException {
        // single version byte instead of the standard magic + version header
        writeByte(STREAM_VERSION);
    }

    @Override protected void writeClassDescriptor(ObjectStreamClass desc) throws IOException {
        String name = desc.getName();
        if (EXCEPTION_CLASSNAME.equals(name)) {
            // well-known type: one marker byte, no payload
            write(TYPE_EXCEPTION);
        } else if (STACKTRACEELEMENT_CLASSNAME.equals(name)) {
            write(TYPE_STACKTRACEELEMENT);
        } else {
            Class type = desc.forClass();
            if (type.isPrimitive() || type.isArray()) {
                // primitives and arrays cannot be reconstructed from the name
                // alone; fall back to the full descriptor
                write(TYPE_FAT_DESCRIPTOR);
                super.writeClassDescriptor(desc);
            } else {
                write(TYPE_THIN_DESCRIPTOR);
                writeUTF(name);
            }
        }
    }
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class VoidStreamable implements Streamable { + + public static final VoidStreamable INSTANCE = new VoidStreamable(); + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + } + + @Override public void writeTo(DataOutput out) throws IOException { + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/CompressedString.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/CompressedString.java new file mode 100644 index 00000000000..79478c23419 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/CompressedString.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io.compression; + +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class CompressedString implements Streamable { + + private byte[] compressedString; + + private transient String string; + + CompressedString() { + } + + public CompressedString(String string) throws IOException { + this.string = string; + this.compressedString = new ZipCompressor().compressString(string); + } + + public String string() throws IOException { + if (string != null) { + return string; + } + string = new ZipCompressor().decompressString(compressedString); + return string; + } + + public static CompressedString readCompressedString(DataInput in) throws IOException, ClassNotFoundException { + CompressedString result = new CompressedString(); + result.readFrom(in); + return result; + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + compressedString = new byte[in.readInt()]; + in.readFully(compressedString); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeInt(compressedString.length); + out.write(compressedString); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/Compressor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/Compressor.java new file mode 100644 index 00000000000..5fc4178455d --- /dev/null +++ 
/**
 * Common contract for byte/string compression codecs (gzip, zip/deflate, lzf).
 *
 * <p>String variants convert via UTF-8 in the provided implementations, so
 * {@link #decompressString(byte[])} should only be fed bytes produced by
 * {@link #compressString(String)}.
 *
 * @author kimchy (Shay Banon)
 */
public interface Compressor {

    /**
     * Compresses the given bytes, returning a newly allocated array holding
     * the compressed form.
     */
    byte[] compress(byte[] value) throws IOException;

    /**
     * Compresses the given string (implementations encode it to UTF-8 first).
     */
    byte[] compressString(String value) throws IOException;

    /**
     * Decompresses bytes previously produced by {@link #compress(byte[])}.
     */
    byte[] decompress(byte[] value) throws IOException;

    /**
     * Decompresses bytes previously produced by
     * {@link #compressString(String)} back into the original string.
     */
    String decompressString(byte[] value) throws IOException;
}
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io.compression; + +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.io.FastByteArrayInputStream; +import org.elasticsearch.util.io.FastByteArrayOutputStream; + +import java.io.IOException; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +/** + * @author kimchy (Shay Banon) + */ +public class GZIPCompressor implements Compressor { + + private static class Cached { + + private static final ThreadLocal cache = new ThreadLocal() { + @Override protected CompressHolder initialValue() { + return new CompressHolder(); + } + }; + + /** + * Returns the cached thread local byte strean, with its internal stream cleared. 
+ */ + public static CompressHolder cached() { + CompressHolder ch = cache.get(); + ch.bos.reset(); + return ch; + } + } + + private static class CompressHolder { + final FastByteArrayOutputStream bos = new FastByteArrayOutputStream(); + final byte[] buffer = new byte[(int) SizeUnit.KB.toBytes(5)]; + final UnicodeUtil.UTF16Result utf16Result = new UnicodeUtil.UTF16Result(); + final UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result(); + } + + + public byte[] compress(byte[] value, int offset, int length) throws IOException { + return compress(value, offset, length, Cached.cached()); + } + + @Override public byte[] compress(byte[] value) throws IOException { + return compress(value, 0, value.length); + } + + @Override public byte[] compressString(String value) throws IOException { + CompressHolder ch = Cached.cached(); + UnicodeUtil.UTF16toUTF8(value, 0, value.length(), ch.utf8Result); + return compress(ch.utf8Result.result, 0, ch.utf8Result.length, ch); + } + + @Override public byte[] decompress(byte[] value) throws IOException { + CompressHolder ch = Cached.cached(); + decompress(value, ch); + return ch.bos.copiedByteArray(); + } + + @Override public String decompressString(byte[] value) throws IOException { + CompressHolder ch = Cached.cached(); + decompress(value); + UnicodeUtil.UTF8toUTF16(ch.bos.unsafeByteArray(), 0, ch.bos.size(), ch.utf16Result); + return new String(ch.utf16Result.result, 0, ch.utf16Result.length); + } + + private static void decompress(byte[] value, CompressHolder ch) throws IOException { + GZIPInputStream in = new GZIPInputStream(new FastByteArrayInputStream(value)); + try { + int bytesRead; + while ((bytesRead = in.read(ch.buffer)) != -1) { + ch.bos.write(ch.buffer, 0, bytesRead); + } + ch.bos.flush(); + } + finally { + try { + in.close(); + } + catch (IOException ex) { + // do nothing + } + } + } + + private static byte[] compress(byte[] value, int offset, int length, CompressHolder ch) throws IOException { + GZIPOutputStream 
os = new GZIPOutputStream(ch.bos); + os.write(value, offset, length); + os.finish(); + os.close(); + return ch.bos.copiedByteArray(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/LzfCompressor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/LzfCompressor.java new file mode 100644 index 00000000000..7dfb847b126 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/LzfCompressor.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io.compression; + +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.util.io.compression.lzf.LZFDecoder; +import org.elasticsearch.util.io.compression.lzf.LZFEncoder; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class LzfCompressor implements Compressor { + + private static class Cached { + + private static final ThreadLocal cache = new ThreadLocal() { + @Override protected CompressHolder initialValue() { + return new CompressHolder(); + } + }; + + public static CompressHolder cached() { + return cache.get(); + } + } + + private static class CompressHolder { + final UnicodeUtil.UTF16Result utf16Result = new UnicodeUtil.UTF16Result(); + final UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result(); + } + + @Override public byte[] compress(byte[] value) throws IOException { + return LZFEncoder.encode(value, value.length); + } + + @Override public byte[] compressString(String value) throws IOException { + CompressHolder ch = Cached.cached(); + UnicodeUtil.UTF16toUTF8(value, 0, value.length(), ch.utf8Result); + return LZFEncoder.encode(ch.utf8Result.result, ch.utf8Result.length); + } + + @Override public byte[] decompress(byte[] value) throws IOException { + return LZFDecoder.decode(value, value.length); + } + + @Override public String decompressString(byte[] value) throws IOException { + CompressHolder ch = Cached.cached(); + byte[] result = decompress(value); + UnicodeUtil.UTF8toUTF16(result, 0, result.length, ch.utf16Result); + return new String(ch.utf16Result.result, 0, ch.utf16Result.length); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/ZipCompressor.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/ZipCompressor.java new file mode 100644 index 00000000000..2c6249baed8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/ZipCompressor.java @@ 
-0,0 +1,132 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io.compression; + +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.io.FastByteArrayOutputStream; + +import java.io.IOException; +import java.util.zip.DataFormatException; +import java.util.zip.Deflater; +import java.util.zip.Inflater; + +/** + * @author kimchy (Shay Banon) + */ +public class ZipCompressor implements Compressor { + + private static class Cached { + + private static final ThreadLocal cache = new ThreadLocal() { + @Override protected CompressHolder initialValue() { + return new CompressHolder(); + } + }; + + /** + * Returns the cached thread local byte strean, with its internal stream cleared. 
+ */ + public static CompressHolder cached() { + CompressHolder ch = cache.get(); + ch.bos.reset(); + return ch; + } + } + + + private static class CompressHolder { + final FastByteArrayOutputStream bos = new FastByteArrayOutputStream(); + final Deflater deflater = new Deflater(); + final Inflater inflater = new Inflater(); + final byte[] buffer = new byte[(int) SizeUnit.KB.toBytes(5)]; + final UnicodeUtil.UTF16Result utf16Result = new UnicodeUtil.UTF16Result(); + final UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result(); + } + + private final int compressionLevel; + + public ZipCompressor() { + this(Deflater.BEST_COMPRESSION); + } + + public ZipCompressor(int compressionLevel) { + this.compressionLevel = compressionLevel; + } + + public byte[] compress(byte[] value, int offset, int length) throws IOException { + return compress(value, offset, length, compressionLevel, Cached.cached()); + } + + @Override public byte[] compress(byte[] value) throws IOException { + return compress(value, 0, value.length); + } + + @Override public byte[] compressString(String value) throws IOException { + CompressHolder ch = Cached.cached(); + UnicodeUtil.UTF16toUTF8(value, 0, value.length(), ch.utf8Result); + return compress(ch.utf8Result.result, 0, ch.utf8Result.length, compressionLevel, ch); + } + + @Override public byte[] decompress(byte[] value) throws IOException { + CompressHolder ch = Cached.cached(); + decompress(value, ch); + return ch.bos.copiedByteArray(); + } + + @Override public String decompressString(byte[] value) throws IOException { + CompressHolder ch = Cached.cached(); + decompress(value, ch); + UnicodeUtil.UTF8toUTF16(ch.bos.unsafeByteArray(), 0, ch.bos.size(), ch.utf16Result); + return new String(ch.utf16Result.result, 0, ch.utf16Result.length); + } + + private static void decompress(byte[] value, CompressHolder ch) throws IOException { + try { + ch.inflater.reset(); + ch.inflater.setInput(value); + + // Decompress the data + final byte[] buf = 
ch.buffer; + while (!ch.inflater.finished()) { + int count = ch.inflater.inflate(buf); + ch.bos.write(buf, 0, count); + } + } catch (DataFormatException e) { + throw new IOException("Failed to decompress", e); + } // don't close the inflater, we reuse it... + } + + private static byte[] compress(byte[] value, int offset, int length, int compressionLevel, CompressHolder ch) throws IOException { + ch.deflater.reset(); + ch.deflater.setLevel(compressionLevel); + ch.deflater.setInput(value, offset, length); + ch.deflater.finish(); + + // Compress the data + final byte[] buf = ch.buffer; + while (!ch.deflater.finished()) { + int count = ch.deflater.deflate(buf); + ch.bos.write(buf, 0, count); + } + + return ch.bos.copiedByteArray(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/ChunkEncoder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/ChunkEncoder.java new file mode 100644 index 00000000000..d6a41fecb37 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/ChunkEncoder.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * Class that handles actual encoding of individual chunks.
 * Resulting chunks can be compressed or non-compressed; compression
 * is only used if it actually reduces chunk size (including overhead
 * of additional header bytes).
 *
 * <p>Code adapted from H2 project (http://www.h2database.com) Java LZF
 * implementation by Thomas (which itself was inspired by original C code by
 * Marc A Lehmann).
 */
public class ChunkEncoder {
    // Beyond certain point we won't be able to compress:
    private static final int MIN_BLOCK_TO_COMPRESS = 16;

    private static final int MIN_HASH_SIZE = 256;
    // Not much point in bigger tables, with 8k window
    private static final int MAX_HASH_SIZE = 16384;

    // Maximum back-reference distance the 13-bit offset field can express.
    private static final int MAX_OFF = 1 << 13; // 8k
    // Maximum back-reference length: 2 base + 5-bit len field + extension byte.
    private static final int MAX_REF = (1 << 8) + (1 << 3); // 264

    /**
     * Buffer in which encoded content is stored during processing.
     */
    private final byte[] _encodeBuffer;

    // Hash table mapping a 3-byte sequence hash to its last input position.
    private final int[] _hashTable;

    // Bitmask (table size - 1) used to fold hashes into table range.
    private final int _hashModulo;

    /**
     * @param totalLength Total encoded length; used for calculating size
     *                    of hash table to use
     */
    public ChunkEncoder(int totalLength) {
        int largestChunkLen = Math.max(totalLength, LZFChunk.MAX_CHUNK_LEN);

        int hashLen = calcHashLen(largestChunkLen);
        _hashTable = new int[hashLen];
        _hashModulo = hashLen - 1;
        // Worst case output: all literals, plus one length indicator byte
        // for each run of 32 literals.
        int bufferLen = largestChunkLen + ((largestChunkLen + 31) >> 5);
        _encodeBuffer = new byte[bufferLen];
    }

    /**
     * Method for compressing (or not) individual chunks: compression is only
     * kept when it saves at least 2 bytes (the extra header size).
     */
    public LZFChunk encodeChunk(byte[] data, int offset, int len) {
        if (len >= MIN_BLOCK_TO_COMPRESS) {
            /* If we have non-trivial block, and can compress it by at least
             * 2 bytes (since header is 2 bytes longer), let's compress:
             */
            int compLen = tryCompress(data, offset, offset + len, _encodeBuffer, 0);
            if (compLen < (len - 2)) {
                return LZFChunk.createCompressed(len, _encodeBuffer, 0, compLen);
            }
        }
        // Otherwise leave uncompressed:
        return LZFChunk.createNonCompressed(data, offset, len);
    }

    // Picks a power-of-two hash table size around 2x the chunk size,
    // clamped to [MIN_HASH_SIZE, MAX_HASH_SIZE].
    private static int calcHashLen(int chunkSize) {
        // in general try get hash table size of 2x input size
        chunkSize += chunkSize;
        // but no larger than max size:
        if (chunkSize >= MAX_HASH_SIZE) {
            return MAX_HASH_SIZE;
        }
        // otherwise just need to round up to nearest 2x
        int hashLen = MIN_HASH_SIZE;
        while (hashLen < chunkSize) {
            hashLen += hashLen;
        }
        return hashLen;
    }

    // First two input bytes packed big-endian into an int (rolling-hash seed).
    private int first(byte[] in, int inPos) {
        return (in[inPos] << 8) + (in[inPos + 1] & 255);
    }

    // Rolls the packed value forward by one input byte.
    private static int next(int v, byte[] in, int inPos) {
        return (v << 8) + (in[inPos + 2] & 255);
    }


    // Folds a 3-byte packed value into a hash table index.
    private int hash(int h) {
        // or 184117; but this seems to give better hashing?
        return ((h * 57321) >> 9) & _hashModulo;
        // original lzf-c.c used this:
        //return (((h ^ (h << 5)) >> (24 - HLOG) - h*5) & _hashModulo;
        // but that didn't seem to provide better matches
    }

    /*
     * Core LZF encoding loop. Emits literal runs (preceded by a length byte,
     * reserved by the outPos++ "gap" trick) interleaved with back references
     * of the form <len:3|off-hi:5> [len-ext] <off-lo>. NOTE: exact statement
     * order (hash updates, outPos gap bookkeeping) is load-bearing; keep as-is.
     */
    private int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos) {
        int literals = 0;
        // Reserve one byte for the first literal-run length indicator.
        outPos++;
        int hash = first(in, 0);
        // Stop the match loop early so 3-byte reads below stay in bounds.
        inEnd -= 4;
        final int firstPos = inPos; // so that we won't have back references across block boundary
        while (inPos < inEnd) {
            byte p2 = in[inPos + 2];
            // next
            hash = (hash << 8) + (p2 & 255);
            int off = hash(hash);
            int ref = _hashTable[off];
            _hashTable[off] = inPos;
            // NOTE: off is reused below to hold the back-reference distance.
            if (ref < inPos
                    && ref >= firstPos
                    && (off = inPos - ref - 1) < MAX_OFF
                    && in[ref + 2] == p2
                    && in[ref + 1] == (byte) (hash >> 8)
                    && in[ref] == (byte) (hash >> 16)) {
                // match
                int maxLen = inEnd - inPos + 2;
                if (maxLen > MAX_REF) {
                    maxLen = MAX_REF;
                }
                if (literals == 0) {
                    // No pending literals: reclaim the reserved length byte.
                    outPos--;
                } else {
                    // Patch the pending literal run's length byte, then reset.
                    out[outPos - literals - 1] = (byte) (literals - 1);
                    literals = 0;
                }
                // Extend the match beyond the guaranteed 3 bytes.
                int len = 3;
                while (len < maxLen && in[ref + len] == in[inPos + len]) {
                    len++;
                }
                len -= 2;
                if (len < 7) {
                    // Short match: length fits in the 3 high bits.
                    out[outPos++] = (byte) ((off >> 8) + (len << 5));
                } else {
                    // Long match: marker 7 plus an extension byte.
                    out[outPos++] = (byte) ((off >> 8) + (7 << 5));
                    out[outPos++] = (byte) (len - 7);
                }
                out[outPos++] = (byte) off;
                // Reserve the length byte for the next literal run.
                outPos++;
                inPos += len;
                // Re-seed the hash and register the two positions we skip over.
                hash = first(in, inPos);
                hash = next(hash, in, inPos);
                _hashTable[hash(hash)] = inPos++;
                hash = next(hash, in, inPos);
                _hashTable[hash(hash)] = inPos++;
            } else {
                // No match: copy one literal byte.
                out[outPos++] = in[inPos++];
                literals++;
                if (literals == LZFChunk.MAX_LITERAL) {
                    // Run full: patch its length byte and reserve a new one.
                    out[outPos - literals - 1] = (byte) (literals - 1);
                    literals = 0;
                    outPos++;
                }
            }
        }
        // Tail: copy the last (up to 4) bytes as plain literals.
        inEnd += 4;
        while (inPos < inEnd) {
            out[outPos++] = in[inPos++];
            literals++;
            if (literals == LZFChunk.MAX_LITERAL) {
                out[outPos - literals - 1] = (byte) (literals - 1);
                literals = 0;
                outPos++;
            }
        }
        // Patch the final literal run length; drop the reserved byte if empty.
        out[outPos - literals - 1] = (byte) (literals - 1);
        if (literals == 0) {
            outPos--;
        }
        return outPos;
    }

}
/**
 * Helper class used to store LZF encoded segments (compressed and
 * non-compressed) that can be chained together to produce LZF files/streams.
 */
public class LZFChunk {
    /**
     * Maximum length of literal run for LZF encoding.
     */
    public static final int MAX_LITERAL = 1 << 5; // 32

    // Chunk length is limited by 2-byte length indicator, to 64k
    public static final int MAX_CHUNK_LEN = 0xFFFF;

    public final static byte BYTE_Z = 'Z';
    public final static byte BYTE_V = 'V';

    public final static int BLOCK_TYPE_NON_COMPRESSED = 0;
    public final static int BLOCK_TYPE_COMPRESSED = 1;


    final byte[] _data;
    LZFChunk _next;

    private LZFChunk(byte[] data) {
        _data = data;
    }

    /**
     * Factory method for constructing a compressed chunk: 7-byte header
     * ('Z','V', type, 2-byte encoded length, 2-byte original length) followed
     * by the encoded payload.
     */
    public static LZFChunk createCompressed(int origLen, byte[] encData, int encPtr, int encLen) {
        byte[] chunk = new byte[encLen + 7];
        writeHeader(chunk, BLOCK_TYPE_COMPRESSED, encLen);
        chunk[5] = (byte) (origLen >> 8);
        chunk[6] = (byte) origLen;
        System.arraycopy(encData, encPtr, chunk, 7, encLen);
        return new LZFChunk(chunk);
    }

    /**
     * Factory method for constructing a non-compressed chunk: 5-byte header
     * ('Z','V', type, 2-byte length) followed by the plain payload.
     */
    public static LZFChunk createNonCompressed(byte[] plainData, int ptr, int len) {
        byte[] chunk = new byte[len + 5];
        writeHeader(chunk, BLOCK_TYPE_NON_COMPRESSED, len);
        System.arraycopy(plainData, ptr, chunk, 5, len);
        return new LZFChunk(chunk);
    }

    // Writes the common part of the header: signature, block type, payload length.
    private static void writeHeader(byte[] chunk, int blockType, int len) {
        chunk[0] = BYTE_Z;
        chunk[1] = BYTE_V;
        chunk[2] = (byte) blockType;
        chunk[3] = (byte) (len >> 8);
        chunk[4] = (byte) len;
    }

    public void setNext(LZFChunk next) {
        _next = next;
    }

    public LZFChunk next() {
        return _next;
    }

    public int length() {
        return _data.length;
    }

    public byte[] getData() {
        return _data;
    }

    /**
     * Copies this chunk's bytes into {@code dst} starting at {@code ptr},
     * returning the position right after the copied data.
     */
    public int copyTo(byte[] dst, int ptr) {
        int len = _data.length;
        System.arraycopy(_data, 0, dst, ptr, len);
        return ptr + len;
    }
}
00000000000..c7b0c94c681 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/LZFDecoder.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io.compression.lzf; + +import java.io.IOException; + +/** + * Decoder that handles decoding of sequence of encoded LZF chunks, + * combining them into a single contiguous result byte array + *

    + * Code adapted from H2 project (http://www.h2database.com) Java LZF implementation + * by Thomas (which itself was inspired by original C code by Marc A Lehmann) + */ +public class LZFDecoder { + final static byte BYTE_NULL = 0; + + // static methods, no need to instantiate + + private LZFDecoder() { + } + + /** + * Method for decompressing whole input data, which encoded in LZF + * block structure (compatible with lzf command line utility), + * and can consist of any number of blocks + */ + public static byte[] decode(byte[] data, int length) throws IOException { + /* First: let's calculate actual size, so we can allocate + * exact result size. Also useful for basic sanity checking; + * so that after call we know header structure is not corrupt + * (to the degree that lengths etc seem valid) + */ + byte[] result = new byte[calculateUncompressedSize(data, length)]; + int inPtr = 0; + int outPtr = 0; + + while (inPtr < (length - 1)) { // -1 to offset possible end marker + inPtr += 2; // skip 'ZV' marker + int type = data[inPtr++]; + int len = uint16(data, inPtr); + inPtr += 2; + if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed + System.arraycopy(data, inPtr, result, outPtr, len); + outPtr += len; + } else { // compressed + int uncompLen = uint16(data, inPtr); + inPtr += 2; + decompressChunk(data, inPtr, result, outPtr, outPtr + uncompLen); + outPtr += uncompLen; + } + inPtr += len; + } + return result; + } + + private static int calculateUncompressedSize(byte[] data, int length) throws IOException { + int uncompressedSize = 0; + int ptr = 0; + int blockNr = 0; + + while (ptr < length) { + // can use optional end marker + if (ptr == (length + 1) && data[ptr] == BYTE_NULL) { + ++ptr; // so that we'll be at end + break; + } + // simpler to handle bounds checks by catching exception here... 
+ try { + if (data[ptr] != LZFChunk.BYTE_Z || data[ptr + 1] != LZFChunk.BYTE_V) { + throw new IOException("Corrupt input data, block #" + blockNr + " (at offset " + ptr + "): did not start with 'ZV' signature bytes"); + } + int type = (int) data[ptr + 2]; + int blockLen = uint16(data, ptr + 3); + if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed + ptr += 5; + uncompressedSize += blockLen; + } else if (type == LZFChunk.BLOCK_TYPE_COMPRESSED) { // compressed + uncompressedSize += uint16(data, ptr + 5); + ptr += 7; + } else { // unknown... CRC-32 would be 2, but that's not implemented by cli tool + throw new IOException("Corrupt input data, block #" + blockNr + " (at offset " + ptr + "): unrecognized block type " + (type & 0xFF)); + } + ptr += blockLen; + } catch (ArrayIndexOutOfBoundsException e) { + throw new IOException("Corrupt input data, block #" + blockNr + " (at offset " + ptr + "): truncated block header"); + } + ++blockNr; + } + // one more sanity check: + if (ptr != length) { + throw new IOException("Corrupt input data: block #" + blockNr + " extends " + (data.length - ptr) + " beyond end of input"); + } + return uncompressedSize; + } + + /** + * Main decode method for individual chunks. 
+ */ + public static void decompressChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd) + throws IOException { + do { + int ctrl = in[inPos++] & 255; + if (ctrl < LZFChunk.MAX_LITERAL) { // literal run + ctrl += inPos; + do { + out[outPos++] = in[inPos]; + } while (inPos++ < ctrl); + } else { + // back reference + int len = ctrl >> 5; + ctrl = -((ctrl & 0x1f) << 8) - 1; + if (len == 7) { + len += in[inPos++] & 255; + } + ctrl -= in[inPos++] & 255; + len += outPos + 2; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + while (outPos < len - 8) { + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + out[outPos] = out[outPos++ + ctrl]; + } + while (outPos < len) { + out[outPos] = out[outPos++ + ctrl]; + } + } + } while (outPos < outEnd); + + // sanity check to guard against corrupt data: + if (outPos != outEnd) + throw new IOException("Corrupt data: overrun in decompress, input offset " + inPos + ", output offset " + outPos); + } + + private static int uint16(byte[] data, int ptr) { + return ((data[ptr] & 0xFF) << 8) + (data[ptr + 1] & 0xFF); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/LZFEncoder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/LZFEncoder.java new file mode 100644 index 00000000000..73c68e0f2cf --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/io/compression/lzf/LZFEncoder.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io.compression.lzf; + +import java.io.IOException; + +/** + * Encoder that handles splitting of input into chunks to encode, + * calls {@link ChunkEncoder} to compress individual chunks and + * combines resulting chunks into contiguous output byte array. + *

    + * Code adapted from H2 project (http://www.h2database.com) Java LZF implementation + * by Thomas (which itself was inspired by original C code by Marc A Lehmann) + */ +public class LZFEncoder { + // Static methods only, no point in instantiating + + private LZFEncoder() { + } + + /** + * Method for compressing given input data using LZF encoding and + * block structure (compatible with lzf command line utility). + * Result consists of a sequence of chunks. + */ + public static byte[] encode(byte[] data, int length) throws IOException { + int left = length; + ChunkEncoder enc = new ChunkEncoder(left); + int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left); + LZFChunk first = enc.encodeChunk(data, 0, chunkLen); + left -= chunkLen; + // shortcut: if it all fit in, no need to coalesce: + if (left < 1) { + return first.getData(); + } + // otherwise need to get other chunks: + int resultBytes = first.length(); + int inputOffset = chunkLen; + LZFChunk last = first; + + do { + chunkLen = Math.min(left, LZFChunk.MAX_CHUNK_LEN); + LZFChunk chunk = enc.encodeChunk(data, inputOffset, chunkLen); + inputOffset += chunkLen; + left -= chunkLen; + resultBytes += chunk.length(); + last.setNext(chunk); + last = chunk; + } while (left > 0); + // and then coalesce returns into single contiguous byte array + byte[] result = new byte[resultBytes]; + int ptr = 0; + for (; first != null; first = first.next()) { + ptr = first.copyTo(result, ptr); + } + return result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/jline/ANSI.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/jline/ANSI.java new file mode 100644 index 00000000000..91501447d47 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/jline/ANSI.java @@ -0,0 +1,383 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.jline;

import jline.ANSIBuffer;
import jline.Terminal;

import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

/**
 * Helpers for emitting ANSI escape sequences: detection of terminal support,
 * symbolic code names, a buffer that conditionally inserts escapes, a simple
 * <code>@|code text|</code> template renderer and a {@link PrintWriter} that
 * renders on write.
 *
 * @author kimchy (Shay Banon)
 */
public class ANSI {
    //
    // Detection/Enabled Muck
    //

    /**
     * Tries to detect if the current system supports ANSI. Detection can be
     * forced on via the system property <code>&lt;this class&gt;.force=true</code>,
     * and is skipped entirely unless <code>jline.enabled</code> is set.
     */
    private static boolean detect() {
        if (System.getProperty("jline.enabled", "false").equalsIgnoreCase("false")) {
            return false;
        }
        boolean enabled = Terminal.getTerminal().isANSISupported();

        if (!enabled) {
            String force = System.getProperty(ANSI.class.getName() + ".force", "false");
            enabled = Boolean.valueOf(force).booleanValue();
        }

        return enabled;
    }

    /** Re-runs detection on every call; does not cache. */
    public static boolean isDetected() {
        return detect();
    }

    // Lazily initialized by isEnabled(); null means "not decided yet".
    private static Boolean enabled;

    /** Explicitly enables or disables ANSI output, overriding detection. */
    public static void setEnabled(final boolean flag) {
        enabled = Boolean.valueOf(flag);
    }

    /** Returns whether ANSI output is enabled, detecting on first use. */
    public static boolean isEnabled() {
        if (enabled == null) {
            enabled = Boolean.valueOf(isDetected());
        }

        return enabled.booleanValue();
    }

    //
    // Code
    //

    /**
     * ANSI attribute/color codes, addressable by symbolic name.
     */
    public static class Code {
        //
        // NOTE: Some fields duplicated from jline.ANSIBuffer.ANSICodes to change access modifiers
        //

        public static final int OFF = 0;
        public static final int BOLD = 1;
        public static final int UNDERSCORE = 4;
        public static final int BLINK = 5;
        public static final int REVERSE = 7;
        public static final int CONCEALED = 8;

        public static final int FG_BLACK = 30;
        public static final int FG_RED = 31;
        public static final int FG_GREEN = 32;
        public static final int FG_YELLOW = 33;
        public static final int FG_BLUE = 34;
        public static final int FG_MAGENTA = 35;
        public static final int FG_CYAN = 36;
        public static final int FG_WHITE = 37;

        public static final int BLACK = FG_BLACK;
        public static final int RED = FG_RED;
        public static final int GREEN = FG_GREEN;
        public static final int YELLOW = FG_YELLOW;
        public static final int BLUE = FG_BLUE;
        public static final int MAGENTA = FG_MAGENTA;
        public static final int CYAN = FG_CYAN;
        public static final int WHITE = FG_WHITE;

        public static final int BG_BLACK = 40;
        public static final int BG_RED = 41;
        public static final int BG_GREEN = 42;
        public static final int BG_YELLOW = 43;
        public static final int BG_BLUE = 44;
        public static final int BG_MAGENTA = 45;
        public static final int BG_CYAN = 46;
        public static final int BG_WHITE = 47;

        /**
         * A map of code names to values.
         */
        private static final Map NAMES_TO_CODES;

        /**
         * A map of codes to name. NOTE: aliased codes (e.g. RED == FG_RED)
         * map to whichever public field reflection enumerates last, so the
         * name returned for an aliased code is not guaranteed stable.
         */
        private static final Map CODES_TO_NAMES;

        static {
            Field[] fields = Code.class.getDeclaredFields();
            Map names = new HashMap(fields.length);
            Map codes = new HashMap(fields.length);

            try {
                for (int i = 0; i < fields.length; i++) {
                    // Skip anything non-public, all public fields are codes
                    int mods = fields[i].getModifiers();
                    if (!Modifier.isPublic(mods)) {
                        continue;
                    }

                    String name = fields[i].getName();
                    Number code = (Number) fields[i].get(Code.class);

                    names.put(name, code);
                    codes.put(code, name);
                }
            }
            catch (IllegalAccessException e) {
                // This should never happen
                throw new Error(e);
            }

            NAMES_TO_CODES = names;
            CODES_TO_NAMES = codes;
        }

        /**
         * Returns the ANSI code for the given symbolic name. Supported symbolic names are all defined as
         * fields in {@link ANSI.Code} where the case is not significant.
         *
         * @throws IllegalArgumentException if the name does not match any code
         */
        public static int forName(final String name) throws IllegalArgumentException {
            assert name != null;

            // All names in the map are upper-case. Use a fixed locale so the
            // lookup is not broken by locale-sensitive casing (e.g. Turkish
            // dotless-i would turn "white" into "WH\u0130TE").
            String tmp = name.toUpperCase(Locale.ENGLISH);
            Number code = (Number) NAMES_TO_CODES.get(tmp);

            if (code == null) {
                throw new IllegalArgumentException("Invalid ANSI code name: " + name);
            }

            return code.intValue();
        }

        /**
         * Returns the symbolic name for the given ANSI code.
         *
         * @throws IllegalArgumentException if the code is unknown
         */
        public static String name(final int code) throws IllegalArgumentException {
            String name = (String) CODES_TO_NAMES.get(Integer.valueOf(code));

            if (name == null) {
                throw new IllegalArgumentException("Invalid ANSI code: " + code);
            }

            return name;
        }
    }

    //
    // Buffer
    //

    /**
     * A string buffer that only inserts ANSI escapes when ANSI is enabled.
     * NOTE(review): not thread-safe despite using StringBuffer; instances are
     * intended for single-threaded use.
     */
    public static class Buffer {
        private final StringBuffer buff = new StringBuffer();

        // When true (always, currently), toString() drains the buffer.
        public final boolean autoClear = true;

        public String toString() {
            try {
                return buff.toString();
            }
            finally {
                if (autoClear) clear();
            }
        }

        public void clear() {
            buff.setLength(0);
        }

        public int size() {
            return buff.length();
        }

        public Buffer append(final String text) {
            buff.append(text);

            return this;
        }

        public Buffer append(final Object obj) {
            return append(String.valueOf(obj));
        }

        /** Appends the escape sequence for the given code (only if enabled). */
        public Buffer attrib(final int code) {
            if (isEnabled()) {
                buff.append(ANSIBuffer.ANSICodes.attrib(code));
            }

            return this;
        }

        /** Appends text wrapped in the given attribute, or plain text if disabled. */
        public Buffer attrib(final String text, final int code) {
            assert text != null;

            if (isEnabled()) {
                buff.append(ANSIBuffer.ANSICodes.attrib(code)).append(text).append(ANSIBuffer.ANSICodes.attrib(Code.OFF));
            } else {
                buff.append(text);
            }

            return this;
        }

        public Buffer attrib(final String text, final String codeName) {
            return attrib(text, Code.forName(codeName));
        }
    }

    //
    // Renderer
    //

    /**
     * Renders templates of the form <code>@|CODE[,CODE...] text|</code> into
     * ANSI-attributed text.
     */
    public static class Renderer {
        public static final String BEGIN_TOKEN = "@|";

        private static final int BEGIN_TOKEN_SIZE = BEGIN_TOKEN.length();

        public static final String END_TOKEN = "|";

        private static final int END_TOKEN_SIZE = END_TOKEN.length();

        public static final String CODE_TEXT_SEPARATOR = " ";

        public static final String CODE_LIST_SEPARATOR = ",";

        private final Buffer buff = new Buffer();

        /**
         * Replaces every <code>@|...|</code> expression in the input with its
         * rendered form; text outside expressions is copied through.
         *
         * @throws RenderException if an expression is unterminated or malformed
         */
        public String render(final String input) throws RenderException {
            assert input != null;

            // current, prefix and suffix positions
            int c = 0, p, s;

            while (c < input.length()) {
                p = input.indexOf(BEGIN_TOKEN, c);
                if (p < 0) {
                    break;
                }

                s = input.indexOf(END_TOKEN, p + BEGIN_TOKEN_SIZE);
                if (s < 0) {
                    throw new RenderException("Missing '" + END_TOKEN + "': " + input);
                }

                String expr = input.substring(p + BEGIN_TOKEN_SIZE, s);

                buff.append(input.substring(c, p));

                evaluate(expr);

                c = s + END_TOKEN_SIZE;
            }

            buff.append(input.substring(c));

            return buff.toString();
        }

        /** Evaluates one "CODE[,CODE...] text" expression into the buffer. */
        private void evaluate(final String input) throws RenderException {
            assert input != null;

            int i = input.indexOf(CODE_TEXT_SEPARATOR);
            if (i < 0) {
                throw new RenderException("Missing ANSI code/text separator '" + CODE_TEXT_SEPARATOR + "': " + input);
            }

            String tmp = input.substring(0, i);
            String[] codes = tmp.split(CODE_LIST_SEPARATOR);
            String text = input.substring(i + 1);

            for (int j = 0; j < codes.length; j++) {
                int code = Code.forName(codes[j]);
                buff.attrib(code);
            }

            buff.append(text);

            buff.attrib(Code.OFF);
        }

        //
        // RenderException
        //

        /** Thrown when a render template is malformed. */
        public static class RenderException
                extends RuntimeException {
            public RenderException(final String msg) {
                super(msg);
            }
        }

        //
        // Helpers
        //

        /** Returns true if the text contains something renderable. */
        public static boolean test(final String text) {
            return text != null && text.indexOf(BEGIN_TOKEN) >= 0;
        }

        /** Wraps text in a render expression for the given code. */
        public static String encode(final String text, final int code) {
            return new StringBuffer(BEGIN_TOKEN).
                    append(Code.name(code)).
                    append(CODE_TEXT_SEPARATOR).
                    append(text).
                    append(END_TOKEN).
                    toString();
        }
    }

    //
    // RenderWriter
    //

    /**
     * A {@link PrintWriter} that renders ANSI templates in written strings.
     */
    public static class RenderWriter extends PrintWriter {
        private final Renderer renderer = new Renderer();

        public RenderWriter(final OutputStream out) {
            super(out);
        }

        public RenderWriter(final OutputStream out, final boolean autoFlush) {
            super(out, autoFlush);
        }

        public RenderWriter(final Writer out) {
            super(out);
        }

        public RenderWriter(final Writer out, final boolean autoFlush) {
            super(out, autoFlush);
        }

        public void write(final String s) {
            if (Renderer.test(s)) {
                super.write(renderer.render(s));
            } else {
                super.write(s);
            }
        }
    }
}
+ */ + +package org.elasticsearch.util.joda; + +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +/** + * @author kimchy (Shay Banon) + */ +public class Joda { + + /** + * Parses a joda based pattern, including some named ones (similar to the built in Joda ISO ones). + */ + public static DateTimeFormatter forPattern(String input) { + DateTimeFormatter formatter; + if ("basicDate".equals(input)) { + formatter = ISODateTimeFormat.basicDate(); + } else if ("basicDateTime".equals(input)) { + formatter = ISODateTimeFormat.basicDateTime(); + } else if ("basicDateTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.basicDateTimeNoMillis(); + } else if ("basicOrdinalDate".equals(input)) { + formatter = ISODateTimeFormat.basicOrdinalDate(); + } else if ("basicOrdinalDateTime".equals(input)) { + formatter = ISODateTimeFormat.basicOrdinalDateTime(); + } else if ("basicOrdinalDateTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.basicOrdinalDateTimeNoMillis(); + } else if ("basicTime".equals(input)) { + formatter = ISODateTimeFormat.basicTime(); + } else if ("basicTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.basicTimeNoMillis(); + } else if ("basicTTime".equals(input)) { + formatter = ISODateTimeFormat.basicTTime(); + } else if ("basicTTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.basicTTimeNoMillis(); + } else if ("basicWeekDate".equals(input)) { + formatter = ISODateTimeFormat.basicWeekDate(); + } else if ("basicWeekDateTime".equals(input)) { + formatter = ISODateTimeFormat.basicWeekDateTime(); + } else if ("basicWeekDateTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.basicWeekDateTimeNoMillis(); + } else if ("date".equals(input)) { + formatter = ISODateTimeFormat.date(); + } else if ("dateHour".equals(input)) { + formatter = ISODateTimeFormat.dateHour(); + } else if ("dateHourMinute".equals(input)) { + 
formatter = ISODateTimeFormat.dateHourMinute(); + } else if ("dateHourMinuteSecond".equals(input)) { + formatter = ISODateTimeFormat.dateHourMinuteSecond(); + } else if ("dateHourMinuteSecondFraction".equals(input)) { + formatter = ISODateTimeFormat.dateHourMinuteSecondFraction(); + } else if ("dateHourMinuteSecondMillis".equals(input)) { + formatter = ISODateTimeFormat.dateHourMinuteSecondMillis(); + } else if ("dateOptionalTime".equals(input)) { + formatter = ISODateTimeFormat.dateOptionalTimeParser(); + } else if ("dateTime".equals(input)) { + formatter = ISODateTimeFormat.dateTime(); + } else if ("dateTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.dateTimeNoMillis(); + } else if ("hour".equals(input)) { + formatter = ISODateTimeFormat.hour(); + } else if ("hourMinute".equals(input)) { + formatter = ISODateTimeFormat.hourMinute(); + } else if ("hourMinuteSecond".equals(input)) { + formatter = ISODateTimeFormat.hourMinuteSecond(); + } else if ("hourMinuteSecondFraction".equals(input)) { + formatter = ISODateTimeFormat.hourMinuteSecondFraction(); + } else if ("hourMinuteSecondMillis".equals(input)) { + formatter = ISODateTimeFormat.hourMinuteSecondMillis(); + } else if ("ordinalDate".equals(input)) { + formatter = ISODateTimeFormat.ordinalDate(); + } else if ("ordinalDateTime".equals(input)) { + formatter = ISODateTimeFormat.ordinalDateTime(); + } else if ("ordinalDateTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.ordinalDateTimeNoMillis(); + } else if ("time".equals(input)) { + formatter = ISODateTimeFormat.time(); + } else if ("tTime".equals(input)) { + formatter = ISODateTimeFormat.tTime(); + } else if ("tTimeNoMillis".equals(input)) { + formatter = ISODateTimeFormat.tTimeNoMillis(); + } else if ("weekDate".equals(input)) { + formatter = ISODateTimeFormat.weekDate(); + } else if ("weekDateTime".equals(input)) { + formatter = ISODateTimeFormat.weekDateTime(); + } else if ("weekyear".equals(input)) { + formatter = 
ISODateTimeFormat.weekyear(); + } else if ("weekyearWeek".equals(input)) { + formatter = ISODateTimeFormat.weekyearWeek(); + } else if ("year".equals(input)) { + formatter = ISODateTimeFormat.year(); + } else if ("yearMonth".equals(input)) { + formatter = ISODateTimeFormat.yearMonth(); + } else if ("yearMonthDay".equals(input)) { + formatter = ISODateTimeFormat.yearMonthDay(); + } else { + formatter = DateTimeFormat.forPattern(input); + } + return formatter; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/json/Jackson.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/json/Jackson.java new file mode 100644 index 00000000000..b26249deed6 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/json/Jackson.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.json; + +import org.codehaus.jackson.JsonFactory; +import org.codehaus.jackson.JsonGenerator; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.map.ObjectMapper; + +/** + * A set of helper methods for Jackson. 
+ * + * @author kimchy (Shay Banon) + */ +public final class Jackson { + + private static final JsonFactory defaultJsonFactory; + + static { + defaultJsonFactory = newJsonFactory(); + } + + public static JsonFactory defaultJsonFactory() { + return defaultJsonFactory; + } + + public static JsonFactory newJsonFactory() { + JsonFactory jsonFactory = new JsonFactory(); + jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true); + jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true); + return jsonFactory; + } + + public static ObjectMapper newObjectMapper() { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true); + mapper.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true); + return mapper; + } + + private Jackson() { + + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/json/JsonBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/json/JsonBuilder.java new file mode 100644 index 00000000000..e5ab9f13e9c --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/json/JsonBuilder.java @@ -0,0 +1,366 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.json;

import org.apache.lucene.util.UnicodeUtil;
import org.codehaus.jackson.JsonFactory;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.util.concurrent.NotThreadSafe;
import org.elasticsearch.util.io.FastCharArrayWriter;

import java.io.IOException;

/**
 * A fluent builder for json content, backed by a Jackson generator writing
 * into an in-memory character buffer.
 *
 * @author kimchy (Shay Banon)
 */
@NotThreadSafe
public class JsonBuilder {

    /**
     * A thread local based cache of {@link JsonBuilder}.
     */
    public static class Cached {

        private final JsonBuilder generator;

        public Cached(JsonBuilder generator) {
            this.generator = generator;
        }

        private static final ThreadLocal<Cached> cache = new ThreadLocal<Cached>() {
            @Override protected Cached initialValue() {
                try {
                    return new Cached(new JsonBuilder());
                } catch (IOException e) {
                    throw new ElasticSearchException("Failed to create json generator", e);
                }
            }
        };

        /**
         * Returns the cached thread local builder, with its internal buffer cleared.
         */
        public static JsonBuilder cached() throws IOException {
            Cached cached = cache.get();
            cached.generator.reset();
            return cached.generator;
        }

        /**
         * Returns the cached thread local builder without clearing it first.
         */
        public static JsonBuilder cachedNoReset() {
            Cached cached = cache.get();
            return cached.generator;
        }
    }

    /** Convenience for {@link Cached#cached()}. */
    public static JsonBuilder cached() throws IOException {
        return Cached.cached();
    }


    private final FastCharArrayWriter writer;

    private final JsonFactory factory;

    // Recreated on reset(); Jackson generators cannot be rewound.
    private org.codehaus.jackson.JsonGenerator generator;

    // Shared scratch buffer for utf8(); see its javadoc about aliasing.
    final UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();

    public JsonBuilder() throws IOException {
        this(Jackson.defaultJsonFactory());
    }

    public JsonBuilder(JsonFactory factory) throws IOException {
        this.writer = new FastCharArrayWriter();
        this.factory = factory;
        this.generator = factory.createJsonGenerator(writer);
    }

    /** Switches the generator to pretty-printed output. */
    public JsonBuilder prettyPrint() {
        generator.useDefaultPrettyPrinter();
        return this;
    }

    /** Opens a JSONP wrapper: {@code callback(}. Must be closed with {@link #endJsonp()}. */
    public JsonBuilder startJsonp(String callback) throws IOException {
        flush();
        writer.append(callback).append('(');
        return this;
    }

    /** Closes a JSONP wrapper opened with {@link #startJsonp(String)}. */
    public JsonBuilder endJsonp() throws IOException {
        flush();
        writer.append(");");
        return this;
    }

    public JsonBuilder startObject(String name) throws IOException {
        field(name);
        startObject();
        return this;
    }

    public JsonBuilder startObject() throws IOException {
        generator.writeStartObject();
        return this;
    }

    public JsonBuilder endObject() throws IOException {
        generator.writeEndObject();
        return this;
    }

    public JsonBuilder startArray(String name) throws IOException {
        field(name);
        startArray();
        return this;
    }

    public JsonBuilder startArray() throws IOException {
        generator.writeStartArray();
        return this;
    }

    public JsonBuilder endArray() throws IOException {
        generator.writeEndArray();
        return this;
    }

    public JsonBuilder field(String name) throws IOException {
        generator.writeFieldName(name);
        return this;
    }

    public JsonBuilder field(String name, char[] value, int offset, int length) throws IOException {
        generator.writeFieldName(name);
        if (value == null) {
            generator.writeNull();
        } else {
            generator.writeString(value, offset, length);
        }
        return this;
    }

    public JsonBuilder field(String name, String value) throws IOException {
        generator.writeFieldName(name);
        if (value == null) {
            generator.writeNull();
        } else {
            generator.writeString(value);
        }
        return this;
    }

    public JsonBuilder field(String name, int value) throws IOException {
        generator.writeFieldName(name);
        generator.writeNumber(value);
        return this;
    }

    public JsonBuilder field(String name, long value) throws IOException {
        generator.writeFieldName(name);
        generator.writeNumber(value);
        return this;
    }

    public JsonBuilder field(String name, float value) throws IOException {
        generator.writeFieldName(name);
        generator.writeNumber(value);
        return this;
    }

    public JsonBuilder field(String name, double value) throws IOException {
        generator.writeFieldName(name);
        generator.writeNumber(value);
        return this;
    }

    /**
     * Writes a field, dispatching on the runtime type of the value. Nulls are
     * written as json null; unrecognized types fall back to {@code toString()}.
     */
    public JsonBuilder field(String name, Object value) throws IOException {
        if (value == null) {
            nullField(name);
            return this;
        }
        Class type = value.getClass();
        if (type == String.class) {
            field(name, (String) value);
        } else if (type == Float.class) {
            field(name, ((Float) value).floatValue());
        } else if (type == Double.class) {
            field(name, ((Double) value).doubleValue());
        } else if (type == Integer.class) {
            field(name, ((Integer) value).intValue());
        } else if (type == Long.class) {
            field(name, ((Long) value).longValue());
        } else if (type == Boolean.class) {
            field(name, ((Boolean) value).booleanValue());
        } else {
            field(name, value.toString());
        }
        return this;
    }

    public JsonBuilder field(String name, boolean value) throws IOException {
        generator.writeFieldName(name);
        generator.writeBoolean(value);
        return this;
    }

    public JsonBuilder field(String name, byte[] value) throws IOException {
        generator.writeFieldName(name);
        generator.writeBinary(value);
        return this;
    }

    public JsonBuilder nullField(String name) throws IOException {
        generator.writeNullField(name);
        return this;
    }

    public JsonBuilder binary(byte[] bytes) throws IOException {
        generator.writeBinary(bytes);
        return this;
    }

    /** Writes raw, unescaped text into the output; the caller is responsible for validity. */
    public JsonBuilder raw(String json) throws IOException {
        generator.writeRaw(json);
        return this;
    }

    public JsonBuilder string(String value) throws IOException {
        generator.writeString(value);
        return this;
    }

    public JsonBuilder number(int value) throws IOException {
        generator.writeNumber(value);
        return this;
    }

    public JsonBuilder number(long value) throws IOException {
        generator.writeNumber(value);
        return this;
    }

    public JsonBuilder number(double value) throws IOException {
        generator.writeNumber(value);
        return this;
    }

    public JsonBuilder number(Integer value) throws IOException {
        generator.writeNumber(value.intValue());
        return this;
    }

    public JsonBuilder number(Long value) throws IOException {
        generator.writeNumber(value.longValue());
        return this;
    }

    public JsonBuilder number(Float value) throws IOException {
        generator.writeNumber(value.floatValue());
        return this;
    }

    public JsonBuilder number(Double value) throws IOException {
        generator.writeNumber(value.doubleValue());
        return this;
    }

    public JsonBuilder bool(boolean value) throws IOException {
        generator.writeBoolean(value);
        return this;
    }

    /**
     * Writes a bare value, dispatching on runtime type. Unlike
     * {@link #field(String, Object)} there is no {@code toString()} fallback;
     * an unsupported type raises an exception.
     *
     * @throws IOException if the value's type is not one of the supported json types
     */
    public JsonBuilder value(Object value) throws IOException {
        // Fix: previously a null value caused a NullPointerException; write a
        // json null instead, consistent with field(String, Object).
        if (value == null) {
            generator.writeNull();
            return this;
        }
        Class type = value.getClass();
        if (type == String.class) {
            string((String) value);
        } else if (type == Float.class) {
            number(((Float) value).floatValue());
        } else if (type == Double.class) {
            number(((Double) value).doubleValue());
        } else if (type == Integer.class) {
            number(((Integer) value).intValue());
        } else if (type == Long.class) {
            number(((Long) value).longValue());
        } else if (type == Boolean.class) {
            bool((Boolean) value);
        } else if (type == byte[].class) {
            binary((byte[]) value);
        } else {
            throw new IOException("Type not allowed [" + type + "]");
        }
        return this;
    }

    public JsonBuilder flush() throws IOException {
        generator.flush();
        return this;
    }

    /** Clears the buffer and starts a fresh generator over it. */
    public JsonBuilder reset() throws IOException {
        writer.reset();
        generator = factory.createJsonGenerator(writer);
        return this;
    }

    /** Returns the json written so far, trimmed of surrounding whitespace. */
    public String string() throws IOException {
        flush();
        return writer.toStringTrim();
    }

    /**
     * Returns the byte[] that represents the utf8 of the json written up until now.
     * Note, the result is shared within this instance, so copy the byte array if needed
     * or use {@link #utf8copied()}.
     */
    public UnicodeUtil.UTF8Result utf8() throws IOException {
        flush();

        // ignore surrounding whitespace; st is the start offset into val and
        // len is the remaining character count from st.
        int st = 0;
        int len = writer.size();
        char[] val = writer.unsafeCharArray();

        while (len > 0 && val[st] <= ' ') {
            st++;
            len--;
        }
        // Fix: the last remaining character is val[st + len - 1]; the previous
        // code tested val[len - 1], which is the wrong position whenever
        // leading whitespace was trimmed, leaving trailing whitespace behind.
        while (len > 0 && val[st + len - 1] <= ' ') {
            len--;
        }

        UnicodeUtil.UTF16toUTF8(val, st, len, utf8Result);

        return utf8Result;
    }

    /**
     * Returns a copied byte[] that represents the utf8 of the json written up until now.
     */
    public byte[] utf8copied() throws IOException {
        utf8();
        byte[] result = new byte[utf8Result.length];
        System.arraycopy(utf8Result.result, 0, result, 0, utf8Result.length);
        return result;
    }

    public void close() throws IOException {
        generator.close();
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.json;

import java.io.IOException;

/**
 * Implemented by objects that know how to render themselves as json
 * using a {@link JsonBuilder}.
 *
 * @author kimchy (Shay Banon)
 */
public interface ToJson {

    /**
     * Writes this object's json representation into the given builder.
     *
     * @throws IOException if the underlying generator fails
     */
    void toJson(JsonBuilder builder) throws IOException;
}
+ */ + +package org.elasticsearch.util.lease; + +import org.elasticsearch.ElasticSearchException; + +/** + * @author kimchy (Shay Banon) + */ +public interface Releasable { + + boolean release() throws ElasticSearchException; +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/JLinePatternLayout.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/JLinePatternLayout.java new file mode 100644 index 00000000000..f2ad325e1c8 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/JLinePatternLayout.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.logging;

import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.helpers.FormattingInfo;
import org.apache.log4j.helpers.PatternConverter;
import org.apache.log4j.helpers.PatternParser;
import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.util.jline.ANSI;

import java.lang.reflect.Field;

import static jline.ANSIBuffer.ANSICodes.*;
import static org.elasticsearch.util.jline.ANSI.Code.FG_BLUE;
import static org.elasticsearch.util.jline.ANSI.Code.FG_CYAN;
import static org.elasticsearch.util.jline.ANSI.Code.FG_GREEN;
import static org.elasticsearch.util.jline.ANSI.Code.FG_RED;
import static org.elasticsearch.util.jline.ANSI.Code.FG_YELLOW;
import static org.elasticsearch.util.jline.ANSI.Code.OFF;

/**
 * A log4j {@link PatternLayout} that colors the level field with ANSI
 * escapes when the terminal supports them, falling back to stock behavior
 * on any failure.
 *
 * @author kimchy (Shay Banon)
 */
public class JLinePatternLayout extends PatternLayout {

    @Override protected PatternParser createPatternParser(String pattern) {
        try {
            return new JLinePatternParser(pattern);
        } catch (Throwable t) {
            // jline/reflection unavailable - use the stock parser
            return super.createPatternParser(pattern);
        }
    }

    private final static class JLinePatternParser extends PatternParser {

        private JLinePatternParser(String pattern) {
            super(pattern);
        }

        /**
         * Swaps log4j's level converter for a coloring one. The converter's
         * private "type" field is read reflectively; 2002 appears to be the
         * internal id of the level converter - confirm against the log4j
         * version in use.
         */
        @Override protected void addConverter(PatternConverter pc) {
            try {
                if (ANSI.isEnabled() && pc.getClass().getName().endsWith("BasicPatternConverter")) {
                    Field typeField = pc.getClass().getDeclaredField("type");
                    typeField.setAccessible(true);
                    Integer type = (Integer) typeField.get(pc);
                    if (type == 2002) {
                        pc = new ColoredLevelPatternConverter(formattingInfo);
                    }
                }
            } catch (Throwable t) {
                // keep the original converter on any failure
            }
            super.addConverter(pc);
        }

        private static class ColoredLevelPatternConverter extends PatternConverter {

            ColoredLevelPatternConverter(FormattingInfo formattingInfo) {
                super(formattingInfo);
            }

            public String convert(LoggingEvent event) {
                String level = event.getLevel().toString();
                if (!ANSI.isEnabled()) {
                    return level;
                }
                // WARN and INFO get a trailing space (their names are one
                // character shorter, keeping columns aligned).
                if (event.getLevel() == Level.FATAL) {
                    return attrib(FG_RED) + level + attrib(OFF);
                }
                if (event.getLevel() == Level.ERROR) {
                    return attrib(FG_RED) + level + attrib(OFF);
                }
                if (event.getLevel() == Level.WARN) {
                    return attrib(FG_YELLOW) + level + ' ' + attrib(OFF);
                }
                if (event.getLevel() == Level.INFO) {
                    return attrib(FG_GREEN) + level + ' ' + attrib(OFF);
                }
                if (event.getLevel() == Level.DEBUG) {
                    return attrib(FG_CYAN) + level + attrib(OFF);
                }
                if (event.getLevel() == Level.TRACE) {
                    return attrib(FG_BLUE) + level + attrib(OFF);
                }
                return level;
            }
        }
    }
}
package org.elasticsearch.util.logging;

import com.google.common.collect.Lists;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.util.Classes;
import org.elasticsearch.util.settings.Settings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;

import static com.google.common.collect.Lists.*;
import static java.util.Arrays.asList;

/**
 * A set of utilities around logging.
 *
 * <p>The most important is {@link #getLogger(Class)}, which should be used instead of
 * {@link org.slf4j.LoggerFactory#getLogger(Class)}: it uses the package name as the
 * logger name, without the actual class name.
 *
 * @author kimchy (Shay Banon)
 */
public class Loggers {

    // flipped by the console appender integration to silence console output
    private static boolean consoleLoggingEnabled = true;

    public static void disableConsoleLogging() {
        consoleLoggingEnabled = false;
    }

    public static void enableConsoleLogging() {
        consoleLoggingEnabled = true;
    }

    public static boolean consoleLoggingEnabled() {
        return consoleLoggingEnabled;
    }

    /**
     * Returns a logger prefixed with the index name and shard id (plus any extra prefixes).
     */
    public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
        return getLogger(clazz, settings, shardId.index(), Lists.asList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
    }

    /**
     * Returns a logger prefixed with the index name (plus any extra prefixes).
     */
    public static Logger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
        return getLogger(clazz, settings, Lists.asList(index.name(), prefixes).toArray(new String[0]));
    }

    /**
     * Returns a logger whose prefixes are derived from the settings
     * (optional host address/name, node name) followed by the given prefixes.
     */
    public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
        List<String> prefixesList = newArrayList();
        if (settings.getAsBoolean("logger.logHostAddress", false)) {
            try {
                prefixesList.add(InetAddress.getLocalHost().getHostAddress());
            } catch (UnknownHostException e) {
                // ignore, just don't log the host address
            }
        }
        if (settings.getAsBoolean("logger.logHostName", false)) {
            try {
                prefixesList.add(InetAddress.getLocalHost().getHostName());
            } catch (UnknownHostException e) {
                // ignore, just don't log the host name
            }
        }
        String name = settings.get("name");
        if (name != null) {
            prefixesList.add(name);
        }
        if (prefixes != null && prefixes.length > 0) {
            prefixesList.addAll(asList(prefixes));
        }
        return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
    }

    /**
     * Returns a child logger of the given parent (name concatenation),
     * carrying over the parent's prefix when it has one.
     */
    public static Logger getLogger(Logger parentLogger, String s) {
        Logger logger = getLogger(parentLogger.getName() + s);
        if (parentLogger instanceof PrefixLoggerAdapter) {
            return new PrefixLoggerAdapter(((PrefixLoggerAdapter) parentLogger).prefix(), logger);
        }
        return logger;
    }

    public static Logger getLogger(String s) {
        return LoggerFactory.getLogger(s);
    }

    public static Logger getLogger(Class<?> clazz) {
        return LoggerFactory.getLogger(getLoggerName(clazz));
    }

    public static Logger getLogger(Class<?> clazz, String... prefixes) {
        return getLogger(LoggerFactory.getLogger(getLoggerName(clazz)), prefixes);
    }

    /**
     * Wraps the given logger so every message is prefixed with
     * "[p1][p2]... " built from the non-null prefixes.
     */
    public static Logger getLogger(Logger logger, String... prefixes) {
        if (prefixes == null || prefixes.length == 0) {
            return logger;
        }
        StringBuilder sb = new StringBuilder();
        for (String prefix : prefixes) {
            if (prefix != null) {
                sb.append("[").append(prefix).append("]");
            }
        }
        if (sb.length() == 0) {
            // all prefixes were null, no wrapping needed
            return logger;
        }
        sb.append(" ");
        return new PrefixLoggerAdapter(sb.toString(), logger);
    }

    // org.elasticsearch classes log under their package name, not class name
    private static String getLoggerName(Class<?> clazz) {
        String name = clazz.getName();
        if (name.startsWith("org.elasticsearch.")) {
            name = Classes.getPackageName(clazz);
        }
        return getLoggerName(name);
    }

    // strip the common "org.elasticsearch." prefix to keep logger names short
    private static String getLoggerName(String name) {
        if (name.startsWith("org.elasticsearch.")) {
            return name.substring("org.elasticsearch.".length());
        }
        return name;
    }
}
+ */ + +package org.elasticsearch.util.logging; + +import org.slf4j.Logger; +import org.slf4j.Marker; + +/** + * A Logger that wraps another logger and adds the provided prefix to every log + * message. + * + * @author kimchy (Shay Banon) + */ +// TODO is there a way to do this without String concatenation? +public class PrefixLoggerAdapter implements Logger { + + private final String prefix; + + private final Logger logger; + + public PrefixLoggerAdapter(String prefix, Logger logger) { + this.prefix = prefix; + this.logger = logger; + } + + public String prefix() { + return this.prefix; + } + + private String wrap(String s) { + return prefix + s; + } + + public String getName() { + return logger.getName(); + } + + public boolean isTraceEnabled() { + return logger.isTraceEnabled(); + } + + public void trace(String s) { + logger.trace(wrap(s)); + } + + public void trace(String s, Object o) { + logger.trace(wrap(s), o); + } + + public void trace(String s, Object o, Object o1) { + logger.trace(wrap(s), o, o1); + } + + public void trace(String s, Object[] objects) { + logger.trace(wrap(s), objects); + } + + public void trace(String s, Throwable throwable) { + logger.trace(wrap(s), throwable); + } + + public boolean isTraceEnabled(Marker marker) { + return logger.isTraceEnabled(marker); + } + + public void trace(Marker marker, String s) { + logger.trace(marker, wrap(s)); + } + + public void trace(Marker marker, String s, Object o) { + logger.trace(marker, wrap(s), o); + } + + public void trace(Marker marker, String s, Object o, Object o1) { + logger.trace(marker, wrap(s), o, o1); + } + + public void trace(Marker marker, String s, Object[] objects) { + logger.trace(marker, wrap(s), objects); + } + + public void trace(Marker marker, String s, Throwable throwable) { + logger.trace(marker, wrap(s), throwable); + } + + public boolean isDebugEnabled() { + return logger.isDebugEnabled(); + } + + public void debug(String s) { + logger.debug(wrap(s)); + } + + public void 
debug(String s, Object o) { + logger.debug(wrap(s), o); + } + + public void debug(String s, Object o, Object o1) { + logger.debug(wrap(s), o, o1); + } + + public void debug(String s, Object[] objects) { + logger.debug(wrap(s), objects); + } + + public void debug(String s, Throwable throwable) { + logger.debug(wrap(s), throwable); + } + + public boolean isDebugEnabled(Marker marker) { + return logger.isDebugEnabled(marker); + } + + public void debug(Marker marker, String s) { + logger.debug(marker, wrap(s)); + } + + public void debug(Marker marker, String s, Object o) { + logger.debug(marker, wrap(s), o); + } + + public void debug(Marker marker, String s, Object o, Object o1) { + logger.debug(marker, wrap(s), o, o1); + } + + public void debug(Marker marker, String s, Object[] objects) { + logger.debug(marker, wrap(s), objects); + } + + public void debug(Marker marker, String s, Throwable throwable) { + logger.debug(marker, wrap(s), throwable); + } + + public boolean isInfoEnabled() { + return logger.isInfoEnabled(); + } + + public void info(String s) { + logger.info(wrap(s)); + } + + public void info(String s, Object o) { + logger.info(wrap(s), o); + } + + public void info(String s, Object o, Object o1) { + logger.info(wrap(s), o, o1); + } + + public void info(String s, Object[] objects) { + logger.info(wrap(s), objects); + } + + public void info(String s, Throwable throwable) { + logger.info(wrap(s), throwable); + } + + public boolean isInfoEnabled(Marker marker) { + return logger.isInfoEnabled(marker); + } + + public void info(Marker marker, String s) { + logger.info(marker, wrap(s)); + } + + public void info(Marker marker, String s, Object o) { + logger.info(marker, wrap(s), o); + } + + public void info(Marker marker, String s, Object o, Object o1) { + logger.info(marker, wrap(s), o, o1); + } + + public void info(Marker marker, String s, Object[] objects) { + logger.info(marker, wrap(s), objects); + } + + public void info(Marker marker, String s, Throwable 
throwable) { + logger.info(marker, wrap(s), throwable); + } + + public boolean isWarnEnabled() { + return logger.isWarnEnabled(); + } + + public void warn(String s) { + logger.warn(wrap(s)); + } + + public void warn(String s, Object o) { + logger.warn(wrap(s), o); + } + + public void warn(String s, Object[] objects) { + logger.warn(wrap(s), objects); + } + + public void warn(String s, Object o, Object o1) { + logger.warn(wrap(s), o, o1); + } + + public void warn(String s, Throwable throwable) { + logger.warn(wrap(s), throwable); + } + + public boolean isWarnEnabled(Marker marker) { + return logger.isWarnEnabled(marker); + } + + public void warn(Marker marker, String s) { + logger.warn(marker, wrap(s)); + } + + public void warn(Marker marker, String s, Object o) { + logger.warn(marker, wrap(s), o); + } + + public void warn(Marker marker, String s, Object o, Object o1) { + logger.warn(marker, wrap(s), o, o1); + } + + public void warn(Marker marker, String s, Object[] objects) { + logger.warn(marker, wrap(s), objects); + } + + public void warn(Marker marker, String s, Throwable throwable) { + logger.warn(marker, wrap(s), throwable); + } + + public boolean isErrorEnabled() { + return logger.isErrorEnabled(); + } + + public void error(String s) { + logger.error(wrap(s)); + } + + public void error(String s, Object o) { + logger.error(wrap(s), o); + } + + public void error(String s, Object o, Object o1) { + logger.error(wrap(s), o, o1); + } + + public void error(String s, Object[] objects) { + logger.error(wrap(s), objects); + } + + public void error(String s, Throwable throwable) { + logger.error(wrap(s), throwable); + } + + public boolean isErrorEnabled(Marker marker) { + return logger.isErrorEnabled(marker); + } + + public void error(Marker marker, String s) { + logger.error(marker, wrap(s)); + } + + public void error(Marker marker, String s, Object o) { + logger.error(marker, wrap(s), o); + } + + public void error(Marker marker, String s, Object o, Object o1) { + 
logger.error(marker, wrap(s), o, o1); + } + + public void error(Marker marker, String s, Object[] objects) { + logger.error(marker, wrap(s), objects); + } + + public void error(Marker marker, String s, Throwable throwable) { + logger.error(marker, wrap(s), throwable); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/log4j/ConsoleAppender.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/log4j/ConsoleAppender.java new file mode 100644 index 00000000000..771a1f43146 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/log4j/ConsoleAppender.java @@ -0,0 +1,245 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.logging.log4j; + +import org.apache.log4j.Layout; +import org.apache.log4j.WriterAppender; +import org.apache.log4j.helpers.LogLog; +import org.elasticsearch.util.logging.Loggers; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * ConsoleAppender appends log events to System.out or + * System.err using a layout specified by the user. The + * default target is System.out. + *

    + *

    ElasticSearch: Adapter from log4j to allow to disable console logging...

    + * + * @author Ceki Gülcü + * @author Curt Arnold + * @since 1.1 + */ +public class ConsoleAppender extends WriterAppender { + + public static final String SYSTEM_OUT = "System.out"; + public static final String SYSTEM_ERR = "System.err"; + + protected String target = SYSTEM_OUT; + + /** + * Determines if the appender honors reassignments of System.out + * or System.err made after configuration. + */ + private boolean follow = true; + + /** + * Constructs an unconfigured appender. + */ + public ConsoleAppender() { + } + + /** + * Creates a configured appender. + * + * @param layout layout, may not be null. + */ + public ConsoleAppender(Layout layout) { + this(layout, SYSTEM_OUT); + } + + /** + * Creates a configured appender. + * + * @param layout layout, may not be null. + * @param target target, either "System.err" or "System.out". + */ + public ConsoleAppender(Layout layout, String target) { + setLayout(layout); + setTarget(target); + activateOptions(); + } + + /** + * Sets the value of the Target option. Recognized values + * are "System.out" and "System.err". Any other value will be + * ignored. + */ + public void setTarget(String value) { + String v = value.trim(); + + if (SYSTEM_OUT.equalsIgnoreCase(v)) { + target = SYSTEM_OUT; + } else if (SYSTEM_ERR.equalsIgnoreCase(v)) { + target = SYSTEM_ERR; + } else { + targetWarn(value); + } + } + + /** + * Returns the current value of the Target property. The + * default value of the option is "System.out". + *

    + * See also {@link #setTarget}. + */ + public String getTarget() { + return target; + } + + /** + * Sets whether the appender honors reassignments of System.out + * or System.err made after configuration. + * + * @param newValue if true, appender will use value of System.out or + * System.err in force at the time when logging events are appended. + * @since 1.2.13 + */ + public final void setFollow(final boolean newValue) { + follow = newValue; + } + + /** + * Gets whether the appender honors reassignments of System.out + * or System.err made after configuration. + * + * @return true if appender will use value of System.out or + * System.err in force at the time when logging events are appended. + * @since 1.2.13 + */ + public final boolean getFollow() { + return follow; + } + + void targetWarn(String val) { + LogLog.warn("[" + val + "] should be System.out or System.err."); + LogLog.warn("Using previously set target, System.out by default."); + } + + /** + * Prepares the appender for use. + */ + public void activateOptions() { + if (follow) { + if (target.equals(SYSTEM_ERR)) { + setWriter(createWriter(new SystemErrStream())); + } else { + setWriter(createWriter(new SystemOutStream())); + } + } else { + if (target.equals(SYSTEM_ERR)) { + setWriter(createWriter(System.err)); + } else { + setWriter(createWriter(System.out)); + } + } + + super.activateOptions(); + } + + /** + * {@inheritDoc} + */ + protected + final void closeWriter() { + if (follow) { + super.closeWriter(); + } + } + + + /** + * An implementation of OutputStream that redirects to the + * current System.err. 
+ */ + private static class SystemErrStream extends OutputStream { + public SystemErrStream() { + } + + public void close() { + } + + public void flush() { + System.err.flush(); + } + + public void write(final byte[] b) throws IOException { + if (!Loggers.consoleLoggingEnabled()) { + return; + } + System.err.write(b); + } + + public void write(final byte[] b, final int off, final int len) + throws IOException { + if (!Loggers.consoleLoggingEnabled()) { + return; + } + System.err.write(b, off, len); + } + + public void write(final int b) throws IOException { + if (!Loggers.consoleLoggingEnabled()) { + return; + } + System.err.write(b); + } + } + + /** + * An implementation of OutputStream that redirects to the + * current System.out. + */ + private static class SystemOutStream extends OutputStream { + public SystemOutStream() { + } + + public void close() { + } + + public void flush() { + System.out.flush(); + } + + public void write(final byte[] b) throws IOException { + if (!Loggers.consoleLoggingEnabled()) { + return; + } + System.out.write(b); + } + + public void write(final byte[] b, final int off, final int len) + throws IOException { + if (!Loggers.consoleLoggingEnabled()) { + return; + } + System.out.write(b, off, len); + } + + public void write(final int b) throws IOException { + if (!Loggers.consoleLoggingEnabled()) { + return; + } + System.out.write(b); + } + } + +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/log4j/LogConfigurator.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/log4j/LogConfigurator.java new file mode 100644 index 00000000000..e0ced310f1b --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/logging/log4j/LogConfigurator.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
package org.elasticsearch.util.logging.log4j;

import com.google.common.collect.ImmutableMap;
import org.apache.log4j.PropertyConfigurator;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.FailedToResolveConfigException;
import org.elasticsearch.util.MapBuilder;
import org.elasticsearch.util.settings.ImmutableSettings;
import org.elasticsearch.util.settings.Settings;

import java.util.Map;
import java.util.Properties;

import static org.elasticsearch.util.settings.ImmutableSettings.*;

/**
 * Configures log4j from elasticsearch settings: merges logging.yml /
 * logging.json / logging.properties config files and system properties,
 * translates friendly appender/layout aliases (e.g. "console", "pattern")
 * to the actual class names, and feeds the result to log4j's
 * {@link PropertyConfigurator}.
 *
 * @author kimchy (Shay Banon)
 */
public class LogConfigurator {

    // guards against configuring log4j twice
    private static boolean loaded;

    // friendly alias -> fully qualified appender/layout class name
    private static ImmutableMap<String, String> replacements = new MapBuilder<String, String>()
            .put("console", "org.elasticsearch.util.logging.log4j.ConsoleAppender")
            .put("async", "org.apache.log4j.AsyncAppender")
            .put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender")
            .put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender")
            .put("file", "org.apache.log4j.FileAppender")
            .put("jdbc", "org.apache.log4j.JDBCAppender")
            .put("jms", "org.apache.log4j.JMSAppender")
            .put("lf5", "org.apache.log4j.LF5Appender")
            .put("ntevent", "org.apache.log4j.NTEventLogAppender")
            .put("null", "org.apache.log4j.NullAppender")
            .put("rollingFile", "org.apache.log4j.RollingFileAppender")
            .put("smtp", "org.apache.log4j.SMTPAppender")
            .put("socket", "org.apache.log4j.SocketAppender")
            .put("socketHub", "org.apache.log4j.SocketHubAppender")
            .put("syslog", "org.apache.log4j.SyslogAppender")
            .put("telnet", "org.apache.log4j.TelnetAppender")
            // layouts
            .put("simple", "org.apache.log4j.SimpleLayout")
            .put("html", "org.apache.log4j.HTMLLayout")
            .put("pattern", "org.apache.log4j.PatternLayout")
            .put("consolePattern", "org.elasticsearch.util.logging.JLinePatternLayout")
            .put("ttcc", "org.apache.log4j.TTCCLayout")
            .put("xml", "org.apache.log4j.XMLLayout")
            .immutableMap();

    /**
     * Configures log4j once; subsequent calls are no-ops. Synchronized so two
     * threads racing on startup cannot both run the configuration.
     */
    public static synchronized void configure(Settings settings) {
        if (loaded) {
            return;
        }
        loaded = true;
        Environment environment = new Environment(settings);
        ImmutableSettings.Builder settingsBuilder = settingsBuilder().putAll(settings);
        try {
            settingsBuilder.loadFromUrl(environment.resolveConfig("logging.yml"));
        } catch (FailedToResolveConfigException e) {
            // ignore, optional config file
        } catch (NoClassDefFoundError e) {
            // ignore, no yaml support on the classpath
        }
        try {
            settingsBuilder.loadFromUrl(environment.resolveConfig("logging.json"));
        } catch (FailedToResolveConfigException e) {
            // ignore, optional config file
        }
        try {
            settingsBuilder.loadFromUrl(environment.resolveConfig("logging.properties"));
        } catch (FailedToResolveConfigException e) {
            // ignore, optional config file
        }
        settingsBuilder
                .putProperties("elasticsearch.", System.getProperties())
                .putProperties("es.", System.getProperties())
                .replacePropertyPlaceholders();
        Properties props = new Properties();
        for (Map.Entry<String, String> entry : settingsBuilder.build().getAsMap().entrySet()) {
            String key = "log4j." + entry.getKey();
            String value = entry.getValue();
            if (replacements.containsKey(value)) {
                value = replacements.get(value);
            }
            // ".value"/".type" suffixes are settings-format artifacts; strip
            // them so log4j sees the plain property key
            if (key.endsWith(".value")) {
                props.setProperty(key.substring(0, key.length() - ".value".length()), value);
            } else if (key.endsWith(".type")) {
                props.setProperty(key.substring(0, key.length() - ".type".length()), value);
            } else {
                props.setProperty(key, value);
            }
        }
        PropertyConfigurator.configure(props);
    }
}
package org.elasticsearch.util.lucene;

import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.*;
import org.elasticsearch.util.SizeValue;
import org.elasticsearch.util.io.FileSystemUtils;

import java.io.*;
import java.nio.channels.FileChannel;
import java.util.Collection;

/**
 * A set of utilities for Lucene {@link Directory}.
 *
 * @author kimchy (Shay Banon)
 */
public class Directories {

    /**
     * Deletes all the files from a directory.
     *
     * @param directory The directory to delete all the files from
     * @throws IOException if an exception occurs during the delete process
     */
    public static void deleteFiles(Directory directory) throws IOException {
        String[] files = directory.listAll();
        for (String file : files) {
            directory.deleteFile(file);
        }
    }

    /**
     * Returns the estimated size of a {@link Directory}.
     */
    public static SizeValue estimateSize(Directory directory) throws IOException {
        long estimatedSize = 0;
        String[] files = directory.listAll();
        for (String file : files) {
            try {
                estimatedSize += directory.fileLength(file);
            } catch (FileNotFoundException e) {
                // ignore, the file is not there any more
            }
        }
        return new SizeValue(estimatedSize);
    }

    /**
     * Lists all the commit points in a directory.
     */
    public static Collection<IndexCommit> listCommits(Directory directory) throws IOException {
        return IndexReader.listCommits(directory);
    }

    /**
     * Computes the checksum of the given file name within the directory.
     * The underlying input is always closed, even on failure.
     */
    public static long checksum(Directory dir, String name) throws IOException {
        return checksum(dir.openInput(name));
    }

    /**
     * Copies a file out of the directory into {@code copyTo}, syncing the
     * target file once done. Uses a channel-to-channel transfer when the
     * directory is file-system based.
     */
    public static void copyFromDirectory(Directory dir, String fileName, File copyTo) throws IOException {
        if (dir instanceof FSDirectory) {
            if (!copyTo.exists()) {
                copyTo.createNewFile();
            }
            FileChannel source = null;
            FileChannel destination = null;
            try {
                source = new FileInputStream(new File(((FSDirectory) dir).getFile(), fileName)).getChannel();
                destination = new FileOutputStream(copyTo).getChannel();
                transferFully(source, destination);
            } finally {
                if (source != null) {
                    source.close();
                }
                if (destination != null) {
                    destination.close();
                }
            }
        } else {
            copyFromDirectory(dir.openInput(fileName), new FileOutputStream(copyTo));
        }
        // sync the file
        FileSystemUtils.syncFile(copyTo);
    }

    /**
     * Copies the full content of the index input to the output stream,
     * closing both when done (best effort on close).
     */
    public static void copyFromDirectory(IndexInput ii, OutputStream os) throws IOException {
        final int BUFFER_SIZE = ii.length() < 16384 ? (int) ii.length() : 16384;
        byte[] buffer = new byte[BUFFER_SIZE];
        try {
            long len = ii.length();
            long readCount = 0;
            while (readCount < len) {
                int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                ii.readBytes(buffer, 0, toRead, false);
                readCount += toRead;
                os.write(buffer, 0, toRead);
            }
        } finally {
            if (os != null) {
                try {
                    os.close();
                } catch (Exception e) {
                    // ignore close failure
                }
            }
            if (ii != null) {
                try {
                    ii.close();
                } catch (Exception e) {
                    // ignore close failure
                }
            }
        }
    }

    /**
     * Copies a file into the directory under {@code fileName}, syncing it
     * once done. Uses a channel-to-channel transfer when the directory is
     * file-system based.
     */
    public static void copyToDirectory(File copyFrom, Directory dir, String fileName) throws IOException {
        if (dir instanceof FSDirectory) {
            File destinationFile = new File(((FSDirectory) dir).getFile(), fileName);
            if (!destinationFile.exists()) {
                destinationFile.createNewFile();
            }
            FileChannel source = null;
            FileChannel destination = null;
            try {
                source = new FileInputStream(copyFrom).getChannel();
                destination = new FileOutputStream(destinationFile).getChannel();
                transferFully(source, destination);
            } finally {
                if (source != null) {
                    source.close();
                }
                if (destination != null) {
                    destination.close();
                }
            }
        } else {
            copyToDirectory(new FileInputStream(copyFrom), dir.createOutput(fileName));
        }
        dir.sync(fileName);
    }

    /**
     * Copies the full content of the input stream to the index output,
     * closing both when done (best effort on close).
     */
    public static void copyToDirectory(InputStream is, IndexOutput io) throws IOException {
        byte[] buffer = new byte[16384];
        int len;
        try {
            while ((len = is.read(buffer)) != -1) {
                io.writeBytes(buffer, len);
            }
        } finally {
            try {
                io.close();
            } catch (Exception e) {
                // ignore close failure
            }
            try {
                is.close();
            } catch (Exception e) {
                // ignore close failure
            }
        }
    }

    /**
     * Computes the checksum of the content represented by the provided index input.
     *
     * <p>Closes the index input once the checksum is computed (also on failure).
     */
    public static long checksum(IndexInput indexInput) throws IOException {
        final int BUFFER_SIZE = 16384;
        byte[] buf = new byte[BUFFER_SIZE];
        ChecksumIndexInput cii = new ChecksumIndexInput(indexInput);
        try {
            long len = cii.length();
            long readCount = 0;
            while (readCount < len) {
                int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                cii.readBytes(buf, 0, toRead);
                readCount += toRead;
            }
            return cii.getChecksum();
        } finally {
            cii.close();
        }
    }

    // FileChannel#transferFrom may transfer fewer bytes than requested, so
    // loop until the whole source has been copied
    private static void transferFully(FileChannel source, FileChannel destination) throws IOException {
        long count = source.size();
        long transferred = 0;
        while (transferred < count) {
            transferred += destination.transferFrom(source, transferred, count - transferred);
        }
    }

    private Directories() {

    }
}
+ */ + +package org.elasticsearch.util.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; + +/** + * @author kimchy (Shay Banon) + */ +public class DocumentBuilder { + + public static DocumentBuilder doc() { + return new DocumentBuilder(); + } + + public static FieldBuilder field(String name, String value) { + return field(name, value, Field.Store.YES, Field.Index.ANALYZED); + } + + public static FieldBuilder field(String name, String value, Field.Store store, Field.Index index) { + return new FieldBuilder(name, value, store, index); + } + + public static FieldBuilder field(String name, String value, Field.Store store, Field.Index index, Field.TermVector termVector) { + return new FieldBuilder(name, value, store, index, termVector); + } + + public static FieldBuilder field(String name, byte[] value, Field.Store store) { + return new FieldBuilder(name, value, store); + } + + public static FieldBuilder field(String name, byte[] value, int offset, int length, Field.Store store) { + return new FieldBuilder(name, value, offset, length, store); + } + + private final Document document; + + private DocumentBuilder() { + this.document = new Document(); + } + + public DocumentBuilder boost(float boost) { + document.setBoost(boost); + return this; + } + + public DocumentBuilder add(Fieldable field) { + document.add(field); + return this; + } + + public DocumentBuilder add(FieldBuilder fieldBuilder) { + document.add(fieldBuilder.build()); + return this; + } + + public Document build() { + return document; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/FieldBuilder.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/FieldBuilder.java new file mode 100644 index 00000000000..fdaa347dd1e --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/FieldBuilder.java @@ -0,0 +1,65 @@ +/* + * Licensed to 
Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.lucene; + +import org.apache.lucene.document.Field; + +/** + * @author kimchy (Shay Banon) + */ +public class FieldBuilder { + + private final Field field; + + FieldBuilder(String name, String value, Field.Store store, Field.Index index) { + field = new Field(name, value, store, index); + } + + FieldBuilder(String name, String value, Field.Store store, Field.Index index, Field.TermVector termVector) { + field = new Field(name, value, store, index, termVector); + } + + FieldBuilder(String name, byte[] value, Field.Store store) { + field = new Field(name, value, store); + } + + FieldBuilder(String name, byte[] value, int offset, int length, Field.Store store) { + field = new Field(name, value, offset, length, store); + } + + public FieldBuilder boost(float boost) { + field.setBoost(boost); + return this; + } + + public FieldBuilder omitNorms(boolean omitNorms) { + field.setOmitNorms(omitNorms); + return this; + } + + public FieldBuilder omitTermFreqAndPositions(boolean omitTermFreqAndPositions) { + field.setOmitTermFreqAndPositions(omitTermFreqAndPositions); + return this; + } + + public Field build() { + return field; + } +} diff --git 
package org.elasticsearch.util.lucene;

import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

/**
 * A simple delegate that delegates all {@link IndexCommit} calls to a delegated
 * {@link IndexCommit}. Subclasses override only the calls they need to intercept.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class IndexCommitDelegate extends IndexCommit {

    protected final IndexCommit delegate;

    /**
     * Constructs a new {@link IndexCommit} that will delegate all calls
     * to the provided delegate.
     *
     * @param delegate The delegate
     */
    public IndexCommitDelegate(IndexCommit delegate) {
        this.delegate = delegate;
    }

    @Override public String getSegmentsFileName() {
        return delegate.getSegmentsFileName();
    }

    @Override public Collection getFileNames() throws IOException {
        return delegate.getFileNames();
    }

    @Override public Directory getDirectory() {
        return delegate.getDirectory();
    }

    @Override public void delete() {
        delegate.delete();
    }

    @Override public boolean isDeleted() {
        return delegate.isDeleted();
    }

    @Override public boolean isOptimized() {
        return delegate.isOptimized();
    }

    // equals/hashCode forward to the delegate so a wrapper compares equal to
    // anything the wrapped commit compares equal to (identity follows the delegate).
    @Override public boolean equals(Object other) {
        return delegate.equals(other);
    }

    @Override public int hashCode() {
        return delegate.hashCode();
    }

    @Override public long getVersion() {
        return delegate.getVersion();
    }

    @Override public long getGeneration() {
        return delegate.getGeneration();
    }

    @Override public long getTimestamp() throws IOException {
        return delegate.getTimestamp();
    }

    @Override public Map getUserData() throws IOException {
        return delegate.getUserData();
    }
}
package org.elasticsearch.util.lucene;

import org.apache.lucene.index.IndexWriter;
import org.elasticsearch.util.logging.Loggers;
import org.slf4j.Logger;

import java.lang.reflect.Field;
import java.lang.reflect.Method;

/**
 * Reflection based helpers over {@link IndexWriter}, used to reach internals
 * (the package private DocumentsWriter) that Lucene does not expose publicly.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class IndexWriters {

    private static final Logger logger = Loggers.getLogger(IndexWriters.class);

    // Resolved once in the static initializer. They are only dereferenced when
    // docWriterReflection is true, so a partially failed lookup is never used.
    private static final Field docWriterField;

    private static final Method docWriterGetRAMUsed;

    private static final boolean docWriterReflection;

    static {
        Field fieldX = null;
        Method methodX = null;
        boolean docWriterReflectionX = false;
        try {
            fieldX = IndexWriter.class.getDeclaredField("docWriter");
            fieldX.setAccessible(true);
            Class<?> docWriter = IndexWriters.class.getClassLoader().loadClass("org.apache.lucene.index.DocumentsWriter");
            methodX = docWriter.getDeclaredMethod("getRAMUsed");
            methodX.setAccessible(true);
            docWriterReflectionX = true;
        } catch (Exception e) {
            // best effort: estimateRamSize degrades to -1 when internals changed
            logger.warn("Failed to get doc writer field", e);
        }
        docWriterField = fieldX;
        docWriterGetRAMUsed = methodX;
        docWriterReflection = docWriterReflectionX;
    }

    /**
     * Estimates the RAM currently used by the writer's in memory indexing
     * buffer, or {@code -1} when the internals could not be accessed.
     *
     * @param indexWriter the writer to inspect
     * @throws Exception if the reflective access fails at call time
     */
    public static long estimateRamSize(IndexWriter indexWriter) throws Exception {
        if (!docWriterReflection) {
            return -1;
        }
        Object docWriter = docWriterField.get(indexWriter);
        return (Long) docWriterGetRAMUsed.invoke(docWriter);
    }

    private IndexWriters() {
        // utility class, no instances
    }
}
package org.elasticsearch.util.lucene;

import org.elasticsearch.util.logging.Loggers;
import org.slf4j.Logger;

import java.io.OutputStream;
import java.io.PrintStream;

/**
 * A {@link java.io.PrintStream} that logs each {@link #println(String)} into a logger
 * under trace level.
 *
 * <p>Provides also factory methods that basically append to the logger name the
 * {@link #SUFFIX}.
 *
 * @author kimchy (Shay Banon)
 */
public class LoggerInfoStream extends PrintStream {

    public static final String SUFFIX = ".lucene";

    /**
     * Creates a new {@link LoggerInfoStream} based on the provided logger
     * by appending to its {@link Logger#getName()} the {@link #SUFFIX}.
     */
    public static LoggerInfoStream getInfoStream(Logger logger) {
        return new LoggerInfoStream(Loggers.getLogger(logger, SUFFIX));
    }

    /**
     * Creates a new {@link LoggerInfoStream} based on the provided name
     * by appending to it the {@link #SUFFIX}.
     */
    public static LoggerInfoStream getInfoStream(String name) {
        return new LoggerInfoStream(Loggers.getLogger(name + SUFFIX));
    }

    private final Logger logger;

    /**
     * Constructs a new instance based on the provided logger. Will output
     * each {@link #println(String)} operation as a trace level message.
     */
    public LoggerInfoStream(Logger logger) {
        // PrintStream(OutputStream) rejects null on Java 7+ with an NPE, so a
        // no-op sink is passed instead of null. All output Lucene produces goes
        // through the overridden println(String) below and never reaches it.
        super(new OutputStream() {
            @Override public void write(int b) {
                // discard
            }
        });
        this.logger = logger;
    }

    /**
     * Override only the method Lucene actually uses.
     */
    @Override public void println(String x) {
        logger.trace(x);
    }
}
package org.elasticsearch.util.lucene;

import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.*;
import org.apache.lucene.util.Version;
import org.elasticsearch.util.gnu.trove.TIntArrayList;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Static helpers around Lucene: shared analyzers, doc id lookups, quiet
 * close, and hand-rolled wire serialization for {@link TopDocs} and
 * {@link Explanation}. The read/write pairs here must stay in exact
 * field-order lockstep with each other.
 *
 * @author kimchy (Shay Banon)
 */
public class Lucene {

    public static final StandardAnalyzer STANDARD_ANALYZER = new StandardAnalyzer(Version.LUCENE_CURRENT);
    public static final KeywordAnalyzer KEYWORD_ANALYZER = new KeywordAnalyzer();

    // Sentinel returned by docId() when the term matches no document.
    public static final int NO_DOC = -1;

    /**
     * Counts the documents matching the query whose score is strictly
     * greater than minScore.
     */
    public static long count(IndexSearcher searcher, Query query, float minScore) throws IOException {
        CountCollector countCollector = new CountCollector(minScore);
        searcher.search(query, countCollector);
        return countCollector.count();
    }

    /**
     * Returns the doc id of the first document containing the term, or
     * {@link #NO_DOC} when none does.
     */
    public static int docId(IndexReader reader, Term term) throws IOException {
        TermDocs termDocs = reader.termDocs(term);
        try {
            if (termDocs.next()) {
                return termDocs.doc();
            }
            return NO_DOC;
        } finally {
            termDocs.close();
        }
    }

    /**
     * Collects all doc ids containing the term; expectedSize only presizes
     * the result list.
     */
    public static TIntArrayList docIds(IndexReader reader, Term term, int expectedSize) throws IOException {
        TermDocs termDocs = reader.termDocs(term);
        TIntArrayList list = new TIntArrayList(expectedSize);
        try {
            while (termDocs.next()) {
                list.add(termDocs.doc());
            }
        } finally {
            termDocs.close();
        }
        return list;
    }

    /**
     * Closes the index reader, returning false if it failed to close.
     * A null reader counts as successfully closed.
     */
    public static boolean safeClose(IndexReader reader) {
        if (reader == null) {
            return true;
        }
        try {
            reader.close();
            return true;
        } catch (IOException e) {
            return false;
        }
    }

    /**
     * Closes the index writer, returning false if it failed to close.
     * A null writer counts as successfully closed.
     */
    public static boolean safeClose(IndexWriter writer) {
        if (writer == null) {
            return true;
        }
        try {
            writer.close();
            return true;
        } catch (IOException e) {
            return false;
        }
    }

    /**
     * Reads a {@link TopDocs} written by {@link #writeTopDocs}; returns null
     * when the writer serialized "no docs".
     *
     * Wire format: boolean hasDocs, boolean isFieldDocs, then either the
     * TopFieldDocs or the plain TopDocs layout mirroring writeTopDocs exactly.
     */
    public static TopDocs readTopDocs(DataInput in) throws IOException {
        if (!in.readBoolean()) {
            // no docs
            return null;
        }
        if (in.readBoolean()) {
            int totalHits = in.readInt();
            float maxScore = in.readFloat();

            SortField[] fields = new SortField[in.readInt()];
            for (int i = 0; i < fields.length; i++) {
                fields[i] = new SortField(in.readUTF(), in.readInt(), in.readBoolean());
            }

            FieldDoc[] fieldDocs = new FieldDoc[in.readInt()];
            for (int i = 0; i < fieldDocs.length; i++) {
                // Type tags written by writeTopDocs:
                // 0=String, 1=Integer, 2=Long, 3=Float, 4=Double, 5=Byte
                Comparable[] cFields = new Comparable[in.readInt()];
                for (int j = 0; j < cFields.length; j++) {
                    byte type = in.readByte();
                    if (type == 0) {
                        cFields[j] = in.readUTF();
                    } else if (type == 1) {
                        cFields[j] = in.readInt();
                    } else if (type == 2) {
                        cFields[j] = in.readLong();
                    } else if (type == 3) {
                        cFields[j] = in.readFloat();
                    } else if (type == 4) {
                        cFields[j] = in.readDouble();
                    } else if (type == 5) {
                        cFields[j] = in.readByte();
                    } else {
                        throw new IOException("Can't match type [" + type + "]");
                    }
                }
                // Relies on Java's left-to-right argument evaluation:
                // doc id is read before score, matching the write order.
                fieldDocs[i] = new FieldDoc(in.readInt(), in.readFloat(), cFields);
            }
            return new TopFieldDocs(totalHits, fieldDocs, fields, maxScore);
        } else {
            int totalHits = in.readInt();
            float maxScore = in.readFloat();

            ScoreDoc[] scoreDocs = new ScoreDoc[in.readInt()];
            for (int i = 0; i < scoreDocs.length; i++) {
                scoreDocs[i] = new ScoreDoc(in.readInt(), in.readFloat());
            }
            return new TopDocs(totalHits, scoreDocs, maxScore);
        }
    }

    /**
     * Writes the top docs, skipping the first {@code from} score docs.
     *
     * @param from number of leading hits to omit (already consumed by the caller)
     */
    public static void writeTopDocs(DataOutput out, TopDocs topDocs, int from) throws IOException {
        // NOTE(review): when from > scoreDocs.length this serializes "no docs"
        // (readTopDocs then returns null) — confirm callers expect that.
        if (topDocs.scoreDocs.length - from < 0) {
            out.writeBoolean(false);
            return;
        }
        out.writeBoolean(true);
        if (topDocs instanceof TopFieldDocs) {
            out.writeBoolean(true);
            TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs;

            out.writeInt(topDocs.totalHits);
            out.writeFloat(topDocs.getMaxScore());

            out.writeInt(topFieldDocs.fields.length);
            for (SortField sortField : topFieldDocs.fields) {
                out.writeUTF(sortField.getField());
                out.writeInt(sortField.getType());
                out.writeBoolean(sortField.getReverse());
            }

            out.writeInt(topDocs.scoreDocs.length - from);
            int index = 0;
            for (ScoreDoc doc : topFieldDocs.scoreDocs) {
                if (index++ < from) {
                    continue;
                }
                FieldDoc fieldDoc = (FieldDoc) doc;
                out.writeInt(fieldDoc.fields.length);
                for (Comparable field : fieldDoc.fields) {
                    // Type tags must stay in sync with readTopDocs above.
                    Class type = field.getClass();
                    if (type == String.class) {
                        out.write(0);
                        out.writeUTF((String) field);
                    } else if (type == Integer.class) {
                        out.write(1);
                        out.writeInt((Integer) field);
                    } else if (type == Long.class) {
                        out.write(2);
                        out.writeLong((Long) field);
                    } else if (type == Float.class) {
                        out.write(3);
                        out.writeFloat((Float) field);
                    } else if (type == Double.class) {
                        out.write(4);
                        out.writeDouble((Double) field);
                    } else if (type == Byte.class) {
                        out.write(5);
                        out.write((Byte) field);
                    } else {
                        throw new IOException("Can't handle sort field value of type [" + type + "]");
                    }
                }

                out.writeInt(doc.doc);
                out.writeFloat(doc.score);
            }
        } else {
            out.writeBoolean(false);
            out.writeInt(topDocs.totalHits);
            out.writeFloat(topDocs.getMaxScore());

            out.writeInt(topDocs.scoreDocs.length - from);
            int index = 0;
            for (ScoreDoc doc : topDocs.scoreDocs) {
                if (index++ < from) {
                    continue;
                }
                out.writeInt(doc.doc);
                out.writeFloat(doc.score);
            }
        }
    }

    /**
     * Reads an {@link Explanation} (recursively, including sub details)
     * written by {@link #writeExplanation}.
     */
    public static Explanation readExplanation(DataInput in) throws IOException {
        float value = in.readFloat();
        String description = in.readUTF();
        Explanation explanation = new Explanation(value, description);
        if (in.readBoolean()) {
            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                explanation.addDetail(readExplanation(in));
            }
        }
        return explanation;
    }

    /**
     * Writes an {@link Explanation}, recursing into its details when present.
     */
    public static void writeExplanation(DataOutput out, Explanation explanation) throws IOException {
        out.writeFloat(explanation.getValue());
        out.writeUTF(explanation.getDescription());
        Explanation[] subExplanations = explanation.getDetails();
        if (subExplanations == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeInt(subExplanations.length);
            for (Explanation subExp : subExplanations) {
                writeExplanation(out, subExp);
            }
        }
    }

    /**
     * Collector that counts hits scoring strictly above a minimum score.
     */
    public static class CountCollector extends Collector {

        private final float minScore;
        private Scorer scorer;
        private long count;

        public CountCollector(float minScore) {
            this.minScore = minScore;
        }

        public long count() {
            return this.count;
        }

        @Override public void setScorer(Scorer scorer) throws IOException {
            this.scorer = scorer;
        }

        @Override public void collect(int doc) throws IOException {
            // strictly greater: docs scoring exactly minScore are not counted
            if (scorer.score() > minScore) {
                count++;
            }
        }

        @Override public void setNextReader(IndexReader reader, int docBase) throws IOException {
        }

        @Override public boolean acceptsDocsOutOfOrder() {
            // order does not matter for counting
            return true;
        }
    }

    private Lucene() {

    }
}
b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/ReaderSearcherHolder.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.lucene; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.util.lease.Releasable; + +/** + * A very simple holder for a tuple of reader and searcher. 
+ * + * @author kimchy (Shay Banon) + */ +public class ReaderSearcherHolder implements Releasable { + + private final IndexReader indexReader; + + private final IndexSearcher indexSearcher; + + public ReaderSearcherHolder(IndexReader indexReader) { + this(indexReader, new IndexSearcher(indexReader)); + } + + public ReaderSearcherHolder(IndexReader indexReader, IndexSearcher indexSearcher) { + this.indexReader = indexReader; + this.indexSearcher = indexSearcher; + } + + public IndexReader reader() { + return indexReader; + } + + public IndexSearcher searcher() { + return indexSearcher; + } + + @Override public boolean release() throws ElasticSearchException { + try { + indexSearcher.close(); + } catch (Exception e) { + // do nothing + } + try { + indexReader.close(); + } catch (Exception e) { + // do nothing + } + return true; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/docidset/DocIdSetCollector.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/docidset/DocIdSetCollector.java new file mode 100644 index 00000000000..f79be4efcae --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/docidset/DocIdSetCollector.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
package org.elasticsearch.util.lucene.docidset;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.OpenBitSetDISI;

import java.io.IOException;

/**
 * A {@link Collector} decorator that forwards every call to a wrapped
 * collector while also recording each collected doc id (shifted by the
 * current reader's doc base) into an {@link OpenBitSetDISI}.
 *
 * @author kimchy (Shay Banon)
 */
public class DocIdSetCollector extends Collector {

    private final Collector collector;

    private final OpenBitSetDISI docIdSet;

    // doc base of the reader currently being collected; added to each doc id
    private int docBase;

    public DocIdSetCollector(Collector collector, IndexReader reader) {
        this.collector = collector;
        this.docIdSet = new OpenBitSetDISI(reader.maxDoc());
    }

    /**
     * Returns the bit set of all doc ids seen so far.
     */
    public OpenBitSetDISI docIdSet() {
        return docIdSet;
    }

    @Override public void setScorer(Scorer scorer) throws IOException {
        collector.setScorer(scorer);
    }

    @Override public void collect(int doc) throws IOException {
        collector.collect(doc);
        docIdSet.fastSet(docBase + doc);
    }

    @Override public void setNextReader(IndexReader reader, int docBase) throws IOException {
        this.docBase = docBase;
        collector.setNextReader(reader, docBase);
    }

    @Override public boolean acceptsDocsOutOfOrder() {
        return collector.acceptsDocsOutOfOrder();
    }
}
package org.elasticsearch.util.lucene.docidset;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.OpenBitSetDISI;

import java.io.IOException;

/**
 * Helpers for working with {@link DocIdSet}s.
 *
 * @author kimchy (Shay Banon)
 */
public class DocIdSets {

    /**
     * Returns a cacheable version of the doc id set (might be the same instance
     * provided as a parameter).
     */
    public static DocIdSet cacheable(IndexReader reader, DocIdSet docIdSet) throws IOException {
        if (docIdSet.isCacheable()) {
            return docIdSet;
        }
        DocIdSetIterator it = docIdSet.iterator();
        if (it == null) {
            // iterator() is allowed to return null; stand in the shared
            // empty set, which is cacheable
            return DocIdSet.EMPTY_DOCIDSET;
        }
        // materialize the iterator into a bit set, which is cacheable
        return new OpenBitSetDISI(it, reader.maxDoc());
    }

    private DocIdSets() {
        // utility class, no instances
    }

}
package org.elasticsearch.util.lucene.search;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.OpenBitSet;

import java.io.IOException;

/**
 * A simple filter matching all documents that contain a specific term.
 *
 * @author kimchy (Shay Banon)
 */
public class TermFilter extends Filter {

    private final Term term;

    public TermFilter(Term term) {
        this.term = term;
    }

    public Term getTerm() {
        return term;
    }

    @Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        OpenBitSet result = new OpenBitSet(reader.maxDoc());
        TermDocs td = reader.termDocs();
        try {
            // seek rather than reader.termDocs(term) so the enum is
            // positioned on exactly this term before iterating
            td.seek(term);
            while (td.next()) {
                result.set(td.doc());
            }
        } finally {
            td.close();
        }
        return result;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        TermFilter other = (TermFilter) o;
        return term == null ? other.term == null : term.equals(other.term);
    }

    @Override
    public int hashCode() {
        return term == null ? 0 : term.hashCode();
    }
}
package org.elasticsearch.util.lucene.versioned;

import org.elasticsearch.util.concurrent.ThreadSafe;

import java.util.concurrent.ConcurrentHashMap;

/**
 * An implementation of {@link VersionedMap} based on {@link ConcurrentHashMap}.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public class ConcurrentVersionedMap implements VersionedMap {

    private final ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<Integer, Integer>();

    @Override public boolean beforeVersion(int key, int versionToCheck) {
        Integer version = map.get(key);
        // an absent key counts as "before" any version
        return version == null || versionToCheck < version;
    }

    @Override public void putVersion(int key, int version) {
        map.put(key, version);
    }

    @Override public void putVersionIfAbsent(int key, int version) {
        map.putIfAbsent(key, version);
    }

    @Override public void clear() {
        map.clear();
    }
}
package org.elasticsearch.util.lucene.versioned;

import org.elasticsearch.util.concurrent.ThreadSafe;
import org.elasticsearch.util.trove.ExtTIntIntHashMap;

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * An implementation of {@link VersionedMap} based on trove
 * {@link org.elasticsearch.util.gnu.trove.TIntIntHashMap}. The map is split
 * into lock-striped segments (ConcurrentHashMap style) so readers and writers
 * of different segments do not contend.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public class NativeVersionedMap implements VersionedMap {

    /**
     * Mask value for indexing into segments. The upper bits of a
     * key's hash code are used to choose the segment.
     */
    private final int segmentMask;

    /**
     * Shift value for indexing within segments.
     */
    private final int segmentShift;

    private final Segment[] segments;

    public NativeVersionedMap() {
        // 16 concurrency level by default, mirroring ConcurrentHashMap's default
        this(16);
    }

    public NativeVersionedMap(int concurrencyLevel) {
        // Find power-of-two sizes best matching arguments
        int sshift = 0;
        int ssize = 1;
        while (ssize < concurrencyLevel) {
            ++sshift;
            ssize <<= 1;
        }
        segmentShift = 32 - sshift;
        segmentMask = ssize - 1;
        this.segments = new Segment[ssize];
        for (int i = 0; i < segments.length; i++) {
            segments[i] = new Segment();
        }
    }

    @Override public boolean beforeVersion(int key, int versionToCheck) {
        Segment segment = segmentFor(hash(key));
        segment.rwl.readLock().lock();
        try {
            // -1 is the segment map's default return value (set in Segment),
            // i.e. the key is absent and counts as "before" any version
            int result = segment.map.get(key);
            return result == -1 || versionToCheck < result;
        } finally {
            segment.rwl.readLock().unlock();
        }
    }

    @Override public void putVersion(int key, int version) {
        Segment segment = segmentFor(hash(key));
        segment.rwl.writeLock().lock();
        try {
            segment.map.put(key, version);
        } finally {
            segment.rwl.writeLock().unlock();
        }
    }

    @Override public void putVersionIfAbsent(int key, int version) {
        Segment segment = segmentFor(hash(key));
        segment.rwl.writeLock().lock();
        try {
            // containsKey + put is atomic here because both happen under
            // the segment's write lock
            if (!segment.map.containsKey(key)) {
                segment.map.put(key, version);
            }
        } finally {
            segment.rwl.writeLock().unlock();
        }
    }

    @Override public void clear() {
        // clears segment by segment; not atomic across the whole map
        for (Segment segment : segments) {
            segment.rwl.writeLock().lock();
            try {
                segment.map.clear();
            } finally {
                segment.rwl.writeLock().unlock();
            }
        }
    }

    /**
     * Applies a supplemental hash function to a given hashCode, which
     * defends against poor quality hash functions. This is critical
     * because ConcurrentHashMap uses power-of-two length hash tables,
     * that otherwise encounter collisions for hashCodes that do not
     * differ in lower or upper bits.
     */
    private static int hash(int h) {
        // Spread bits to regularize both segment and index locations,
        // using variant of single-word Wang/Jenkins hash.
        h += (h << 15) ^ 0xffffcd7d;
        h ^= (h >>> 10);
        h += (h << 3);
        h ^= (h >>> 6);
        h += (h << 2) + (h << 14);
        return h ^ (h >>> 16);
    }

    /**
     * Returns the segment that should be used for key with given hash
     *
     * @param hash the hash code for the key
     * @return the segment
     */
    final Segment segmentFor(int hash) {
        return segments[(hash >>> segmentShift) & segmentMask];
    }

    // One lock-striped slice of the map: a primitive int->int table guarded
    // by its own read-write lock.
    private static class Segment {
        final ReadWriteLock rwl = new ReentrantReadWriteLock();
        final ExtTIntIntHashMap map = new ExtTIntIntHashMap();

        private Segment() {
            // -1 marks "key absent"; beforeVersion relies on this sentinel
            map.defaultReturnValue(-1);
        }
    }
}
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.lucene.versioned; + +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.concurrent.highscalelib.NonBlockingHashMapLong; + +/** + * An implementation of {@link VersionedMap} based on {@link NonBlockingHashMapLong}. + * + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public class NonBlockingVersionedMap implements VersionedMap { + + private final NonBlockingHashMapLong map = new NonBlockingHashMapLong(); + + @Override public boolean beforeVersion(int key, int versionToCheck) { + Integer result = map.get(key); + return result == null || versionToCheck < result; + } + + @Override public void putVersion(int key, int version) { + map.put(key, version); + } + + @Override public void putVersionIfAbsent(int key, int version) { + map.putIfAbsent(key, version); + } + + @Override public void clear() { + map.clear(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/versioned/VersionedIndexReader.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/versioned/VersionedIndexReader.java new file mode 100644 index 00000000000..66768f4bb50 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/versioned/VersionedIndexReader.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.lucene.versioned;

import org.apache.lucene.index.*;
import org.elasticsearch.util.concurrent.ThreadSafe;

import java.io.IOException;

/**
 * A {@link FilterIndexReader} that filters term iteration through a
 * {@link VersionedMap}: a document is returned by termDocs/termPositions only
 * if {@code versionedMap.beforeVersion(doc, version)} is true — i.e. the doc
 * has no recorded version, or this reader's snapshot {@code version} is lower
 * than the recorded one.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public class VersionedIndexReader extends FilterIndexReader {

    // The snapshot version this reader filters against.
    protected final int version;

    // Shared doc-id -> version table consulted on every iteration step.
    protected final VersionedMap versionedMap;

    public VersionedIndexReader(IndexReader in, int version, VersionedMap versionedMap) {
        super(in);
        this.version = version;
        this.versionedMap = versionedMap;
    }

    @Override public TermDocs termDocs() throws IOException {
        return new VersionedTermDocs(in.termDocs());
    }

    @Override public TermDocs termDocs(Term term) throws IOException {
        return new VersionedTermDocs(in.termDocs(term));
    }

    @Override public TermPositions termPositions() throws IOException {
        return new VersionedTermPositions(in.termPositions());
    }

    @Override public TermPositions termPositions(Term term) throws IOException {
        return new VersionedTermPositions(in.termPositions(term));
    }


    // Wraps a TermDocs and skips over documents filtered out by the version map.
    private class VersionedTermDocs extends FilterTermDocs {

        public VersionedTermDocs(TermDocs in) {
            super(in);
        }

        public boolean next() throws IOException {
            // advance until a visible doc is found or the underlying enum ends
            while (in.next()) {
                if (versionedMap.beforeVersion(in.doc(), version)) return true;
            }
            return false;
        }

        // Bulk read: delegates to next() one doc at a time (ignores the
        // underlying bulk API) and may return fewer than docs.length entries;
        // freqs is filled in lockstep with docs.
        public int read(final int[] docs, final int[] freqs) throws IOException {
            int i = 0;
            while (i < docs.length) {
                if (!in.next()) return i;

                int doc = in.doc();
                if (versionedMap.beforeVersion(doc, version)) {
                    docs[i] = doc;
                    freqs[i] = in.freq();
                    i++;
                }
            }
            return i;
        }

        public boolean skipTo(int i) throws IOException {
            if (!in.skipTo(i)) return false;
            if (versionedMap.beforeVersion(in.doc(), version)) return true;

            // the doc skipTo landed on is filtered; advance to the next
            // visible doc (may be beyond i)
            return next();
        }
    }

    // Same filtering for TermPositions; position/payload calls are delegated
    // untouched to the underlying enum.
    private class VersionedTermPositions extends VersionedTermDocs implements TermPositions {
        final TermPositions _tp;

        public VersionedTermPositions(TermPositions in) {
            super(in);
            _tp = in;
        }

        public int nextPosition() throws IOException {
            return _tp.nextPosition();
        }

        public int getPayloadLength() {
            return _tp.getPayloadLength();
        }

        public byte[] getPayload(byte[] data, int offset) throws IOException {
            return _tp.getPayload(data, offset);
        }

        public boolean isPayloadAvailable() {
            return _tp.isPayloadAvailable();
        }
    }

}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/versioned/VersionedMap.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/versioned/VersionedMap.java
new file mode 100644
index 00000000000..11bc0cae57c
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/lucene/versioned/VersionedMap.java
@@ -0,0 +1,56 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.
See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.lucene.versioned;

import org.elasticsearch.util.concurrent.ThreadSafe;

/**
 * A versioned map, allowing to put version numbers associated with specific
 * keys.
 *
 * <p>Note: versions can be assumed to be &gt;= 0.
 *
 * @author kimchy (Shay Banon)
 */
@ThreadSafe
public interface VersionedMap {

    /**
     * Returns true if the versionToCheck is smaller than the current version
     * associated with the key. If there is no version associated with the key, then
     * it should return true as well.
     */
    boolean beforeVersion(int key, int versionToCheck);

    /**
     * Puts (and replaces if it exists) the current key with the provided version.
     */
    void putVersion(int key, int version);

    /**
     * Puts the version with the key only if it is absent.
     */
    void putVersionIfAbsent(int key, int version);

    /**
     * Clears the map.
     */
    void clear();
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/ImmutableSettings.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/ImmutableSettings.java
new file mode 100644
index 00000000000..1537547b543
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/ImmutableSettings.java
@@ -0,0 +1,471 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
+ */ + +package org.elasticsearch.util.settings; + +import org.elasticsearch.util.*; +import org.elasticsearch.util.concurrent.Immutable; +import org.elasticsearch.util.concurrent.ThreadSafe; +import org.elasticsearch.util.io.Streams; +import org.elasticsearch.util.settings.loader.SettingsLoader; +import org.elasticsearch.util.settings.loader.SettingsLoaderFactory; + +import java.io.*; +import java.net.URL; +import java.util.*; +import java.util.concurrent.TimeUnit; + +import static com.google.common.collect.Lists.*; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +@Immutable +public class ImmutableSettings implements Settings { + + private Map settings; + + private Settings globalSettings; + + private transient ClassLoader classLoader; + + private ImmutableSettings(Map settings, Settings globalSettings, ClassLoader classLoader) { + this.settings = settings; + this.globalSettings = globalSettings == null ? this : globalSettings; + this.classLoader = classLoader == null ? buildClassLoader() : classLoader; + } + + @Override public Settings getGlobalSettings() { + return this.globalSettings; + } + + @Override public ClassLoader getClassLoader() { + return this.classLoader; + } + + @Override public Map getAsMap() { + return Collections.unmodifiableMap(this.settings); + } + + @Override public Settings getComponentSettings(Class component) { + return getComponentSettings("org.elasticsearch", component); + } + + @Override public Settings getComponentSettings(String prefix, Class component) { + String type = component.getName(); + if (!type.startsWith(prefix)) { + throw new SettingsException("Component [" + type + "] does not start with prefix [" + prefix + "]"); + } + String settingPrefix = type.substring(prefix.length() + 1); // 1 for the '.' 
+ settingPrefix = settingPrefix.substring(0, settingPrefix.length() - component.getSimpleName().length() - 1); // remove the simple class name + Builder builder = new Builder(); + for (Map.Entry entry : getAsMap().entrySet()) { + if (entry.getKey().startsWith(settingPrefix)) { + if (entry.getKey().length() <= settingPrefix.length()) { + // ignore this one + continue; + } + builder.put(entry.getKey().substring(settingPrefix.length() + 1), entry.getValue()); + } + } + builder.globalSettings(this); + builder.classLoader(classLoader); + return builder.build(); + } + + @Override public String get(String setting) { + return settings.get(setting); + } + + @Override public String get(String setting, String defaultValue) { + String retVal = settings.get(setting); + return retVal == null ? defaultValue : retVal; + } + + @Override public Float getAsFloat(String setting, Float defaultValue) { + String sValue = get(setting); + if (sValue == null) { + return defaultValue; + } + try { + return Float.parseFloat(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse float setting [" + setting + "] with value [" + sValue + "]", e); + } + } + + @Override public Double getAsDouble(String setting, Double defaultValue) { + String sValue = get(setting); + if (sValue == null) { + return defaultValue; + } + try { + return Double.parseDouble(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse double setting [" + setting + "] with value [" + sValue + "]", e); + } + } + + @Override public Integer getAsInt(String setting, Integer defaultValue) { + String sValue = get(setting); + if (sValue == null) { + return defaultValue; + } + try { + return Integer.parseInt(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse int setting [" + setting + "] with value [" + sValue + "]", e); + } + } + + @Override public Long getAsLong(String setting, Long defaultValue) { + String sValue = 
get(setting); + if (sValue == null) { + return defaultValue; + } + try { + return Long.parseLong(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse long setting [" + setting + "] with value [" + sValue + "]", e); + } + } + + @Override public Boolean getAsBoolean(String setting, Boolean defaultValue) { + String sValue = get(setting); + if (sValue == null) { + return defaultValue; + } + try { + return Boolean.valueOf(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse boolean setting [" + setting + "] with value [" + sValue + "]", e); + } + } + + @Override public TimeValue getAsTime(String setting, TimeValue defaultValue) { + return TimeValue.parseTimeValue(get(setting), defaultValue); + } + + @Override public SizeValue getAsSize(String setting, SizeValue defaultValue) throws SettingsException { + return SizeValue.parse(get(setting), defaultValue); + } + + @SuppressWarnings({"unchecked"}) + @Override public Class getAsClass(String setting, Class defaultClazz) throws SettingsException { + String sValue = get(setting); + if (sValue == null) { + return defaultClazz; + } + try { + return (Class) getClassLoader().loadClass(sValue); + } catch (ClassNotFoundException e) { + throw new SettingsException("Failed to load class setting [" + setting + "] with value [" + sValue + "]", e); + } + } + + @Override public Class getAsClass(String setting, Class defaultClazz, String prefixPackage, String suffixClassName) throws SettingsException { + String sValue = get(setting); + if (sValue == null) { + return defaultClazz; + } + String fullClassName = sValue; + try { + return (Class) getClassLoader().loadClass(fullClassName); + } catch (ClassNotFoundException e) { + fullClassName = prefixPackage + Strings.capitalize(sValue) + suffixClassName; + try { + return (Class) getClassLoader().loadClass(fullClassName); + } catch (ClassNotFoundException e1) { + fullClassName = prefixPackage + sValue + "." 
+ Strings.capitalize(sValue) + suffixClassName; + try { + return (Class) getClassLoader().loadClass(fullClassName); + } catch (ClassNotFoundException e2) { + throw new NoClassSettingsException("Failed to load class setting [" + setting + "] with value [" + sValue + "]", e); + } + } + } + } + + @Override public String[] getAsArray(String settingPrefix) throws SettingsException { + List result = newArrayList(); + int counter = 0; + while (true) { + String value = get(settingPrefix + '.' + (counter++)); + if (value == null) { + break; + } + result.add(value); + } + return result.toArray(new String[result.size()]); + } + + @Override public Map getGroups(String settingPrefix) throws SettingsException { + if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { + settingPrefix = settingPrefix + "."; + } + // we don't really care that it might happen twice + Map> map = new LinkedHashMap>(); + for (Object o : settings.keySet()) { + String setting = (String) o; + if (setting.startsWith(settingPrefix)) { + String nameValue = setting.substring(settingPrefix.length()); + int dotIndex = nameValue.indexOf('.'); + if (dotIndex == -1) { + throw new SettingsException("Failed to get setting group for [" + settingPrefix + "] setting prefix and setting [" + setting + "] because of a missing '.'"); + } + String name = nameValue.substring(0, dotIndex); + String value = nameValue.substring(dotIndex + 1); + Map groupSettings = map.get(name); + if (groupSettings == null) { + groupSettings = new LinkedHashMap(); + map.put(name, groupSettings); + } + groupSettings.put(value, get(setting)); + } + } + Map retVal = new LinkedHashMap(); + for (Map.Entry> entry : map.entrySet()) { + retVal.put(entry.getKey(), new ImmutableSettings(Collections.unmodifiableMap(entry.getValue()), globalSettings, classLoader)); + } + return Collections.unmodifiableMap(retVal); + } + + private static ClassLoader buildClassLoader() { + return Classes.getDefaultClassLoader(); + } + + public static Settings 
readSettingsFromStream(DataInput in) throws IOException { + return readSettingsFromStream(in, null); + } + + public static Settings readSettingsFromStream(DataInput in, Settings globalSettings) throws IOException { + Builder builder = new Builder(); + int numberOfSettings = in.readInt(); + for (int i = 0; i < numberOfSettings; i++) { + builder.put(in.readUTF(), in.readUTF()); + } + builder.globalSettings(globalSettings); + return builder.build(); + } + + public static void writeSettingsToStream(Settings settings, DataOutput out) throws IOException { + out.writeInt(settings.getAsMap().size()); + for (Map.Entry entry : settings.getAsMap().entrySet()) { + out.writeUTF(entry.getKey()); + out.writeUTF(entry.getValue()); + } + } + + public static Builder settingsBuilder() { + return new Builder(); + } + + public static class Builder implements Settings.Builder { + + public static final Settings EMPTY_SETTINGS = new Builder().build(); + + private final Map map = new LinkedHashMap(); + + private ClassLoader classLoader; + + private Settings globalSettings; + + public Builder() { + + } + + public String get(String key) { + return map.get(key); + } + + public Builder put(String key, String value) { + map.put(key, value); + return this; + } + + public Builder putClass(String key, Class clazz) { + map.put(key, clazz.getName()); + return this; + } + + public Builder putBoolean(String setting, boolean value) { + put(setting, String.valueOf(value)); + return this; + } + + public Builder putInt(String setting, int value) { + put(setting, String.valueOf(value)); + return this; + } + + public Builder putLong(String setting, long value) { + put(setting, String.valueOf(value)); + return this; + } + + public Builder putFloat(String setting, float value) { + put(setting, String.valueOf(value)); + return this; + } + + public Builder putDouble(String setting, double value) { + put(setting, String.valueOf(value)); + return this; + } + + public Builder putTime(String setting, long value, 
TimeUnit timeUnit) { + putLong(setting, timeUnit.toMillis(value)); + return this; + } + + public Builder putSize(String setting, long value, SizeUnit sizeUnit) { + putLong(setting, sizeUnit.toBytes(value)); + return this; + } + + public Builder putGroup(String settingPrefix, String groupName, String[] settings, String[] values) throws SettingsException { + if (settings.length != values.length) { + throw new SettingsException("The settings length must match the value length"); + } + for (int i = 0; i < settings.length; i++) { + if (values[i] == null) { + continue; + } + put(settingPrefix + "." + groupName + "." + settings[i], values[i]); + } + return this; + } + + public Builder putAll(Settings settings) { + map.putAll(settings.getAsMap()); + return this; + } + + public Builder putAll(Map settings) { + map.putAll(settings); + return this; + } + + public Builder putAll(Properties properties) { + for (Map.Entry entry : properties.entrySet()) { + map.put((String) entry.getKey(), (String) entry.getValue()); + } + return this; + } + + /** + * Loads settings from the actual string content that represents them. 
+ */ + public Builder loadFromSource(String source) { + SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromSource(source); + try { + Map loadedSettings = settingsLoader.load(source); + putAll(loadedSettings); + } catch (IOException e) { + throw new SettingsException("Failed to load settings from [" + source + "]"); + } + return this; + } + + public Builder loadFromUrl(URL url) throws SettingsException { + try { + return loadFromStream(url.toExternalForm(), url.openStream()); + } catch (IOException e) { + throw new SettingsException("Failed to open stream for url [" + url.toExternalForm() + "]", e); + } + } + + public Builder loadFromStream(String resourceName, InputStream is) throws SettingsException { + SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromResource(resourceName); + try { + Map loadedSettings = settingsLoader.load(Streams.copyToString(new InputStreamReader(is))); + putAll(loadedSettings); + } catch (IOException e) { + throw new SettingsException("Failed to load settings from [" + resourceName + "]"); + } + return this; + } + + /** + * Loads the resource name from the classpath, returning true if it + * was loaded. 
+ */ + public Builder loadFromClasspath(String resourceName) throws SettingsException { + ClassLoader classLoader = this.classLoader; + if (classLoader == null) { + classLoader = buildClassLoader(); + } + InputStream is = classLoader.getResourceAsStream(resourceName); + if (is == null) { + return this; + } + + return loadFromStream(resourceName, is); + } + + public Builder classLoader(ClassLoader classLoader) { + this.classLoader = classLoader; + return this; + } + + public Builder globalSettings(Settings globalSettings) { + this.globalSettings = globalSettings; + return this; + } + + public Builder putProperties(String prefix, Properties properties) { + for (Object key1 : properties.keySet()) { + String key = (String) key1; + String value = properties.getProperty(key); + if (key.startsWith(prefix)) { + map.put(key.substring(prefix.length()), value); + } + } + return this; + } + + public Builder replacePropertyPlaceholders() { + PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false); + PropertyPlaceholder.PlaceholderResolver placeholderResolver = new PropertyPlaceholder.PlaceholderResolver() { + @Override public String resolvePlaceholder(String placeholderName) { + String value = System.getProperty(placeholderName); + if (value != null) { + return value; + } + value = System.getenv(placeholderName); + if (value != null) { + return value; + } + return map.get(placeholderName); + } + }; + for (Map.Entry entry : map.entrySet()) { + map.put(entry.getKey(), propertyPlaceholder.replacePlaceholders(entry.getValue(), placeholderResolver)); + } + return this; + } + + public Settings build() { + return new ImmutableSettings( + Collections.unmodifiableMap(map), + globalSettings, classLoader); + } + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/NoClassSettingsException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/NoClassSettingsException.java new file mode 100644 index 
00000000000..6a7e3639513
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/NoClassSettingsException.java
@@ -0,0 +1,34 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.settings;

/**
 * Thrown when a class referenced by a setting value cannot be loaded
 * (see {@code ImmutableSettings#getAsClass}).
 *
 * @author kimchy (Shay Banon)
 */
public class NoClassSettingsException extends SettingsException {

    public NoClassSettingsException(String message) {
        super(message);
    }

    public NoClassSettingsException(String message, Throwable cause) {
        super(message, cause);
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/Settings.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/Settings.java
new file mode 100644
index 00000000000..98026ab203c
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/Settings.java
@@ -0,0 +1,73 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.settings; + +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.concurrent.ThreadSafe; + +import java.util.Map; + +/** + * @author kimchy (Shay Banon) + */ +@ThreadSafe +public interface Settings { + + Settings getGlobalSettings(); + + Settings getComponentSettings(Class component); + + Settings getComponentSettings(String prefix, Class component); + + ClassLoader getClassLoader(); + + Map getAsMap(); + + String get(String setting); + + String get(String setting, String defaultValue); + + Map getGroups(String settingPrefix) throws SettingsException; + + Float getAsFloat(String setting, Float defaultValue) throws SettingsException; + + Double getAsDouble(String setting, Double defaultValue) throws SettingsException; + + Integer getAsInt(String setting, Integer defaultValue) throws SettingsException; + + Long getAsLong(String setting, Long defaultValue) throws SettingsException; + + Boolean getAsBoolean(String setting, Boolean defaultValue) throws SettingsException; + + TimeValue getAsTime(String setting, TimeValue defaultValue) throws SettingsException; + + SizeValue getAsSize(String setting, SizeValue defaultValue) throws SettingsException; + + Class getAsClass(String setting, Class defaultClazz) throws SettingsException; + + Class getAsClass(String setting, Class defaultClazz, 
String prefixPackage, String suffixClassName) throws SettingsException; + + String[] getAsArray(String settingPrefix) throws SettingsException; + + interface Builder { + Settings build(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/SettingsException.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/SettingsException.java new file mode 100644 index 00000000000..8df6c4fedf0 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/SettingsException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.util.settings;

import org.elasticsearch.ElasticSearchException;

/**
 * Base exception for failures while reading, parsing, or loading settings.
 *
 * @author kimchy (Shay Banon)
 */
public class SettingsException extends ElasticSearchException {

    public SettingsException(String message) {
        super(message);
    }

    public SettingsException(String message, Throwable cause) {
        super(message, cause);
    }
}
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/SettingsModule.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/SettingsModule.java
new file mode 100644
index 00000000000..c94abc4bdb4
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/SettingsModule.java
@@ -0,0 +1,38 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.util.settings;

import com.google.inject.AbstractModule;

/**
 * Guice module that binds the {@link Settings} interface to a concrete
 * settings instance.
 *
 * @author kimchy (Shay Banon)
 */
public class SettingsModule extends AbstractModule {

    private final Settings settings;

    public SettingsModule(Settings settings) {
        this.settings = settings;
    }

    @Override protected void configure() {
        bind(Settings.class).toInstance(settings);
    }
}
\ No newline at end of file
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/JsonSettingsLoader.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/JsonSettingsLoader.java
new file mode 100644
index 00000000000..421c8a0d5a6
--- /dev/null
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/JsonSettingsLoader.java
@@ -0,0 +1,112 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
/**
 * Loads settings from json source. Basically, flats them into a Map: nested
 * object fields become dot separated keys ({@code "a.b" -> "v"}) and array
 * elements get their index appended ({@code "a.0" -> "v"}).
 *
 * @author kimchy (Shay Banon)
 */
public class JsonSettingsLoader implements SettingsLoader {

    // factory is reused across load calls
    private final JsonFactory jsonFactory = Jackson.defaultJsonFactory();

    @Override public Map<String, String> load(String source) throws IOException {
        // NOTE(review): the parser is not explicitly closed — confirm intended
        JsonParser jp = jsonFactory.createJsonParser(new FastStringReader(source));
        return load(jp);
    }

    /**
     * Flattens the stream read by the given parser into a settings map.
     * The parser is expected to be positioned before the root START_OBJECT.
     */
    public Map<String, String> load(JsonParser jp) throws IOException {
        StringBuilder sb = new StringBuilder();
        Map<String, String> settings = newHashMap();
        List<String> path = newArrayList();
        jp.nextToken(); // move onto the root START_OBJECT
        serializeObject(settings, sb, path, jp, null);
        return settings;
    }

    // objFieldName == null marks the root object (contributes no path segment)
    private void serializeObject(Map<String, String> settings, StringBuilder sb, List<String> path, JsonParser jp, String objFieldName) throws IOException {
        if (objFieldName != null) {
            path.add(objFieldName);
        }

        String currentFieldName = null;
        JsonToken token;
        while ((token = jp.nextToken()) != JsonToken.END_OBJECT) {
            if (token == JsonToken.START_OBJECT) {
                serializeObject(settings, sb, path, jp, currentFieldName);
            } else if (token == JsonToken.START_ARRAY) {
                serializeArray(settings, sb, path, jp, currentFieldName);
            } else if (token == JsonToken.FIELD_NAME) {
                currentFieldName = jp.getCurrentName();
            } else if (token == JsonToken.VALUE_NULL) {
                // null values are dropped from the flattened settings
            } else {
                serializeValue(settings, sb, path, jp, currentFieldName);
            }
        }

        if (objFieldName != null) {
            path.remove(path.size() - 1);
        }
    }

    // array elements are keyed by "<fieldName>.<index>"
    private void serializeArray(Map<String, String> settings, StringBuilder sb, List<String> path, JsonParser jp, String fieldName) throws IOException {
        JsonToken token;
        int counter = 0;
        while ((token = jp.nextToken()) != JsonToken.END_ARRAY) {
            if (token == JsonToken.START_OBJECT) {
                serializeObject(settings, sb, path, jp, fieldName + '.' + (counter++));
            } else if (token == JsonToken.START_ARRAY) {
                serializeArray(settings, sb, path, jp, fieldName + '.' + (counter++));
            } else if (token == JsonToken.FIELD_NAME) {
                // should not normally occur inside an array; keeps last seen name
                fieldName = jp.getCurrentName();
            } else if (token == JsonToken.VALUE_NULL) {
                // null elements are dropped (note: the index counter is not advanced)
            } else {
                serializeValue(settings, sb, path, jp, fieldName + '.' + (counter++));
            }
        }
    }

    // builds the dotted key from the accumulated path and stores the scalar value
    private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, JsonParser jp, String fieldName) throws IOException {
        sb.setLength(0);
        for (String pathEle : path) {
            sb.append(pathEle).append('.');
        }
        sb.append(fieldName);
        settings.put(sb.toString(), jp.getText());
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.settings.loader; + +import org.elasticsearch.util.io.FastStringReader; + +import java.io.IOException; +import java.util.Map; +import java.util.Properties; + +import static com.google.common.collect.Maps.*; + +/** + * Loads the settings from a properties file. + * + * @author kimchy (Shay Banon) + */ +public class PropertiesSettingsLoader implements SettingsLoader { + + @Override public Map load(String source) throws IOException { + Properties props = new Properties(); + props.load(new FastStringReader(source)); + Map result = newHashMap(); + for (Map.Entry entry : props.entrySet()) { + result.put((String) entry.getKey(), (String) entry.getValue()); + } + return result; + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/SettingsLoader.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/SettingsLoader.java new file mode 100644 index 00000000000..0e6421efd76 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/SettingsLoader.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/**
 * Provides the ability to load settings (in the form of a simple Map) from
 * the actual source content that represents them.
 *
 * @author kimchy (Shay Banon)
 */
public interface SettingsLoader {

    /**
     * Loads (and flattens) the settings from the given source content.
     *
     * @param source the raw settings content (e.g. json / yaml / properties)
     * @return a flat map of setting keys to string values
     * @throws IOException if the source cannot be parsed
     */
    Map<String, String> load(String source) throws IOException;
}
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.settings.loader; + +/** + * @author kimchy (Shay Banon) + */ +public final class SettingsLoaderFactory { + + private SettingsLoaderFactory() { + + } + + public static SettingsLoader loaderFromResource(String resourceName) { + if (resourceName.endsWith(".json")) { + return new JsonSettingsLoader(); + } else if (resourceName.endsWith(".yml")) { + return new YamlSettingsLoader(); + } else if (resourceName.endsWith(".properties")) { + return new PropertiesSettingsLoader(); + } else { + // lets default to the json one + return new JsonSettingsLoader(); + } + } + + public static SettingsLoader loaderFromSource(String source) { + if (source.indexOf('{') != -1 && source.indexOf('}') != -1) { + return new JsonSettingsLoader(); + } + if (source.indexOf(':') != -1) { + return new YamlSettingsLoader(); + } + return new PropertiesSettingsLoader(); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/YamlSettingsLoader.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/YamlSettingsLoader.java new file mode 100644 index 00000000000..5346b4055bd --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/settings/loader/YamlSettingsLoader.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.settings.loader; + +import org.yaml.snakeyaml.Yaml; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.Lists.*; +import static com.google.common.collect.Maps.*; + +/** + * @author kimchy (Shay Banon) + */ +public class YamlSettingsLoader implements SettingsLoader { + + @Override public Map load(String source) throws IOException { + // replace tabs with whitespace (yaml does not accept tabs, but many users might use it still...) + source = source.replace("\t", " "); + Yaml yaml = new Yaml(); + Map yamlMap = (Map) yaml.load(source); + StringBuilder sb = new StringBuilder(); + Map settings = newHashMap(); + if (yamlMap == null) { + return settings; + } + List path = newArrayList(); + serializeMap(settings, sb, path, yamlMap); + return settings; + } + + private void serializeMap(Map settings, StringBuilder sb, List path, Map yamlMap) { + for (Map.Entry entry : yamlMap.entrySet()) { + if (entry.getValue() instanceof Map) { + path.add((String) entry.getKey()); + serializeMap(settings, sb, path, (Map) entry.getValue()); + path.remove(path.size() - 1); + } else if (entry.getValue() instanceof List) { + path.add((String) entry.getKey()); + serializeList(settings, sb, path, (List) entry.getValue()); + path.remove(path.size() - 1); + } else { + serializeValue(settings, sb, path, (String) entry.getKey(), entry.getValue()); + } + } + } + + private void serializeList(Map settings, StringBuilder sb, List path, List yamlList) { + int counter = 0; + for 
(Object listEle : yamlList) { + if (listEle instanceof Map) { + path.add(Integer.toString(counter)); + serializeMap(settings, sb, path, (Map) listEle); + path.remove(path.size() - 1); + } else if (listEle instanceof List) { + path.add(Integer.toString(counter)); + serializeList(settings, sb, path, (List) listEle); + path.remove(path.size() - 1); + } else { + serializeValue(settings, sb, path, Integer.toString(counter), listEle); + } + counter++; + } + } + + private void serializeValue(Map settings, StringBuilder sb, List path, String name, Object value) { + if (value == null) { + return; + } + sb.setLength(0); + for (String pathEle : path) { + sb.append(pathEle).append('.'); + } + sb.append(name); + settings.put(sb.toString(), value.toString()); + } +} diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/util/transport/BoundTransportAddress.java b/modules/elasticsearch/src/main/java/org/elasticsearch/util/transport/BoundTransportAddress.java new file mode 100644 index 00000000000..ab07c795234 --- /dev/null +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/util/transport/BoundTransportAddress.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * A bound transport address is a tuple of two {@link TransportAddress}es: one that
 * represents the address the transport is bound on, while the published one
 * represents the address clients should communicate on.
 *
 * @author kimchy (Shay Banon)
 */
public class BoundTransportAddress {

    private final TransportAddress boundAddress;

    private final TransportAddress publishAddress;

    public BoundTransportAddress(TransportAddress boundAddress, TransportAddress publishAddress) {
        this.boundAddress = boundAddress;
        this.publishAddress = publishAddress;
    }

    /** The address the transport is actually bound on. */
    public TransportAddress boundAddress() {
        return boundAddress;
    }

    /** The address published to clients for communication. */
    public TransportAddress publishAddress() {
        return publishAddress;
    }

    @Override public String toString() {
        return "boundAddress [" + boundAddress + "], publishAddress [" + publishAddress + "]";
    }
}
/**
 * A singleton {@link TransportAddress} carrying no address information at all:
 * it reads and writes nothing, and registers under unique type id 0.
 *
 * @author kimchy (Shay Banon)
 */
public class DummyTransportAddress implements TransportAddress {

    public static final DummyTransportAddress INSTANCE = new DummyTransportAddress();

    // package-private: the serializers registry needs a no-arg constructor,
    // everyone else should use INSTANCE
    DummyTransportAddress() {
    }

    @Override public short uniqueAddressTypeId() {
        return 0;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        // stateless, nothing to read
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        // stateless, nothing to write
    }
}
/**
 * A {@link TransportAddress} backed by an {@link InetSocketAddress}, serialized
 * on the wire as a UTF host name followed by an int port (unique type id 1).
 *
 * @author kimchy (Shay Banon)
 */
public class InetSocketTransportAddress implements TransportAddress {

    private InetSocketAddress address;

    // package-private no-arg constructor used by the serializers registry
    InetSocketTransportAddress() {

    }

    public InetSocketTransportAddress(String hostname, int port) {
        this(new InetSocketAddress(hostname, port));
    }

    public InetSocketTransportAddress(InetSocketAddress address) {
        this.address = address;
    }

    /** Reads and returns a new address from the given input. */
    public static InetSocketTransportAddress readInetSocketTransportAddress(DataInput in) throws IOException, ClassNotFoundException {
        InetSocketTransportAddress address = new InetSocketTransportAddress();
        address.readFrom(in);
        return address;
    }

    @Override public short uniqueAddressTypeId() {
        return 1;
    }

    public InetSocketAddress address() {
        return this.address;
    }

    @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException {
        address = new InetSocketAddress(in.readUTF(), in.readInt());
    }

    @Override public void writeTo(DataOutput out) throws IOException {
        // NOTE(review): getHostName() may trigger a reverse DNS lookup when the
        // address was created from a raw IP — confirm acceptable on the write path
        out.writeUTF(address.getHostName());
        out.writeInt(address.getPort());
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        InetSocketTransportAddress address1 = (InetSocketTransportAddress) o;

        // equality is fully delegated to the wrapped InetSocketAddress
        if (address != null ? !address.equals(address1.address) : address1.address != null) return false;

        return true;
    }

    @Override
    public int hashCode() {
        return address != null ? address.hashCode() : 0;
    }

    @Override public String toString() {
        return "inet[" + address + "]";
    }
}
/**
 * Helpers for classifying low-level network exceptions.
 *
 * @author kimchy (Shay Banon)
 */
public class NetworkExceptionHelper {

    // utility class, no instances (same convention as SettingsLoaderFactory)
    private NetworkExceptionHelper() {
    }

    /** Returns {@code true} if the throwable represents a failure to establish a connection. */
    public static boolean isConnectException(Throwable e) {
        return e instanceof ConnectException;
    }

    /**
     * Heuristically detects exceptions caused by the remote side closing the
     * connection. Message matching is needed because the exact exception type
     * and text differ across OS / JDK implementations.
     */
    public static boolean isCloseConnectionException(Throwable e) {
        if (e instanceof ClosedChannelException) {
            return true;
        }
        String message = e.getMessage();
        if (message != null) {
            return message.contains("Connection reset by peer")
                    || message.contains("connection was aborted")
                    || message.contains("forcibly closed");
        }
        return false;
    }
}
/**
 * A range of ports expressed as a comma separated list of single ports and
 * inclusive ranges, e.g. {@code "9300-9400,9500"}.
 *
 * @author kimchy (Shay Banon)
 */
public class PortsRange {

    private final String portRange;

    public PortsRange(String portRange) {
        this.portRange = portRange;
    }

    /**
     * Walks over every port in the range in order, invoking the callback until
     * it returns {@code true}.
     *
     * @return {@code true} if the callback accepted one of the ports
     * @throws NumberFormatException    if a token is not a valid number
     * @throws IllegalArgumentException if a range is inverted (start &gt; end)
     */
    public boolean iterate(PortCallback callback) throws NumberFormatException {
        StringTokenizer st = new StringTokenizer(portRange, ",");
        while (st.hasMoreTokens()) {
            String portToken = st.nextToken().trim();
            int index = portToken.indexOf('-');
            if (index == -1) {
                // single port token
                if (callback.onPortNumber(Integer.parseInt(portToken.trim()))) {
                    return true;
                }
            } else {
                int startPort = Integer.parseInt(portToken.substring(0, index).trim());
                int endPort = Integer.parseInt(portToken.substring(index + 1).trim());
                if (endPort < startPort) {
                    // fixed message: it previously claimed start "must be greater than" end,
                    // the inverse of the condition actually checked
                    throw new IllegalArgumentException("Start port [" + startPort + "] must be smaller or equal to end port [" + endPort + "]");
                }
                for (int i = startPort; i <= endPort; i++) {
                    if (callback.onPortNumber(i)) {
                        return true;
                    }
                }
            }
        }
        return false;
    }

    /** Callback invoked per candidate port; return {@code true} to stop the iteration. */
    public static interface PortCallback {
        boolean onPortNumber(int portNumber);
    }
}
/**
 * An address used by the transport layer to identify where a node can be
 * reached. Concrete implementations are registered with
 * {@code TransportAddressSerializers} using their {@link #uniqueAddressTypeId()}.
 *
 * @author kimchy (Shay Banon)
 */
public interface TransportAddress extends Streamable, Serializable {

    /**
     * A short id unique per address implementation, written to the wire so the
     * bytes can be mapped back to the concrete type on read.
     */
    short uniqueAddressTypeId();
}
/**
 * A global registry of all different types of {@link org.elasticsearch.util.transport.TransportAddress} allowing
 * to perform serialization of them.
 *
 * <p>By default, adds {@link org.elasticsearch.util.transport.InetSocketTransportAddress}.
 *
 * @author kimchy (Shay Banon)
 */
public abstract class TransportAddressSerializers {

    private static final Logger logger = Loggers.getLogger(TransportAddressSerializers.class);

    // copy-on-write map: replaced wholesale under the class lock in addAddressType,
    // read without locking in addressFromStream
    private static ImmutableMap<Short, Constructor<? extends TransportAddress>> addressConstructors = ImmutableMap.of();

    static {
        try {
            addAddressType(DummyTransportAddress.INSTANCE);
            addAddressType(new InetSocketTransportAddress());
        } catch (Exception e) {
            logger.warn("Failed to add InetSocketTransportAddress", e);
        }
    }

    /**
     * Registers an address type keyed by its unique id. The no-arg constructor
     * of the concrete class is cached (and made accessible) for deserialization.
     *
     * @throws ElasticSearchIllegalStateException if the id is already bound
     */
    public static synchronized void addAddressType(TransportAddress address) throws Exception {
        if (addressConstructors.containsKey(address.uniqueAddressTypeId())) {
            throw new ElasticSearchIllegalStateException("Address [" + address.uniqueAddressTypeId() + "] already bound");
        }
        Constructor<? extends TransportAddress> constructor = address.getClass().getDeclaredConstructor();
        constructor.setAccessible(true);
        addressConstructors = newMapBuilder(addressConstructors).put(address.uniqueAddressTypeId(), constructor).immutableMap();
    }

    /** Reads the type id and delegates the rest of the stream to the registered type's readFrom. */
    public static TransportAddress addressFromStream(DataInput input) throws IOException, ClassNotFoundException {
        short addressUniqueId = input.readShort();
        Constructor<? extends TransportAddress> constructor = addressConstructors.get(addressUniqueId);
        if (constructor == null) {
            throw new IOException("No transport address mapped to [" + addressUniqueId + "]");
        }
        TransportAddress address;
        try {
            address = constructor.newInstance();
        } catch (Exception e) {
            throw new IOException("Failed to create class with constructor [" + constructor + "]", e);
        }
        address.readFrom(input);
        return address;
    }

    /** Writes the type id followed by the address itself. */
    public static void addressToStream(DataOutput out, TransportAddress address) throws IOException {
        out.writeShort(address.uniqueAddressTypeId());
        address.writeTo(out);
    }
}
/**
 * An extension to trove's {@link TIntArrayList} exposing direct access to the
 * backing array without copying it.
 *
 * @author kimchy (Shay Banon)
 */
public class ExtTIntArrayList extends TIntArrayList {

    public ExtTIntArrayList() {
    }

    public ExtTIntArrayList(int capacity) {
        super(capacity);
    }

    public ExtTIntArrayList(int[] values) {
        super(values);
    }

    /**
     * Returns the internal backing array. Unsafe: its length may exceed the
     * list size, and mutations write through to the list.
     */
    public int[] unsafeArray() {
        return _data;
    }
}
/**
 * An extension to trove's {@link TIntIntHashMap} that allows configuring the
 * value {@link #get(int)} returns for missing keys (trove's default is 0).
 *
 * @author kimchy (Shay Banon)
 */
public class ExtTIntIntHashMap extends TIntIntHashMap {

    // value returned by get(int) when the key is not present
    private int defaultReturnValue = 0;

    public ExtTIntIntHashMap() {
    }

    public ExtTIntIntHashMap(int initialCapacity) {
        super(initialCapacity);
    }

    public ExtTIntIntHashMap(int initialCapacity, float loadFactor) {
        super(initialCapacity, loadFactor);
    }

    public ExtTIntIntHashMap(TIntHashingStrategy strategy) {
        super(strategy);
    }

    public ExtTIntIntHashMap(int initialCapacity, TIntHashingStrategy strategy) {
        super(initialCapacity, strategy);
    }

    public ExtTIntIntHashMap(int initialCapacity, float loadFactor, TIntHashingStrategy strategy) {
        super(initialCapacity, loadFactor, strategy);
    }

    /** Sets the value to return for missing keys; returns {@code this} for chaining. */
    public ExtTIntIntHashMap defaultReturnValue(int defaultReturnValue) {
        this.defaultReturnValue = defaultReturnValue;
        return this;
    }

    @Override public int get(int key) {
        int index = index(key);
        // negative index means the key is absent
        return index < 0 ? defaultReturnValue : _values[index];
    }


}
/**
 * An extension to trove's {@link TObjectIntHashMap} that allows configuring the
 * value {@link #get(Object)} returns for missing keys (trove's default is 0).
 *
 * <p>Note: the class name's "HasMap" spelling is kept as-is since callers
 * reference the class by name.
 *
 * @author kimchy (Shay Banon)
 */
public class ExtTObjectIntHasMap<T> extends TObjectIntHashMap<T> {

    // value returned by get(key) when the key is not present
    private int defaultReturnValue = 0;

    public ExtTObjectIntHasMap() {
    }

    public ExtTObjectIntHasMap(int initialCapacity) {
        super(initialCapacity);
    }

    public ExtTObjectIntHasMap(int initialCapacity, float loadFactor) {
        super(initialCapacity, loadFactor);
    }

    public ExtTObjectIntHasMap(TObjectHashingStrategy<T> ttObjectHashingStrategy) {
        super(ttObjectHashingStrategy);
    }

    public ExtTObjectIntHasMap(int initialCapacity, TObjectHashingStrategy<T> ttObjectHashingStrategy) {
        super(initialCapacity, ttObjectHashingStrategy);
    }

    public ExtTObjectIntHasMap(int initialCapacity, float loadFactor, TObjectHashingStrategy<T> ttObjectHashingStrategy) {
        super(initialCapacity, loadFactor, ttObjectHashingStrategy);
    }

    /** Sets the value to return for missing keys; returns {@code this} for chaining. */
    public ExtTObjectIntHasMap<T> defaultReturnValue(int defaultReturnValue) {
        this.defaultReturnValue = defaultReturnValue;
        return this;
    }

    @Override public int get(T key) {
        int index = index(key);
        // negative index means the key is absent
        return index < 0 ? defaultReturnValue : _values[index];
    }
}
/**
 * Placeholder for version-related tests; no behavior is exercised yet.
 */
public class VersionTests {
}
+ */ + +package org.elasticsearch.cluster.routing.serialization; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.strategy.DefaultShardsRoutingStrategy; +import org.elasticsearch.util.io.ByteArrayDataInputStream; +import org.elasticsearch.util.io.ByteArrayDataOutputStream; +import org.elasticsearch.util.transport.DummyTransportAddress; +import org.testng.annotations.Test; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.MetaData.*; +import static org.elasticsearch.cluster.routing.RoutingBuilders.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class RoutingTableSerializationTests { + + @Test public void testSimple() throws Exception { + MetaData metaData = newMetaDataBuilder() + .put(newIndexMetaDataBuilder("test").numberOfShards(10).numberOfReplicas(1)) + .build(); + + RoutingTable routingTable = routingTable() + .add(indexRoutingTable("test").initializeEmpty(metaData.index("test"))) + .build(); + + Nodes nodes = Nodes.newNodesBuilder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build(); + + ClusterState clusterState = ClusterState.newClusterStateBuilder().nodes(nodes).metaData(metaData).routingTable(routingTable).build(); + + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + RoutingTable source = strategy.reroute(clusterState); + + ByteArrayDataOutputStream outStream = new ByteArrayDataOutputStream(); + RoutingTable.Builder.writeTo(source, outStream); + ByteArrayDataInputStream inStream = new ByteArrayDataInputStream(outStream.copiedByteArray()); + RoutingTable target = RoutingTable.Builder.readFrom(inStream); + + 
assertThat(target.prettyPrint(), equalTo(source.prettyPrint())); + } + + private Node newNode(String nodeId) { + return new Node(nodeId, DummyTransportAddress.INSTANCE); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/SingleShardNoBackupsRoutingStrategyTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/SingleShardNoBackupsRoutingStrategyTests.java new file mode 100644 index 00000000000..62db40f7cf4 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/SingleShardNoBackupsRoutingStrategyTests.java @@ -0,0 +1,442 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing.strategy; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.node.Nodes; +import org.elasticsearch.cluster.routing.MutableShardRouting; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.transport.DummyTransportAddress; +import org.slf4j.Logger; +import org.testng.annotations.Test; + +import java.util.List; +import java.util.Set; + +import static com.google.common.collect.Lists.*; +import static com.google.common.collect.Sets.*; +import static org.elasticsearch.cluster.ClusterState.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.MetaData.*; +import static org.elasticsearch.cluster.node.Nodes.*; +import static org.elasticsearch.cluster.routing.RoutingBuilders.*; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SingleShardNoBackupsRoutingStrategyTests { + + private final Logger logger = Loggers.getLogger(SingleShardNoBackupsRoutingStrategyTests.class); + + @Test public void testSingleIndexStartedShard() { + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + + logger.info("Building initial routing table"); + + MetaData metaData = newMetaDataBuilder() + .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(0)) + .build(); + + RoutingTable routingTable = routingTable() + .add(indexRoutingTable("test").initializeEmpty(metaData.index("test"))) + .build(); + + ClusterState clusterState = 
newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build(); + + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); + + logger.info("Adding one node and performing rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().put(newNode("node1"))).build(); + RoutingTable prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); + + logger.info("Rerouting again, nothing should change"); + prevRoutingTable = routingTable; + clusterState = newClusterStateBuilder().state(clusterState).build(); + routingTable = strategy.reroute(clusterState); + assertThat(routingTable == prevRoutingTable, equalTo(true)); + + logger.info("Marking the shard as started"); + RoutingNodes routingNodes = routingTable.routingNodes(clusterState.metaData()); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(routingTable != prevRoutingTable, 
equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); + + logger.info("Starting another node and making sure nothing changed"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node2"))).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(routingTable == prevRoutingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); + + logger.info("Killing node1 where the shard is, checking the shard is relocated"); + + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).remove("node1")).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(routingTable != prevRoutingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), 
equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2")); + + logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node3"))).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + assertThat(routingTable == prevRoutingTable, equalTo(true)); + + logger.info("Start the shard on node 2"); + routingNodes = routingTable.routingNodes(clusterState.metaData()); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(routingTable != prevRoutingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2")); + } + + @Test public void testSingleIndexShardFailed() { + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + + logger.info("Building initial routing table"); + + MetaData metaData = newMetaDataBuilder() + .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(0)) + .build(); + + RoutingTable routingTable = routingTable() + 
.add(indexRoutingTable("test").initializeEmpty(metaData.index("test"))) + .build(); + + ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build(); + + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); + + logger.info("Adding one node and rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().put(newNode("node1"))).build(); + RoutingTable prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); + + logger.info("Marking the shard as failed"); + RoutingNodes routingNodes = routingTable.routingNodes(clusterState.metaData()); + prevRoutingTable = routingTable; + routingTable = strategy.applyFailedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + 
assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); + } + + @Test public void testMultiIndexEvenDistribution() { + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + + final int numberOfIndices = 50; + logger.info("Building initial routing table with " + numberOfIndices + " indices"); + + MetaData.Builder metaDataBuilder = newMetaDataBuilder(); + for (int i = 0; i < numberOfIndices; i++) { + metaDataBuilder.put(newIndexMetaDataBuilder("test" + i).numberOfShards(1).numberOfReplicas(0)); + } + MetaData metaData = metaDataBuilder.build(); + + RoutingTable.Builder routingTableBuilder = routingTable(); + for (int i = 0; i < numberOfIndices; i++) { + routingTableBuilder.add(indexRoutingTable("test" + i).initializeEmpty(metaData.index("test" + i))); + } + RoutingTable routingTable = routingTableBuilder.build(); + ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build(); + + assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue()); + } + + logger.info("Adding " + (numberOfIndices / 2) + " nodes"); + Nodes.Builder nodesBuilder = newNodesBuilder(); + List nodes = 
newArrayList(); + for (int i = 0; i < (numberOfIndices / 2); i++) { + nodesBuilder.put(newNode("node" + i)); + } + RoutingTable prevRoutingTable = routingTable; + clusterState = newClusterStateBuilder().state(clusterState).nodes(nodesBuilder).build(); + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true)); + // make sure we still have 2 shards initializing per node on the first 25 nodes + String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(); + int nodeIndex = Integer.parseInt(nodeId.substring("node".length())); + assertThat(nodeIndex, lessThan(25)); + } + RoutingNodes routingNodes = routingTable.routingNodes(metaData); + Set encounteredIndices = newHashSet(); + for (RoutingNode routingNode : routingNodes) { + assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(0)); + assertThat(routingNode.shards().size(), equalTo(2)); + // make sure we still have 2 shards initializing per node on the only 25 nodes + int nodeIndex = Integer.parseInt(routingNode.nodeId().substring("node".length())); + assertThat(nodeIndex, lessThan(25)); + // check that we don't have a shard associated with a node with the same index name (we have a single shard) + for (MutableShardRouting shardRoutingEntry : routingNode) { + 
assertThat(encounteredIndices, not(hasItem(shardRoutingEntry.index()))); + encounteredIndices.add(shardRoutingEntry.index()); + } + } + + logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change"); + nodesBuilder = newNodesBuilder().putAll(clusterState.nodes()); + for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) { + nodesBuilder.put(newNode("node" + i)); + } + prevRoutingTable = routingTable; + clusterState = newClusterStateBuilder().state(clusterState).nodes(nodesBuilder).build(); + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(false)); + + logger.info("Marking the shard as started"); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsOfType(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(routingTable != prevRoutingTable, equalTo(true)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(STARTED)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true)); + // make sure we still have 2 shards initializing per node on the first 25 nodes + String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(); + int nodeIndex = Integer.parseInt(nodeId.substring("node".length())); + assertThat(nodeIndex, lessThan(25)); + } + + logger.info("Perform another round of 
reroute after we started the shards (we don't do automatic reroute when applying started shards)"); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + int numberOfRelocatingShards = 0; + int numberOfStartedShards = 0; + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING))); + if (routingTable.index("test" + i).shard(0).shards().get(0).state() == STARTED) { + numberOfStartedShards++; + } else if (routingTable.index("test" + i).shard(0).shards().get(0).state() == RELOCATING) { + numberOfRelocatingShards++; + } + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true)); + // make sure we still have 2 shards either relocating or started on the first 25 nodes (still) + String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(); + int nodeIndex = Integer.parseInt(nodeId.substring("node".length())); + assertThat(nodeIndex, lessThan(25)); + } + assertThat(numberOfRelocatingShards, equalTo(25)); + assertThat(numberOfStartedShards, equalTo(25)); + } + + @Test public void testMultiIndexUnevenNodes() { + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + + final int numberOfIndices = 10; + logger.info("Building initial routing table with " + numberOfIndices + " indices"); + + MetaData.Builder metaDataBuilder = newMetaDataBuilder(); + for (int i = 0; i < 
numberOfIndices; i++) { + metaDataBuilder.put(newIndexMetaDataBuilder("test" + i).numberOfShards(1).numberOfReplicas(0)); + } + MetaData metaData = metaDataBuilder.build(); + + RoutingTable.Builder routingTableBuilder = routingTable(); + for (int i = 0; i < numberOfIndices; i++) { + routingTableBuilder.add(indexRoutingTable("test" + i).initializeEmpty(metaData.index("test" + i))); + } + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build(); + + assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices)); + + logger.info("Starting 3 nodes and retouring"); + clusterState = newClusterStateBuilder().state(clusterState) + .nodes(newNodesBuilder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))) + .build(); + RoutingTable prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING)); + } + RoutingNodes routingNodes = routingTable.routingNodes(metaData); + assertThat(routingNodes.numberOfShardsOfType(INITIALIZING), equalTo(numberOfIndices)); + assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4))); + assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4))); + assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4))); + + 
logger.info("Start two more nodes, things should remain the same"); + clusterState = newClusterStateBuilder().state(clusterState) + .nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node4")).put(newNode("node5"))) + .build(); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + + assertThat(prevRoutingTable == routingTable, equalTo(true)); + + routingNodes = routingTable.routingNodes(metaData); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsOfType(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(STARTED)); + } + routingNodes = routingTable.routingNodes(metaData); + assertThat(routingNodes.numberOfShardsOfType(STARTED), equalTo(numberOfIndices)); + assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), anyOf(equalTo(3), equalTo(4))); + assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), anyOf(equalTo(3), equalTo(4))); + assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), anyOf(equalTo(3), equalTo(4))); + + logger.info("Now, reroute so we start the relocation process for even distribution (4 should be relocated)"); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + 
assertThat(prevRoutingTable != routingTable, equalTo(true)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED))); + } + routingNodes = routingTable.routingNodes(metaData); + assertThat("4 source shard routing are relocating", routingNodes.numberOfShardsOfType(RELOCATING), equalTo(4)); + assertThat("4 target shard routing are initializing", routingNodes.numberOfShardsOfType(INITIALIZING), equalTo(4)); + + logger.info("Now, mark the relocated as started"); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsOfType(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); +// routingTable = strategy.reroute(new RoutingStrategyInfo(metaData, routingTable), nodes); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + for (int i = 0; i < numberOfIndices; i++) { + assertThat(routingTable.index("test" + i).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1)); + assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED))); + } + routingNodes = routingTable.routingNodes(metaData); + assertThat(routingNodes.numberOfShardsOfType(STARTED), equalTo(numberOfIndices)); + for (RoutingNode routingNode : routingNodes) { + assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(2)); + } + } + + private Node newNode(String nodeId) { + return new Node(nodeId, DummyTransportAddress.INSTANCE); + } +} \ No newline at 
end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/SingleShardOneBackupRoutingStrategyTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/SingleShardOneBackupRoutingStrategyTests.java new file mode 100644 index 00000000000..24bc9e03399 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/SingleShardOneBackupRoutingStrategyTests.java @@ -0,0 +1,184 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing.strategy; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.transport.DummyTransportAddress; +import org.slf4j.Logger; +import org.testng.annotations.Test; + +import static org.elasticsearch.cluster.ClusterState.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.MetaData.*; +import static org.elasticsearch.cluster.node.Nodes.*; +import static org.elasticsearch.cluster.routing.RoutingBuilders.*; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SingleShardOneBackupRoutingStrategyTests { + + private final Logger logger = Loggers.getLogger(SingleShardOneBackupRoutingStrategyTests.class); + + @Test public void testSingleIndexFirstStartPrimaryThenBackups() { + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + + logger.info("Building initial routing table"); + + MetaData metaData = newMetaDataBuilder() + .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable routingTable = routingTable() + .add(indexRoutingTable("test").initializeEmpty(metaData.index("test"))) + .build(); + + ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build(); + + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + 
assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); + assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue()); + + logger.info("Adding one node and performing rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().put(newNode("node1"))).build(); + + RoutingTable prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(0).backupsShards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).currentNodeId(), nullValue()); + + logger.info("Add another node and perform rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node2"))).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + 
assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(0).backupsShards().size(), equalTo(1)); + // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).currentNodeId(), equalTo("node2")); + + logger.info("Start the primary shard (on node1)"); + RoutingNodes routingNodes = routingTable.routingNodes(clusterState.metaData()); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(0).backupsShards().size(), equalTo(1)); + // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + 
assertThat(routingTable.index("test").shard(0).backupsShards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).currentNodeId(), equalTo("node2")); + + + logger.info("Reroute, nothing should change"); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + assertThat(prevRoutingTable == routingTable, equalTo(true)); + + logger.info("Start the backup shard"); + routingNodes = routingTable.routingNodes(metaData); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(0).backupsShards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).currentNodeId(), equalTo("node2")); + + logger.info("Kill node1, backup shard should become primary"); + + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).remove("node1")).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + 
assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2")); + assertThat(routingTable.index("test").shard(0).backupsShards().size(), equalTo(1)); + // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).currentNodeId(), nullValue()); + + logger.info("Start another node, backup shard should start initializing"); + + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node3"))).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2")); + assertThat(routingTable.index("test").shard(0).backupsShards().size(), equalTo(1)); + // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + 
assertThat(routingTable.index("test").shard(0).backupsShards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(0).backupsShards().get(0).currentNodeId(), equalTo("node3")); + } + + private Node newNode(String nodeId) { + return new Node(nodeId, DummyTransportAddress.INSTANCE); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/TenShardsOneBackupRoutingTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/TenShardsOneBackupRoutingTests.java new file mode 100644 index 00000000000..b7abd607b93 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/cluster/routing/strategy/TenShardsOneBackupRoutingTests.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing.strategy; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.transport.DummyTransportAddress; +import org.slf4j.Logger; +import org.testng.annotations.Test; + +import static org.elasticsearch.cluster.ClusterState.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.MetaData.*; +import static org.elasticsearch.cluster.node.Nodes.*; +import static org.elasticsearch.cluster.routing.RoutingBuilders.*; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TenShardsOneBackupRoutingTests { + + private final Logger logger = Loggers.getLogger(TenShardsOneBackupRoutingTests.class); + + @Test public void testSingleIndexFirstStartPrimaryThenBackups() { + DefaultShardsRoutingStrategy strategy = new DefaultShardsRoutingStrategy(); + + logger.info("Building initial routing table"); + + MetaData metaData = newMetaDataBuilder() + .put(newIndexMetaDataBuilder("test").numberOfShards(10).numberOfReplicas(1)) + .build(); + + RoutingTable routingTable = routingTable() + .add(indexRoutingTable("test").initializeEmpty(metaData.index("test"))) + .build(); + + ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build(); + + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + for (int i = 0; i < routingTable.index("test").shards().size(); i++) { + assertThat(routingTable.index("test").shard(i).size(), equalTo(2)); + 
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue()); + assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue()); + } + + logger.info("Adding one node and performing rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().put(newNode("node1"))).build(); + + RoutingTable prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + for (int i = 0; i < routingTable.index("test").shards().size(); i++) { + assertThat(routingTable.index("test").shard(i).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(i).backupsShards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).state(), equalTo(UNASSIGNED)); + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).currentNodeId(), nullValue()); + } + + logger.info("Add another node and perform rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node2"))).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = 
newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + for (int i = 0; i < routingTable.index("test").shards().size(); i++) { + assertThat(routingTable.index("test").shard(i).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(i).backupsShards().size(), equalTo(1)); + // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).currentNodeId(), equalTo("node2")); + } + + logger.info("Start the primary shard (on node1)"); + RoutingNodes routingNodes = routingTable.routingNodes(clusterState.metaData()); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + for (int i = 0; i < routingTable.index("test").shards().size(); i++) { + assertThat(routingTable.index("test").shard(i).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), 
equalTo("node1")); + assertThat(routingTable.index("test").shard(i).backupsShards().size(), equalTo(1)); + // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).state(), equalTo(INITIALIZING)); + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).currentNodeId(), equalTo("node2")); + } + + logger.info("Reroute, nothing should change"); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + assertThat(prevRoutingTable == routingTable, equalTo(true)); + + logger.info("Start the backup shard"); + routingNodes = routingTable.routingNodes(metaData); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + routingNodes = routingTable.routingNodes(metaData); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + for (int i = 0; i < routingTable.index("test").shards().size(); i++) { + assertThat(routingTable.index("test").shard(i).size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2)); + assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1")); + assertThat(routingTable.index("test").shard(i).backupsShards().size(), equalTo(1)); + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).state(), equalTo(STARTED)); + assertThat(routingTable.index("test").shard(i).backupsShards().get(0).currentNodeId(), equalTo("node2")); + } + assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(10)); + 
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10)); + + logger.info("Add another node and perform rerouting"); + clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node3"))).build(); + prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + routingNodes = routingTable.routingNodes(metaData); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10)); + assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(10)); + assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10)); + assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(10)); + assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(6)); + + logger.info("Start the shards on node 3"); + routingNodes = routingTable.routingNodes(clusterState.metaData()); + prevRoutingTable = routingTable; + routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING)); + clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build(); + routingNodes = routingTable.routingNodes(metaData); + + assertThat(prevRoutingTable != routingTable, equalTo(true)); + assertThat(routingTable.index("test").shards().size(), equalTo(10)); + assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(8)); + assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(6)); + assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(6)); + } + + private Node newNode(String nodeId) { + 
return new Node(nodeId, DummyTransportAddress.INSTANCE); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java new file mode 100644 index 00000000000..288e798813e --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.deps.joda; + +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; +import org.testng.annotations.Test; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleJodaTests { + + @Test public void testIsoDateFormatDateTimeNoMillisUTC() { + DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC); + long millis = formatter.parseMillis("1970-01-01T00:00:00Z"); + + assertThat(millis, equalTo(0l)); + } + + @Test public void testIsoDateFormatDateOptionalTimeUTC() { + DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); + long millis = formatter.parseMillis("1970-01-01T00:00:00Z"); + assertThat(millis, equalTo(0l)); + millis = formatter.parseMillis("1970-01-01T00:00:00.001Z"); + assertThat(millis, equalTo(1l)); + millis = formatter.parseMillis("1970-01-01T00:00:00.1Z"); + assertThat(millis, equalTo(100l)); + millis = formatter.parseMillis("1970-01-01T00:00:00.1"); + assertThat(millis, equalTo(100l)); + millis = formatter.parseMillis("1970-01-01T00:00:00"); + assertThat(millis, equalTo(0l)); + millis = formatter.parseMillis("1970-01-01"); + assertThat(millis, equalTo(0l)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java new file mode 100644 index 00000000000..3e03178e98a --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.deps.lucene; + +import org.apache.lucene.document.*; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.util.lucene.Lucene; +import org.testng.annotations.Test; + +import java.util.ArrayList; + +import static org.elasticsearch.util.lucene.DocumentBuilder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy + */ +public class SimpleLuceneTests { + + @Test public void testSimpleNumericOps() throws Exception { + Directory dir = new RAMDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, Lucene.STANDARD_ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + + indexWriter.addDocument(doc().add(field("_id", "1")).add(new NumericField("test", Field.Store.YES, true).setIntValue(2)).build()); + + IndexSearcher searcher = new IndexSearcher(indexWriter.getReader()); + TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); + Document doc = searcher.doc(topDocs.scoreDocs[0].doc); + Fieldable f = doc.getFieldable("test"); + 
assertThat(f.stringValue(), equalTo("2")); + + topDocs = searcher.search(new TermQuery(new Term("test", NumericUtils.intToPrefixCoded(2))), 1); + doc = searcher.doc(topDocs.scoreDocs[0].doc); + f = doc.getFieldable("test"); + assertThat(f.stringValue(), equalTo("2")); + + indexWriter.close(); + } + + /** + * Here, we verify that the order that we add fields to a document counts, and not the lexi order + * of the field. This means that heavily accessed fields that use field selector should be added + * first (with load and break). + */ + @Test public void testOrdering() throws Exception { + Directory dir = new RAMDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, Lucene.STANDARD_ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + + indexWriter.addDocument(doc() + .add(field("_id", "1")) + .add(field("#id", "1")).build()); + + IndexSearcher searcher = new IndexSearcher(indexWriter.getReader()); + TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); + final ArrayList fieldsOrder = new ArrayList(); + Document doc = searcher.doc(topDocs.scoreDocs[0].doc, new FieldSelector() { + @Override public FieldSelectorResult accept(String fieldName) { + fieldsOrder.add(fieldName); + return FieldSelectorResult.LOAD; + } + }); + + assertThat(fieldsOrder.size(), equalTo(2)); + assertThat(fieldsOrder.get(0), equalTo("_id")); + assertThat(fieldsOrder.get(1), equalTo("#id")); + + indexWriter.close(); + } + + @Test public void testBoost() throws Exception { + Directory dir = new RAMDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, Lucene.STANDARD_ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + + for (int i = 0; i < 100; i++) { + // TODO (just setting the boost value does not seem to work...) 
+ StringBuilder value = new StringBuilder().append("value"); + for (int j = 0; j < i; j++) { + value.append(" ").append("value"); + } + indexWriter.addDocument(doc() + .add(field("id", Integer.toString(i))) + .add(field("value", value.toString())) + .boost(i).build()); + } + + IndexSearcher searcher = new IndexSearcher(indexWriter.getReader()); + TermQuery query = new TermQuery(new Term("value", "value")); + TopDocs topDocs = searcher.search(query, 100); + assertThat(100, equalTo(topDocs.totalHits)); + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + Document doc = searcher.doc(topDocs.scoreDocs[i].doc); +// System.out.println(doc.get("id") + ": " + searcher.explain(query, topDocs.scoreDocs[i].doc)); + assertThat(doc.get("id"), equalTo(Integer.toString(100 - i - 1))); + } + + indexWriter.close(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/discovery/jgroups/SimpleJgroupsDiscoveryTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/discovery/jgroups/SimpleJgroupsDiscoveryTests.java new file mode 100644 index 00000000000..e05abf61309 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/discovery/jgroups/SimpleJgroupsDiscoveryTests.java @@ -0,0 +1,24 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.jgroups; + +public class SimpleJgroupsDiscoveryTests { + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/discovery/jgroups/SimpleJgroupsTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/discovery/jgroups/SimpleJgroupsTests.java new file mode 100644 index 00000000000..37e9dd5f14c --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/discovery/jgroups/SimpleJgroupsTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.jgroups; + +import org.jgroups.ChannelException; +import org.jgroups.JChannel; +import org.jgroups.Message; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleJgroupsTests { + + static { + System.setProperty("jgroups.logging.log_factory_class", JgroupsCustomLogFactory.class.getName()); + } + + private JChannel channel1; + + private JChannel channel2; + + @BeforeMethod public void setupChannels() throws ChannelException { + channel1 = new JChannel("udp.xml"); + channel1.connect("test"); + + channel2 = new JChannel("udp.xml"); + channel2.connect("test"); + } + + @AfterMethod public void closeChannels() { + channel1.close(); + channel2.close(); + } + + @Test public void testUdpJgroups() throws Exception { + channel1.send(new Message(null, channel1.getAddress(), "test")); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/http/PathTrieTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/http/PathTrieTests.java new file mode 100644 index 00000000000..5d063da7dbc --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/http/PathTrieTests.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import org.testng.annotations.Test; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class PathTrieTests { + + @Test public void testPath() { + PathTrie trie = new PathTrie(); + trie.insert("/a/b/c", "walla"); + trie.insert("a/d/g", "kuku"); + trie.insert("x/b/c", "lala"); + trie.insert("a/x/*", "one"); + trie.insert("a/b/*", "two"); + trie.insert("*/*/x", "three"); + trie.insert("{index}/insert/{docId}", "bingo"); + + assertThat(trie.retrieve("a/b/c"), equalTo("walla")); + assertThat(trie.retrieve("a/d/g"), equalTo("kuku")); + assertThat(trie.retrieve("x/b/c"), equalTo("lala")); + assertThat(trie.retrieve("a/x/b"), equalTo("one")); + assertThat(trie.retrieve("a/b/d"), equalTo("two")); + + assertThat(trie.retrieve("a/b"), nullValue()); + assertThat(trie.retrieve("a/b/c/d"), nullValue()); + assertThat(trie.retrieve("g/t/x"), equalTo("three")); + + Map> params = new HashMap>(); + assertThat(trie.retrieve("index1/insert/12", params), equalTo("bingo")); + assertThat(params.size(), equalTo(2)); + assertThat(params.get("index").get(0), equalTo("index1")); + assertThat(params.get("docId").get(0), equalTo("12")); + } + + @Test public void testEmptyPath() { + PathTrie trie = new PathTrie(); + trie.insert("/", "walla"); + assertThat(trie.retrieve(""), equalTo("walla")); + } +} diff --git 
a/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java new file mode 100644 index 00000000000..3350d1c34ba --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNameModule; +import org.elasticsearch.index.settings.IndexSettingsModule; +import org.elasticsearch.util.settings.Settings; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.settings.ImmutableSettings.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class AnalysisModuleTests { + + @Test public void testSimpleConfigurationJson() { + Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build(); + testSimpleConfiguration(settings); + } + + @Test public void testSimpleConfigurationYaml() { + Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build(); + testSimpleConfiguration(settings); + } + + private void testSimpleConfiguration(Settings settings) { + Index index = new Index("test"); + Injector injector = Guice.createInjector( + new IndexSettingsModule(settings), + new IndexNameModule(index), + new AnalysisModule(settings)); + + AnalysisService analysisService = injector.getInstance(AnalysisService.class); + + Analyzer analyzer = analysisService.analyzer("custom1"); + + assertThat(analyzer, instanceOf(CustomAnalyzer.class)); + CustomAnalyzer custom1 = (CustomAnalyzer) analyzer; + assertThat(custom1.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class)); + assertThat(custom1.tokenFilters().length, equalTo(2)); + + StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0]; + assertThat(stop1.stopWords().size(), equalTo(1)); + assertThat(stop1.stopWords(), hasItem("test-stop")); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/test1.json 
b/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/test1.json new file mode 100644 index 00000000000..f5d49e7a289 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/test1.json @@ -0,0 +1,31 @@ +{ + index : { + analysis : { + tokenizer : { + standard : { + type : "standard" + } + }, + filter : { + stop : { + type : "stop", + stopwords : ["test-stop"] + }, + stop2 : { + type : "stop", + stopwords : ["stop2-1", "stop2-2"] + } + }, + analyzer : { + standard : { + type : "standard", + stopwords : ["test1", "test2", "test3"] + }, + custom1 : { + tokenizer : "standard", + filter : ["stop", "stop2"] + } + } + } + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/test1.yml b/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/test1.yml new file mode 100644 index 00000000000..bf859579e42 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/analysis/test1.yml @@ -0,0 +1,19 @@ +index : + analysis : + tokenizer : + standard : + type : standard + filter : + stop : + type : stop + stopwords : [test-stop] + stop2 : + type : stop + stopwords : [stop2-1, stop2-2] + analyzer : + standard : + type : standard + stopwords : [test1, test2, test3] + custom1 : + tokenizer : standard + filter : [stop, stop2] diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java new file mode 100644 index 00000000000..3d2dab400b5 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.Version; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.testng.annotations.AfterTest; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.lucene.Directories.*; +import static org.elasticsearch.util.lucene.DocumentBuilder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * A set of tests for {@link SnapshotDeletionPolicy}. 
+ * + * @author kimchy (Shay Banon) + */ +public class SnapshotDeletionPolicyTests { + + protected final ShardId shardId = new ShardId(new Index("index"), 1); + + private RAMDirectory dir; + private SnapshotDeletionPolicy deletionPolicy; + private IndexWriter indexWriter; + + @BeforeTest public void setUp() throws Exception { + dir = new RAMDirectory(); + deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS)); + indexWriter = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_CURRENT), true, deletionPolicy, IndexWriter.MaxFieldLength.UNLIMITED); + } + + @AfterTest public void tearDown() throws Exception { + indexWriter.close(); + dir.close(); + } + + @Test public void testSimpleSnapshot() throws Exception { + // add a document and commit, resulting in one commit point + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + + assertThat(listCommits(dir).size(), equalTo(1)); + + // add another document and commit, resulting again in one commit point + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + + // snapshot the last commit, and then add a document and commit, now we should have two commit points + SnapshotIndexCommit snapshot = deletionPolicy.snapshot(); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(2)); + + // release the commit, add a document and commit, now we should be back to one commit point + assertThat(snapshot.release(), equalTo(true)); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + } + + @Test public void testMultiSnapshot() throws Exception { + // add a document and commit, resulting in one commit point + indexWriter.addDocument(doc().add(field("test", "1")).build()); + 
indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + + // take two snapshots + SnapshotIndexCommit snapshot1 = deletionPolicy.snapshot(); + SnapshotIndexCommit snapshot2 = deletionPolicy.snapshot(); + + // we should have two commits points + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(2)); + + // release one snapshot, we should still have two commit points + assertThat(snapshot1.release(), equalTo(true)); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(2)); + + // release the second snapshot, we should be back to one commit + assertThat(snapshot2.release(), equalTo(true)); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + } + + @Test public void testMultiReleaseException() throws Exception { + // add a document and commit, resulting in one commit point + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + + // snapshot the last commit, and release it twice, the seconds should throw an exception + SnapshotIndexCommit snapshot = deletionPolicy.snapshot(); + assertThat(snapshot.release(), equalTo(true)); + assertThat(snapshot.release(), equalTo(false)); + } + + @Test public void testSimpleSnapshots() throws Exception { + // add a document and commit, resulting in one commit point + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + + // add another document and commit, resulting again in one commint point + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + + // snapshot the last commit, and then add a document and 
commit, now we should have two commit points + SnapshotIndexCommit snapshot = deletionPolicy.snapshot(); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(2)); + + // now, take a snapshot of all the commits + SnapshotIndexCommits snapshots = deletionPolicy.snapshots(); + assertThat(snapshots.size(), equalTo(2)); + + // release the snapshot, add a document and commit + // we should have 3 commits points since we are holding onto the first two with snapshots + // and we are using the keep only last + assertThat(snapshot.release(), equalTo(true)); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(3)); + + // now release the snapshots, we should be back to a single commit point + assertThat(snapshots.release(), equalTo(true)); + indexWriter.addDocument(doc().add(field("test", "1")).build()); + indexWriter.commit(); + assertThat(listCommits(dir).size(), equalTo(1)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java new file mode 100644 index 00000000000..09625730a7d --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.deletionpolicy; + +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class SnapshotIndexCommitExistsMatcher extends TypeSafeMatcher { + + @Override public boolean matchesSafely(SnapshotIndexCommit snapshotIndexCommit) { + for (String fileName : snapshotIndexCommit.getFiles()) { + try { + if (!snapshotIndexCommit.getDirectory().fileExists(fileName)) { + return false; + } + } catch (IOException e) { + return false; + } + } + return true; + } + + @Override public void describeTo(Description description) { + description.appendText("an index commit existence"); + } + + public static Matcher snapshotIndexCommitExists() { + return new SnapshotIndexCommitExistsMatcher(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/AbstractSimpleEngineTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/AbstractSimpleEngineTests.java new file mode 100644 index 00000000000..b764702e95f --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/AbstractSimpleEngineTests.java @@ -0,0 +1,405 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.IndexDeletionPolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy; +import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; +import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider; +import org.elasticsearch.index.merge.policy.MergePolicyProvider; +import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider; +import org.elasticsearch.index.merge.scheduler.SerialMergeSchedulerProvider; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.ram.RamStore; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.memory.MemoryTranslog; +import org.elasticsearch.util.lucene.Lucene; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import static 
org.elasticsearch.index.deletionpolicy.SnapshotIndexCommitExistsMatcher.*; +import static org.elasticsearch.index.engine.EngineSearcherTotalHitsMatcher.*; +import static org.elasticsearch.index.translog.TranslogSizeMatcher.*; +import static org.elasticsearch.util.lucene.DocumentBuilder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractSimpleEngineTests { + + protected final ShardId shardId = new ShardId(new Index("index"), 1); + + private Store store; + + private Engine engine; + + @BeforeMethod public void setUp() throws Exception { + store = createStore(); + store.deleteContent(); + engine = createEngine(store); + engine.start(); + } + + @AfterMethod public void tearDown() throws Exception { + engine.close(); + store.close(); + } + + protected Store createStore() { + return new RamStore(shardId, EMPTY_SETTINGS); + } + + protected Translog createTranslog() { + return new MemoryTranslog(shardId, EMPTY_SETTINGS); + } + + protected IndexDeletionPolicy createIndexDeletionPolicy() { + return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS); + } + + protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() { + return new SnapshotDeletionPolicy(createIndexDeletionPolicy()); + } + + protected MergePolicyProvider createMergePolicy() { + return new LogByteSizeMergePolicyProvider(store); + } + + protected MergeSchedulerProvider createMergeScheduler() { + return new SerialMergeSchedulerProvider(shardId, EMPTY_SETTINGS); + } + + protected abstract Engine createEngine(Store store); + + @Test public void testSimpleOperations() throws Exception { + Engine.Searcher searchResult = engine.searcher(); + + assertThat(searchResult, engineSearcherTotalHits(0)); + searchResult.release(); + + // create a document + engine.create(new Engine.Create(doc().add(field("_uid", 
"1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + + // its not there... + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + searchResult.release(); + + // refresh and it should be there + engine.refresh(true); + + // now its there... + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + searchResult.release(); + + // now do an update + engine.index(new Engine.Index(newUid("1"), doc().add(field("_uid", "1")).add(field("value", "test1")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + + // its not updated yet... + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + searchResult.release(); + + // refresh and it should be updated + engine.refresh(true); + + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + searchResult.release(); + + // now delete + engine.delete(new Engine.Delete(newUid("1"))); + + // its not deleted yet + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + searchResult.release(); + + // refresh and it should be deleted + 
engine.refresh(true); + + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + searchResult.release(); + + // add it back + engine.create(new Engine.Create(doc().add(field("_uid", "1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + + // its not there... + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + searchResult.release(); + + // refresh and it should be there + engine.refresh(true); + + // now its there... + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + searchResult.release(); + + // now flush + engine.flush(); + + // make sure we can still work with the engine + // now do an update + engine.index(new Engine.Index(newUid("1"), doc().add(field("_uid", "1")).add(field("value", "test1")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + + // its not updated yet... 
+ searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + searchResult.release(); + + // refresh and it should be updated + engine.refresh(true); + + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + searchResult.release(); + + engine.close(); + + // TODO check that operations on engine will throw an EngineAlreadyClosed exception (and while you are at it, create the exception as well) + + // now create a new engine, it should see the flushed changes + engine = createEngine(store); + engine.start(); + + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + searchResult.release(); + } + + @Test public void testSearchResultRelease() throws Exception { + Engine.Searcher searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(0)); + searchResult.release(); + + // create a document + engine.create(new Engine.Create(doc().add(field("_uid", "1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + + // its not there... + searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(0)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + searchResult.release(); + + // refresh and it should be there + engine.refresh(true); + + // now its there... 
+ searchResult = engine.searcher(); + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + // don't release the search result yet... + + // delete, refresh and do a new search, it should not be there + engine.delete(new Engine.Delete(newUid("1"))); + engine.refresh(true); + Engine.Searcher updateSearchResult = engine.searcher(); + assertThat(updateSearchResult, engineSearcherTotalHits(0)); + updateSearchResult.release(); + + // the non release search result should not see the deleted yet... + assertThat(searchResult, engineSearcherTotalHits(1)); + assertThat(searchResult, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + searchResult.release(); + } + + @Test public void testSimpleSnapshot() throws Exception { + // create a document + engine.create(new Engine.Create(doc().add(field("_uid", "1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + + final ExecutorService executorService = Executors.newCachedThreadPool(); + + engine.snapshot(new Engine.SnapshotHandler() { + @Override public void snapshot(final SnapshotIndexCommit snapshotIndexCommit1, final Translog.Snapshot translogSnapshot1) { + assertThat(snapshotIndexCommit1, snapshotIndexCommitExists()); + assertThat(translogSnapshot1, translogSize(1)); + Translog.Create create1 = (Translog.Create) translogSnapshot1.iterator().next(); + assertThat(create1.source(), equalTo("{1}")); + + Future future = executorService.submit(new Callable() { + @Override public Object call() throws Exception { + engine.flush(); + engine.create(new Engine.Create(doc().add(field("_uid", "2")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "2", "{2}")); + engine.flush(); + engine.create(new Engine.Create(doc().add(field("_uid", "3")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "3", "{3}")); + return null; + } + }); + + try { + 
future.get(); + } catch (Exception e) { + e.printStackTrace(); + assertThat(e.getMessage(), false, equalTo(true)); + } + + assertThat(snapshotIndexCommit1, snapshotIndexCommitExists()); + + engine.snapshot(new Engine.SnapshotHandler() { + @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit2, Translog.Snapshot translogSnapshot2) throws EngineException { + assertThat(snapshotIndexCommit1, snapshotIndexCommitExists()); + assertThat(snapshotIndexCommit2, snapshotIndexCommitExists()); + assertThat(snapshotIndexCommit2.getSegmentsFileName(), not(equalTo(snapshotIndexCommit1.getSegmentsFileName()))); + assertThat(translogSnapshot2, translogSize(1)); + Translog.Create create3 = (Translog.Create) translogSnapshot2.iterator().next(); + assertThat(create3.source(), equalTo("{3}")); + } + }); + } + }); + + engine.close(); + } + + @Test public void testSimpleRecover() throws Exception { + engine.create(new Engine.Create(doc().add(field("_uid", "1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + engine.flush(); + + engine.recover(new Engine.RecoveryHandler() { + @Override public void phase1(SnapshotIndexCommit snapshot) throws EngineException { + try { + engine.flush(); + assertThat("flush is not allowed in phase 3", false, equalTo(true)); + } catch (FlushNotAllowedEngineException e) { + // all is well + } + } + + @Override public void phase2(Translog.Snapshot snapshot) throws EngineException { + assertThat(snapshot, translogSize(0)); + try { + engine.flush(); + assertThat("flush is not allowed in phase 3", false, equalTo(true)); + } catch (FlushNotAllowedEngineException e) { + // all is well + } + } + + @Override public void phase3(Translog.Snapshot snapshot) throws EngineException { + assertThat(snapshot, translogSize(0)); + try { + // we can do this here since we are on the same thread + engine.flush(); + assertThat("flush is not allowed in phase 3", false, equalTo(true)); + } catch (FlushNotAllowedEngineException e) 
{ + // all is well + } + } + }); + + engine.flush(); + engine.close(); + } + + @Test public void testRecoverWithOperationsBetweenPhase1AndPhase2() throws Exception { + engine.create(new Engine.Create(doc().add(field("_uid", "1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + engine.flush(); + engine.create(new Engine.Create(doc().add(field("_uid", "2")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "2", "{2}")); + + engine.recover(new Engine.RecoveryHandler() { + @Override public void phase1(SnapshotIndexCommit snapshot) throws EngineException { + } + + @Override public void phase2(Translog.Snapshot snapshot) throws EngineException { + assertThat(snapshot, translogSize(1)); + Translog.Create create = (Translog.Create) snapshot.iterator().next(); + assertThat(create.source(), equalTo("{2}")); + } + + @Override public void phase3(Translog.Snapshot snapshot) throws EngineException { + assertThat(snapshot, translogSize(0)); + } + }); + + engine.flush(); + engine.close(); + } + + @Test public void testRecoverWithOperationsBetweenPhase1AndPhase2AndPhase3() throws Exception { + engine.create(new Engine.Create(doc().add(field("_uid", "1")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "1", "{1}")); + engine.flush(); + engine.create(new Engine.Create(doc().add(field("_uid", "2")).add(field("value", "test")).build(), Lucene.STANDARD_ANALYZER, "test", "2", "{2}")); + + engine.recover(new Engine.RecoveryHandler() { + @Override public void phase1(SnapshotIndexCommit snapshot) throws EngineException { + } + + @Override public void phase2(Translog.Snapshot snapshot) throws EngineException { + assertThat(snapshot, translogSize(1)); + Translog.Create create = (Translog.Create) snapshot.iterator().next(); + assertThat(create.source(), equalTo("{2}")); + + // add for phase3 + engine.create(new Engine.Create(doc().add(field("_uid", "3")).add(field("value", "test")).build(), 
Lucene.STANDARD_ANALYZER, "test", "3", "{3}")); + } + + @Override public void phase3(Translog.Snapshot snapshot) throws EngineException { + assertThat(snapshot, translogSize(1)); + Translog.Create create = (Translog.Create) snapshot.iterator().next(); + assertThat(create.source(), equalTo("{3}")); + } + }); + + engine.flush(); + engine.close(); + } + + private Term newUid(String id) { + return new Term("_uid", id); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java new file mode 100644 index 00000000000..1ffcbaccf22 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.util.lucene.Lucene; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +import java.io.IOException; + +/** + * @author kimchy + */ +public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher { + + private final Query query; + + private final int totalHits; + + public EngineSearcherTotalHitsMatcher(Query query, int totalHits) { + this.query = query; + this.totalHits = totalHits; + } + + @Override public boolean matchesSafely(Engine.Searcher searcher) { + try { + long count = Lucene.count(searcher.searcher(), query, -1f); + return count == totalHits; + } catch (IOException e) { + return false; + } + } + + @Override public void describeTo(Description description) { + description.appendText("total hits of size ").appendValue(totalHits).appendText(" with query ").appendValue(query); + } + + public static Matcher engineSearcherTotalHits(Query query, int totalHits) { + return new EngineSearcherTotalHitsMatcher(query, totalHits); + } + + public static Matcher engineSearcherTotalHits(int totalHits) { + return new EngineSearcherTotalHitsMatcher(new MatchAllDocsQuery(), totalHits); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/robin/SimpleRobinEngineTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/robin/SimpleRobinEngineTests.java new file mode 100644 index 00000000000..87da2270dfd --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/engine/robin/SimpleRobinEngineTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine.robin; + +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.engine.AbstractSimpleEngineTests; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.store.Store; + +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; + +/** + * @author kimchy + */ +public class SimpleRobinEngineTests extends AbstractSimpleEngineTests { + + protected Engine createEngine(Store store) { + return new RobinEngine(shardId, EMPTY_SETTINGS, store, createSnapshotDeletionPolicy(), createTranslog(), createMergePolicy(), createMergeScheduler(), + new AnalysisService(shardId.index()), new SimilarityService(shardId.index())); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/SimpleJsonMapperTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/SimpleJsonMapperTests.java new file mode 100644 index 00000000000..936c653041c --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/SimpleJsonMapperTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.json.simple; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.json.JsonDocumentMapper; +import org.elasticsearch.index.mapper.json.JsonDocumentMapperParser; +import org.testng.annotations.Test; + +import static org.apache.lucene.document.Field.Store.*; +import static org.elasticsearch.index.mapper.json.JsonMapperBuilders.*; +import static org.elasticsearch.util.io.Streams.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy + */ +public class SimpleJsonMapperTests { + + @Test public void testSimpleMapper() throws Exception { + JsonDocumentMapper docMapper = doc( + object("person") + .add(object("name").add(stringField("first").store(YES).index(Field.Index.NO))) + ).sourceField(source("_source").compressionThreshold(0)).build(); + + String json = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test1.json"); + Document doc = docMapper.parse("person", "1", json).doc(); + + assertThat((double) doc.getBoost(), closeTo(3.7, 
0.01)); + assertThat(doc.get(docMapper.mappers().name("first").mapper().indexName()), equalTo("shay")); + assertThat(docMapper.mappers().name("first").mapper().fullName(), equalTo("name.first")); +// System.out.println("Document: " + doc); +// System.out.println("Json: " + docMapper.sourceMapper().value(doc)); + doc = docMapper.parse(json).doc(); +// System.out.println("Document: " + doc); +// System.out.println("Json: " + docMapper.sourceMapper().value(doc)); + } + + @Test public void testSimpleParser() throws Exception { + String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test-mapping.json"); + JsonDocumentMapper docMapper = (JsonDocumentMapper) new JsonDocumentMapperParser(new AnalysisService(new Index("test"))).parse(mapping); + String json = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test1.json"); + Document doc = docMapper.parse(json).doc(); + assertThat(doc.get(docMapper.uidMapper().indexName()), equalTo(Uid.createUid("person", "1"))); + assertThat((double) doc.getBoost(), closeTo(3.7, 0.01)); + assertThat(doc.get(docMapper.mappers().name("first").mapper().indexName()), equalTo("shay")); + assertThat(doc.getFields(docMapper.idMapper().indexName()).length, equalTo(1)); +// System.out.println("Document: " + doc); +// System.out.println("Json: " + docMapper.sourceMapper().value(doc)); + } + + @Test public void testSimpleParserMappingWithNoType() throws Exception { + String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test-mapping-notype.json"); + JsonDocumentMapper docMapper = (JsonDocumentMapper) new JsonDocumentMapperParser(new AnalysisService(new Index("test"))).parse("person", mapping); + String json = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test1.json"); + Document doc = docMapper.parse(json).doc(); + assertThat(doc.get(docMapper.uidMapper().indexName()), equalTo(Uid.createUid("person", "1"))); + assertThat((double) 
doc.getBoost(), closeTo(3.7, 0.01)); + assertThat(doc.get(docMapper.mappers().name("first").mapper().indexName()), equalTo("shay")); + assertThat(doc.getFields(docMapper.idMapper().indexName()).length, equalTo(1)); +// System.out.println("Document: " + doc); +// System.out.println("Json: " + docMapper.sourceMapper().value(doc)); + } + + @Test public void testSimpleParserNoTypeNoId() throws Exception { + String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test-mapping.json"); + JsonDocumentMapper docMapper = (JsonDocumentMapper) new JsonDocumentMapperParser(new AnalysisService(new Index("test"))).parse(mapping); + String json = copyToStringFromClasspath("/org/elasticsearch/index/mapper/json/simple/test1-notype-noid.json"); + Document doc = docMapper.parse("person", "1", json).doc(); + assertThat(doc.get(docMapper.uidMapper().indexName()), equalTo(Uid.createUid("person", "1"))); + assertThat((double) doc.getBoost(), closeTo(3.7, 0.01)); + assertThat(doc.get(docMapper.mappers().name("first").mapper().indexName()), equalTo("shay")); + assertThat(doc.getFields(docMapper.idMapper().indexName()).length, equalTo(1)); +// System.out.println("Document: " + doc); +// System.out.println("Json: " + docMapper.sourceMapper().value(doc)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test-mapping-notype.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test-mapping-notype.json new file mode 100644 index 00000000000..f5aac46dfcc --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test-mapping-notype.json @@ -0,0 +1,52 @@ +{ + dateFormats : ["yyyy-MM-dd", "dd-MM-yyyy"], + dynamic : false, + enabled : true, + idField : {name : "_id", indexName : "_id"}, + sourceField : {name : "_source", compressionThreshold : 0}, + typeField : {name : "_type"}, + boostField : {name : "_boost", nullValue : 2.0}, + properties : { + 
name : { + type : "object", + dynamic : false, + properties : { + first : {type : "string", store : "yes"}, + last : {type : "string", index : "not_analyzed"} + } + }, + address : { + type : "object", + properties : { + first : { + properties : { + location : {type : "string", store : "yes", indexName : "firstLocation"} + } + }, + last : { + properties : { + location : {type : "string"} + } + } + } + }, + age : {type : "integer", nullValue : 0}, + birthdate : {type : "date", format : "yyyy-MM-dd"}, + nerd : {type : "boolean"}, + dogs : {type : "string", indexName : "dog"}, + complex : { + type : "object", + properties : { + value1 : {type : "string"}, + value2 : {type : "string"} + } + }, + complex2 : { + type : "object", + properties : { + value1 : {type : "string"}, + value2 : {type : "string"} + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test-mapping.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test-mapping.json new file mode 100644 index 00000000000..8564b1b9bf3 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test-mapping.json @@ -0,0 +1,54 @@ +{ + person : { + dateFormats : ["yyyy-MM-dd", "dd-MM-yyyy"], + dynamic : false, + enabled : true, + idField : {name : "_id", indexName : "_id"}, + sourceField : {name : "_source", compressionThreshold : 0}, + typeField : {name : "_type"}, + boostField : {name : "_boost", nullValue : 2.0}, + properties : { + name : { + type : "object", + dynamic : false, + properties : { + first : {type : "string", store : "yes"}, + last : {type : "string", index : "not_analyzed"} + } + }, + address : { + type : "object", + properties : { + first : { + properties : { + location : {type : "string", store : "yes", indexName : "firstLocation"} + } + }, + last : { + properties : { + location : {type : "string"} + } + } + } + }, + age : {type : "integer", nullValue 
: 0}, + birthdate : {type : "date", format : "yyyy-MM-dd"}, + nerd : {type : "boolean"}, + dogs : {type : "string", indexName : "dog"}, + complex : { + type : "object", + properties : { + value1 : {type : "string"}, + value2 : {type : "string"} + } + }, + complex2 : { + type : "object", + properties : { + value1 : {type : "string"}, + value2 : {type : "string"} + } + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1-notype-noid.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1-notype-noid.json new file mode 100644 index 00000000000..09e3e463e98 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1-notype-noid.json @@ -0,0 +1,40 @@ +{ + _boost : 3.7, + name : { + first : "shay", + last : "banon" + }, + address : { + first : { + location : "first location" + }, + last : { + location : "last location" + } + }, + age : 32, + birthDate : "1977-11-15", + nerd : true, + dogs : ["buck", "mia"], + complex : [ + { + value1 : "value1" + }, + { + value2 : "value2" + } + ], + complex2 : [ + [ + { + value1 : "value1" + } + ], + [ + { + value2 : "value2" + } + ] + ], + nullValue : null +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1-notype.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1-notype.json new file mode 100644 index 00000000000..f566f4992e3 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1-notype.json @@ -0,0 +1,41 @@ +{ + _boost : 3.7, + _id : "1", + name : { + first : "shay", + last : "banon" + }, + address : { + first : { + location : "first location" + }, + last : { + location : "last location" + } + }, + age : 32, + birthDate : "1977-11-15", + nerd : true, + dogs : ["buck", "mia"], + complex : [ + { + value1 : 
"value1" + }, + { + value2 : "value2" + } + ], + complex2 : [ + [ + { + value1 : "value1" + } + ], + [ + { + value2 : "value2" + } + ] + ], + nullValue : null +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1.json new file mode 100644 index 00000000000..181d61cfa24 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/mapper/json/simple/test1.json @@ -0,0 +1,43 @@ +{ + person : { + _boost : 3.7, + _id : "1", + name : { + first : "shay", + last : "banon" + }, + address : { + first : { + location : "first location" + }, + last : { + location : "last location" + } + }, + age : 32, + birthDate : "1977-11-15", + nerd : true, + dogs : ["buck", "mia"], + complex : [ + { + value1 : "value1" + }, + { + value2 : "value2" + } + ], + complex2 : [ + [ + { + value1 : "value1" + } + ], + [ + { + value2 : "value2" + } + ] + ], + nullValue : null + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/SimpleJsonIndexQueryParserTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/SimpleJsonIndexQueryParserTests.java new file mode 100644 index 00000000000..2b5675c1d5e --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/SimpleJsonIndexQueryParserTests.java @@ -0,0 +1,544 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json; + +import org.apache.lucene.index.Term; +import org.apache.lucene.search.*; +import org.apache.lucene.search.spans.*; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.cache.filter.none.NoneFilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.IndexQueryParser; +import org.elasticsearch.util.lucene.search.TermFilter; +import org.testng.annotations.Test; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.List; + +import static org.elasticsearch.index.query.json.JsonFilterBuilders.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.elasticsearch.util.io.Streams.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy + */ +public class SimpleJsonIndexQueryParserTests { + + private final Index index = new Index("test"); + + @Test public void testQueryStringBuilder() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(queryString("test").defaultField("content").phraseSlop(1).build()); + + assertThat(parsedQuery, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) parsedQuery; + assertThat(termQuery.getTerm(), 
equalTo(new Term("content", "test"))); + } + + @Test public void testQueryString() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/query.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) parsedQuery; + assertThat(termQuery.getTerm(), equalTo(new Term("content", "test"))); + } + + @Test public void testMatchAllBuilder() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(matchAllQuery().boost(1.2f).build()); + assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class)); + MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery; + assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01)); + } + + @Test public void testMatchAll() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/matchAll.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class)); + MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery; + assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01)); + } + + @Test public void testDisMaxBuilder() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(disMaxQuery().boost(1.2f).tieBreakerMultiplier(0.7f).add(termQuery("age", 34)).add(termQuery("age", 35))); + assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class)); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery; + assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01)); + + Field field = disjunctionMaxQuery.getClass().getDeclaredField("disjuncts"); + field.setAccessible(true); + List disjuncts = (List) field.get(disjunctionMaxQuery); + 
assertThat(disjuncts.size(), equalTo(2)); + + Query firstQ = disjuncts.get(0); + assertThat(firstQ, instanceOf(TermQuery.class)); + assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + + Query secondsQ = disjuncts.get(1); + assertThat(secondsQ, instanceOf(TermQuery.class)); + assertThat(((TermQuery) secondsQ).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + } + + @Test public void testDisMax() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/disMax.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class)); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery; + assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01)); + + Field field = disjunctionMaxQuery.getClass().getDeclaredField("disjuncts"); + field.setAccessible(true); + List disjuncts = (List) field.get(disjunctionMaxQuery); + assertThat(disjuncts.size(), equalTo(2)); + + Query firstQ = disjuncts.get(0); + assertThat(firstQ, instanceOf(TermQuery.class)); + assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + + Query secondsQ = disjuncts.get(1); + assertThat(secondsQ, instanceOf(TermQuery.class)); + assertThat(((TermQuery) secondsQ).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + } + + @Test public void testTermQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(termQuery("age", 34).build()); + assertThat(parsedQuery, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(termQuery.getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + } + + 
@Test public void testTermQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/term.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(termQuery.getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + } + + @Test public void testTermWithBoostQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(termQuery("age", 34).boost(2.0f).build()); + assertThat(parsedQuery, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(termQuery.getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat((double) termQuery.getBoost(), closeTo(2.0, 0.01)); + } + + @Test public void testTermWithBoostQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/term-with-boost.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(termQuery.getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat((double) termQuery.getBoost(), closeTo(2.0, 0.01)); + } + + @Test public void testPrefixQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh").build()); + assertThat(parsedQuery, instanceOf(PrefixQuery.class)); + PrefixQuery prefixQuery = 
(PrefixQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh"))); + } + + @Test public void testPrefixQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/prefix.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(PrefixQuery.class)); + PrefixQuery prefixQuery = (PrefixQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh"))); + } + + @Test public void testPrefixFilteredQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), prefixFilter("name.first", "sh")).build()); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter(); + assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh"))); + } + + @Test public void testPrefixFilteredQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/prefix-filter.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter(); + assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh"))); + } + + @Test public void testPrefixQueryBoostQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(prefixQuery("name.first", 
"sh").boost(2.0f).build()); + assertThat(parsedQuery, instanceOf(PrefixQuery.class)); + PrefixQuery prefixQuery = (PrefixQuery) parsedQuery; + assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh"))); + assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01)); + } + + @Test public void testPrefixQueryBoostQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/prefix-with-boost.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(PrefixQuery.class)); + PrefixQuery prefixQuery = (PrefixQuery) parsedQuery; + assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh"))); + assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01)); + } + + @Test public void testWildcardQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(wildcardQuery("name.first", "sh*").build()); + assertThat(parsedQuery, instanceOf(WildcardQuery.class)); + WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery; + assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*"))); + } + + @Test public void testWildcardQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/wildcard.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(WildcardQuery.class)); + WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery; + assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*"))); + } + + @Test public void testRangeQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(rangeQuery("age").from(23).to(54).includeLower(true).includeUpper(false).build()); + // since age is automatically registered in data, we 
encode it as numeric + assertThat(parsedQuery, instanceOf(NumericRangeQuery.class)); + NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery; + assertThat(rangeQuery.getField(), equalTo("age")); + assertThat(rangeQuery.getMin().intValue(), equalTo(23)); + assertThat(rangeQuery.getMax().intValue(), equalTo(54)); + assertThat(rangeQuery.includesMin(), equalTo(true)); + assertThat(rangeQuery.includesMax(), equalTo(false)); + } + + @Test public void testRangeQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/range.json"); + Query parsedQuery = queryParser.parse(query); + // since age is automatically registered in data, we encode it as numeric + assertThat(parsedQuery, instanceOf(NumericRangeQuery.class)); + NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery; + assertThat(rangeQuery.getField(), equalTo("age")); + assertThat(rangeQuery.getMin().intValue(), equalTo(23)); + assertThat(rangeQuery.getMax().intValue(), equalTo(54)); + assertThat(rangeQuery.includesMin(), equalTo(true)); + assertThat(rangeQuery.includesMax(), equalTo(false)); + } + + @Test public void testRangeFilteredQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false)).build()); + // since age is automatically registered in data, we encode it as numeric + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + Filter filter = ((FilteredQuery) parsedQuery).getFilter(); + assertThat(filter, instanceOf(NumericRangeFilter.class)); + NumericRangeFilter rangeFilter = (NumericRangeFilter) filter; + assertThat(rangeFilter.getField(), equalTo("age")); + assertThat(rangeFilter.getMin().intValue(), equalTo(23)); + assertThat(rangeFilter.getMax().intValue(), equalTo(54)); + 
assertThat(rangeFilter.includesMin(), equalTo(true)); + assertThat(rangeFilter.includesMax(), equalTo(false)); + } + + @Test public void testRangeFilteredQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/range-filter.json"); + Query parsedQuery = queryParser.parse(query); + // since age is automatically registered in data, we encode it as numeric + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + Filter filter = ((FilteredQuery) parsedQuery).getFilter(); + assertThat(filter, instanceOf(NumericRangeFilter.class)); + NumericRangeFilter rangeFilter = (NumericRangeFilter) filter; + assertThat(rangeFilter.getField(), equalTo("age")); + assertThat(rangeFilter.getMin().intValue(), equalTo(23)); + assertThat(rangeFilter.getMax().intValue(), equalTo(54)); + assertThat(rangeFilter.includesMin(), equalTo(true)); + assertThat(rangeFilter.includesMax(), equalTo(false)); + } + + @Test public void testBoolFilteredQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/bool-filter.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + BooleanFilter booleanFilter = (BooleanFilter) filteredQuery.getFilter(); + + // TODO get the content and test + } + + @Test public void testBoolQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(boolQuery().must(termQuery("content", "test1")).mustNot(termQuery("content", "test2")).should(termQuery("content", "test3")).must(termQuery("content", "test4")).build()); + assertThat(parsedQuery, instanceOf(BooleanQuery.class)); + BooleanQuery booleanQuery = (BooleanQuery) parsedQuery; + BooleanClause[] clauses = booleanQuery.getClauses(); + 
+ assertThat(clauses.length, equalTo(4)); + + assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1"))); + assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST)); + + assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test2"))); + assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); + + assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test3"))); + assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.SHOULD)); + + assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test4"))); + assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.MUST)); + } + + + @Test public void testBoolQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/bool.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(BooleanQuery.class)); + BooleanQuery booleanQuery = (BooleanQuery) parsedQuery; + BooleanClause[] clauses = booleanQuery.getClauses(); + + assertThat(clauses.length, equalTo(4)); + + assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1"))); + assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST)); + + assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test2"))); + assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); + + assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test3"))); + assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.SHOULD)); + + assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test4"))); + assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.MUST)); + } + + @Test public void testFilteredQueryBuilder() throws IOException { + 
IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termFilter("name.last", "banon")).build()); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay"))); + assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon"))); + } + + @Test public void testFilteredQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/filtered-query.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay"))); + assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon"))); + } + + @Test public void testConstantScoreQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(constantScoreQuery(termFilter("name.last", "banon"))); + assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class)); + ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery; + assertThat(((TermFilter) constantScoreQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon"))); + } + + @Test public void testConstantScoreQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/constantScore-query.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class)); + ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) 
parsedQuery; + assertThat(((TermFilter) constantScoreQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon"))); + } + + @Test public void testSpanTermQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(spanTermQuery("age", 34).build()); + assertThat(parsedQuery, instanceOf(SpanTermQuery.class)); + SpanTermQuery termQuery = (SpanTermQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(termQuery.getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + } + + @Test public void testSpanTermQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/spanTerm.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(SpanTermQuery.class)); + SpanTermQuery termQuery = (SpanTermQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(termQuery.getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + } + + @Test public void testSpanNotQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(spanNotQuery().include(spanTermQuery("age", 34)).exclude(spanTermQuery("age", 35)).build()); + assertThat(parsedQuery, instanceOf(SpanNotQuery.class)); + SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + } + + @Test public void testSpanNotQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + 
String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/spanNot.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(SpanNotQuery.class)); + SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + } + + @Test public void testSpanFirstQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(spanFirstQuery(spanTermQuery("age", 34), 12).build()); + assertThat(parsedQuery, instanceOf(SpanFirstQuery.class)); + SpanFirstQuery spanFirstQuery = (SpanFirstQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(((SpanTermQuery) spanFirstQuery.getMatch()).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(spanFirstQuery.getEnd(), equalTo(12)); + } + + @Test public void testSpanFirstQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/spanFirst.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(SpanFirstQuery.class)); + SpanFirstQuery spanFirstQuery = (SpanFirstQuery) parsedQuery; + // since age is automatically registered in data, we encode it as numeric + assertThat(((SpanTermQuery) spanFirstQuery.getMatch()).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(spanFirstQuery.getEnd(), equalTo(12)); + } + + @Test public void testSpanNearQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = 
queryParser.parse(spanNearQuery().clause(spanTermQuery("age", 34)).clause(spanTermQuery("age", 35)).clause(spanTermQuery("age", 36)).slop(12).inOrder(false).collectPayloads(false).build()); + assertThat(parsedQuery, instanceOf(SpanNearQuery.class)); + SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery; + assertThat(spanNearQuery.getClauses().length, equalTo(3)); + assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + assertThat(((SpanTermQuery) spanNearQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(36)))); + assertThat(spanNearQuery.isInOrder(), equalTo(false)); + } + + @Test public void testSpanNearQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/spanNear.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(SpanNearQuery.class)); + SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery; + assertThat(spanNearQuery.getClauses().length, equalTo(3)); + assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + assertThat(((SpanTermQuery) spanNearQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(36)))); + assertThat(spanNearQuery.isInOrder(), equalTo(false)); + } + + @Test public void testSpanOrQueryBuilder() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(spanOrQuery().clause(spanTermQuery("age", 34)).clause(spanTermQuery("age", 
35)).clause(spanTermQuery("age", 36)).build()); + assertThat(parsedQuery, instanceOf(SpanOrQuery.class)); + SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery; + assertThat(spanOrQuery.getClauses().length, equalTo(3)); + assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(36)))); + } + + @Test public void testSpanOrQuery() throws IOException { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/spanOr.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(SpanOrQuery.class)); + SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery; + assertThat(spanOrQuery.getClauses().length, equalTo(3)); + assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(34)))); + assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(35)))); + assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", NumericUtils.longToPrefixCoded(36)))); + } + + @Test public void testQueryFilterBuilder() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), queryFilter(termQuery("name.last", "banon"))).build()); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter(); + Field field = QueryWrapperFilter.class.getDeclaredField("query"); + 
field.setAccessible(true); + Query wrappedQuery = (Query) field.get(queryWrapperFilter); + assertThat(wrappedQuery, instanceOf(TermQuery.class)); + assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon"))); + } + + @Test public void testQueryFilter() throws Exception { + IndexQueryParser queryParser = newQueryParser(); + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/json/query-filter.json"); + Query parsedQuery = queryParser.parse(query); + assertThat(parsedQuery, instanceOf(FilteredQuery.class)); + FilteredQuery filteredQuery = (FilteredQuery) parsedQuery; + QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter(); + Field field = QueryWrapperFilter.class.getDeclaredField("query"); + field.setAccessible(true); + Query wrappedQuery = (Query) field.get(queryWrapperFilter); + assertThat(wrappedQuery, instanceOf(TermQuery.class)); + assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon"))); + } + + private JsonIndexQueryParser newQueryParser() throws IOException { + return new JsonIndexQueryParser(new Index("test"), EMPTY_SETTINGS, + newMapperService(), new NoneFilterCache(index, EMPTY_SETTINGS), new AnalysisService(index), null, null, "test", null); + } + + private MapperService newMapperService() throws IOException { + Environment environment = new Environment(); + MapperService mapperService = new MapperService(index, EMPTY_SETTINGS, environment, new AnalysisService(index)); + // init a mapping with data + mapperService.type("person").parse(copyToStringFromClasspath("/org/elasticsearch/index/query/json/data.json")); + return mapperService; + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/bool-filter.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/bool-filter.json new file mode 100644 index 00000000000..0fa5f5ed2eb --- /dev/null +++ 
b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/bool-filter.json @@ -0,0 +1,23 @@ +{ + filteredQuery : { + query : { + term : { "name.first" : "shay" } + }, + filter : { + bool : { + must : { + term : { "name.first" : "shay1" } + }, + mustNot: { + term : { "name.first" : "shay2" } + }, + should: { + term : { "name.first" : "shay3" } + }, + must : { + term : { "name.first" : "shay4" } + } + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/bool.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/bool.json new file mode 100644 index 00000000000..758d21e4c45 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/bool.json @@ -0,0 +1,28 @@ +{ + bool : { + must : { + queryString : { + defaultField : "content", + query : "test1" + } + }, + mustNot: { + queryString : { + defaultField : "content", + query : "test2" + } + }, + should: { + queryString : { + defaultField : "content", + query : "test3" + } + }, + must : { + queryString : { + defaultField : "content", + query : "test4" + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/constantScore-query.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/constantScore-query.json new file mode 100644 index 00000000000..0ba7297f4f5 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/constantScore-query.json @@ -0,0 +1,7 @@ +{ + constantScore : { + filter : { + term : { "name.last" : "banon"} + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/data.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/data.json new file mode 100644 index 00000000000..181d61cfa24 --- /dev/null +++ 
b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/data.json @@ -0,0 +1,43 @@ +{ + person : { + _boost : 3.7, + _id : "1", + name : { + first : "shay", + last : "banon" + }, + address : { + first : { + location : "first location" + }, + last : { + location : "last location" + } + }, + age : 32, + birthDate : "1977-11-15", + nerd : true, + dogs : ["buck", "mia"], + complex : [ + { + value1 : "value1" + }, + { + value2 : "value2" + } + ], + complex2 : [ + [ + { + value1 : "value1" + } + ], + [ + { + value2 : "value2" + } + ] + ], + nullValue : null + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/disMax.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/disMax.json new file mode 100644 index 00000000000..cd36fbb02fe --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/disMax.json @@ -0,0 +1,14 @@ +{ + disMax : { + tieBreakerMultiplier : 0.7, + boost: 1.2, + queries : [ + { + term : { age : 34 } + }, + { + term : { age : 35 } + } + ] + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/filtered-query.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/filtered-query.json new file mode 100644 index 00000000000..56ed4c29604 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/filtered-query.json @@ -0,0 +1,10 @@ +{ + filteredQuery : { + query : { + term : { "name.first" : "shay" } + }, + filter : { + term : { "name.last" : "banon"} + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/IndexQueryParserModuleTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/IndexQueryParserModuleTests.java new file mode 100644 index 00000000000..d385d3767fb --- /dev/null +++ 
b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/IndexQueryParserModuleTests.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json.guice; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNameModule; +import org.elasticsearch.index.analysis.AnalysisModule; +import org.elasticsearch.index.cache.filter.FilterCacheModule; +import org.elasticsearch.index.query.IndexQueryParserModule; +import org.elasticsearch.index.query.IndexQueryParserService; +import org.elasticsearch.index.query.json.JsonIndexQueryParser; +import org.elasticsearch.index.query.json.JsonQueryParserRegistry; +import org.elasticsearch.index.settings.IndexSettingsModule; +import org.elasticsearch.util.settings.Settings; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.settings.ImmutableSettings.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexQueryParserModuleTests { + + @Test public void testCustomInjection() { + Settings settings = 
settingsBuilder() + .putClass("index.queryparser.json.query.my.type", MyJsonQueryParser.class) + .put("index.queryparser.json.query.my.param1", "value1") + .putClass("index.queryparser.json.filter.my.type", MyJsonFilterParser.class) + .put("index.queryparser.json.filter.my.param2", "value2") + .put("index.cache.filter.type", "none") + .build(); + + Index index = new Index("test"); + Injector injector = Guice.createInjector( + new IndexSettingsModule(settings), + new FilterCacheModule(settings), + new AnalysisModule(settings), + new IndexQueryParserModule(settings), + new IndexNameModule(index) + ); + IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class); + + + JsonQueryParserRegistry parserRegistry = ((JsonIndexQueryParser) indexQueryParserService.defaultIndexQueryParser()).queryParserRegistry(); + + MyJsonQueryParser myJsonQueryParser = (MyJsonQueryParser) parserRegistry.queryParser("my"); + + assertThat(myJsonQueryParser.name(), equalTo("my")); + assertThat(myJsonQueryParser.settings().get("param1"), equalTo("value1")); + + MyJsonFilterParser myJsonFilterParser = (MyJsonFilterParser) parserRegistry.filterParser("my"); + assertThat(myJsonFilterParser.name(), equalTo("my")); + assertThat(myJsonFilterParser.settings().get("param2"), equalTo("value2")); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/MyJsonFilterParser.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/MyJsonFilterParser.java new file mode 100644 index 00000000000..4a7b0b4ad1c --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/MyJsonFilterParser.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query.json.guice; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.search.Filter; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.query.json.JsonFilterParser; +import org.elasticsearch.index.query.json.JsonQueryParseContext; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class MyJsonFilterParser extends AbstractIndexComponent implements JsonFilterParser { + + private final String name; + + private final Settings settings; + + @Inject public MyJsonFilterParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings); + this.name = name; + this.settings = settings; + } + + @Override public String name() { + return this.name; + } + + @Override public Filter parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + return null; + } + + public Settings settings() { + return settings; + } +} \ No newline at end of file diff --git 
a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/MyJsonQueryParser.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/MyJsonQueryParser.java new file mode 100644 index 00000000000..135f109b3ba --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/guice/MyJsonQueryParser.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query.json.guice; + +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import org.apache.lucene.search.Query; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.query.json.JsonQueryParseContext; +import org.elasticsearch.index.query.json.JsonQueryParser; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.util.settings.Settings; + +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class MyJsonQueryParser extends AbstractIndexComponent implements JsonQueryParser { + + private final String name; + + private final Settings settings; + + @Inject public MyJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings); + this.name = name; + this.settings = settings; + } + + @Override public String name() { + return this.name; + } + + @Override public Query parse(JsonQueryParseContext parseContext) throws IOException, QueryParsingException { + return null; + } + + public Settings settings() { + return settings; + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/matchAll.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/matchAll.json new file mode 100644 index 00000000000..16e592e4a9d --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/matchAll.json @@ -0,0 +1,3 @@ +{ + matchAll : { boost : 1.2 } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix-filter.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix-filter.json new file mode 100644 index 00000000000..ea53f8289f9 --- /dev/null +++ 
b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix-filter.json @@ -0,0 +1,10 @@ +{ + filteredQuery : { + query : { + term : { "name.first" : "shay" } + }, + filter : { + prefix : { "name.first" : "sh" } + } + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix-with-boost.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix-with-boost.json new file mode 100644 index 00000000000..e8de501a6d6 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix-with-boost.json @@ -0,0 +1,3 @@ +{ + prefix : { "name.first" : { prefix : "sh", boost : 2.0 } } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix.json new file mode 100644 index 00000000000..6330463cf8e --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/prefix.json @@ -0,0 +1,3 @@ +{ + prefix : { "name.first" : "sh" } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/query-filter.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/query-filter.json new file mode 100644 index 00000000000..ba628634a1d --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/query-filter.json @@ -0,0 +1,10 @@ +{ + filteredQuery : { + query : { + term : { "name.first" : "shay" } + }, + filter : { + query : { term : { "name.last" : "banon" } } + } + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/query.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/query.json new file mode 100644 index 00000000000..b53cc06d094 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/query.json @@ 
-0,0 +1,7 @@ +{ + queryString : { + defaultField : "content", + phraseSlop : 1, + query: "test" + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/range-filter.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/range-filter.json new file mode 100644 index 00000000000..0f742fd49b9 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/range-filter.json @@ -0,0 +1,12 @@ +{ + filteredQuery : { + query : { + term : { "name.first" : "shay" } + }, + filter : { + range : { + age : { from : "23", to : "54", includeLower : true, includeUpper: false} + } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/range.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/range.json new file mode 100644 index 00000000000..d0ecf0a15c7 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/range.json @@ -0,0 +1,5 @@ +{ + range : { + age : { from : "23", to : "54", includeLower : true, includeUpper: false} + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanFirst.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanFirst.json new file mode 100644 index 00000000000..067572f279e --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanFirst.json @@ -0,0 +1,8 @@ +{ + spanFirst : { + match : { + spanTerm : { age : 34 } + }, + end : 12 + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanNear.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanNear.json new file mode 100644 index 00000000000..6f92ed29799 --- /dev/null +++ 
b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanNear.json @@ -0,0 +1,18 @@ +{ + spanNear : { + clauses : [ + { + spanTerm : { age : 34 } + }, + { + spanTerm : { age : 35 } + }, + { + spanTerm : { age : 36 } + } + ], + slop : 12, + inOrder : false, + collectPayloads : false + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanNot.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanNot.json new file mode 100644 index 00000000000..1d156c206ba --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanNot.json @@ -0,0 +1,10 @@ +{ + spanNot : { + include : { + spanTerm : { age : 34 } + }, + exclude : { + spanTerm : { age : 35 } + } + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanOr.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanOr.json new file mode 100644 index 00000000000..aae2626ec86 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanOr.json @@ -0,0 +1,15 @@ +{ + spanOr : { + clauses : [ + { + spanTerm : { age : 34 } + }, + { + spanTerm : { age : 35 } + }, + { + spanTerm : { age : 36 } + } + ] + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanTerm.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanTerm.json new file mode 100644 index 00000000000..86e19059708 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/spanTerm.json @@ -0,0 +1,3 @@ +{ + spanTerm : { age : 34 } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/term-with-boost.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/term-with-boost.json new file mode 100644 
index 00000000000..3fb7b486754 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/term-with-boost.json @@ -0,0 +1,3 @@ +{ + term : { age : { value : 34, boost : 2.0 } } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/term.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/term.json new file mode 100644 index 00000000000..1ba83a46f54 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/term.json @@ -0,0 +1,3 @@ +{ + term : { age : 34 } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/wildcard.json b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/wildcard.json new file mode 100644 index 00000000000..b39b8114876 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/query/json/wildcard.json @@ -0,0 +1,3 @@ +{ + wildcard : { "name.first" : "sh*" } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/shard/SimpleIndexShardTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/shard/SimpleIndexShardTests.java new file mode 100644 index 00000000000..ac033947932 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/shard/SimpleIndexShardTests.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.cache.filter.none.NoneFilterCache; +import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy; +import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.robin.RobinEngine; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider; +import org.elasticsearch.index.merge.scheduler.SerialMergeSchedulerProvider; +import org.elasticsearch.index.query.IndexQueryParserService; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.ram.RamStore; +import org.elasticsearch.index.translog.memory.MemoryTranslog; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.dynamic.DynamicThreadPool; +import org.elasticsearch.util.settings.Settings; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy + */ +public class SimpleIndexShardTests { + + private ThreadPool 
threadPool; + + private IndexShard indexShard; + + @BeforeMethod public void createIndexShard() { + Settings settings = EMPTY_SETTINGS; + Environment environment = new Environment(settings); + ShardId shardId = new ShardId("test", 1); + AnalysisService analysisService = new AnalysisService(shardId.index()); + MapperService mapperService = new MapperService(shardId.index(), settings, environment, analysisService); + IndexQueryParserService queryParserService = new IndexQueryParserService(shardId.index(), mapperService, new NoneFilterCache(shardId.index(), EMPTY_SETTINGS), analysisService); + FilterCache filterCache = new NoneFilterCache(shardId.index(), settings); + + SnapshotDeletionPolicy policy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, settings)); + Store store = new RamStore(shardId, settings); + MemoryTranslog translog = new MemoryTranslog(shardId, settings); + Engine engine = new RobinEngine(shardId, settings, store, policy, translog, + new LogByteSizeMergePolicyProvider(store), new SerialMergeSchedulerProvider(shardId, settings), + analysisService, new SimilarityService(shardId.index())); + + threadPool = new DynamicThreadPool(); + + indexShard = new InternalIndexShard(shardId, EMPTY_SETTINGS, store, engine, translog, threadPool, mapperService, queryParserService, filterCache).start(); + } + + @AfterMethod public void tearDown() { + indexShard.close(); + threadPool.shutdown(); + } + + @Test public void testSimpleIndexGetDelete() { + String source1 = "{ type1 : { _id : \"1\", name : \"test\", age : 35 } }"; + indexShard.index("type1", "1", source1); + indexShard.refresh(true); + + String sourceFetched = indexShard.get("type1", "1"); + + assertThat(sourceFetched, equalTo(source1)); + + assertThat(indexShard.count(0, "{ term : { age : 35 } }", null), equalTo(1l)); + assertThat(indexShard.count(0, "{ queryString : { query : \"name:test\" } }", null), equalTo(1l)); + assertThat(indexShard.count(0, "{ queryString : { query : \"age:35\" } 
}", null), equalTo(1l)); + + indexShard.delete("type1", "1"); + indexShard.refresh(true); + + assertThat(indexShard.get("type1", "1"), nullValue()); + + indexShard.index("type1", "1", source1); + indexShard.refresh(true); + sourceFetched = indexShard.get("type1", "1"); + assertThat(sourceFetched, equalTo(source1)); + indexShard.deleteByQuery("{ term : { name : \"test\" } }", null); + indexShard.refresh(true); + assertThat(indexShard.get("type1", "1"), nullValue()); + + indexShard.close(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/store/bytebuffer/SimpleByteBufferStoreTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/store/bytebuffer/SimpleByteBufferStoreTests.java new file mode 100644 index 00000000000..2e70f22c89a --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/store/bytebuffer/SimpleByteBufferStoreTests.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.bytebuffer; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.testng.annotations.Test; + +import java.io.IOException; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy + */ +public class SimpleByteBufferStoreTests { + + @Test public void test1BufferNoCache() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(1, SizeUnit.BYTES), new SizeValue(0, SizeUnit.BYTES), false, false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test1Buffer() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(1, SizeUnit.BYTES), new SizeValue(10, SizeUnit.BYTES), false, false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test3Buffer() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(3, SizeUnit.BYTES), new SizeValue(10, SizeUnit.BYTES), false, false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test10Buffer() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(10, SizeUnit.BYTES), new SizeValue(20, SizeUnit.BYTES), false, false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test15Buffer() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(15, SizeUnit.BYTES), new SizeValue(30, SizeUnit.BYTES), false, false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test40Buffer() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(40, SizeUnit.BYTES), new SizeValue(80, SizeUnit.BYTES), false, 
false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void testSimpleLocking() throws Exception { + ByteBufferDirectory dir = new ByteBufferDirectory(new SizeValue(40, SizeUnit.BYTES), new SizeValue(80, SizeUnit.BYTES), false, false); + + Lock lock = dir.makeLock("testlock"); + + assertThat(lock.isLocked(), equalTo(false)); + assertThat(lock.obtain(200), equalTo(true)); + assertThat(lock.isLocked(), equalTo(true)); + try { + assertThat(lock.obtain(200), equalTo(false)); + assertThat("lock should be thrown", false, equalTo(true)); + } catch (LockObtainFailedException e) { + // all is well + } + lock.release(); + assertThat(lock.isLocked(), equalTo(false)); + dir.close(); + } + + private void insertData(ByteBufferDirectory dir) throws IOException { + byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8}; + IndexOutput indexOutput = dir.createOutput("value1"); + indexOutput.writeBytes(new byte[]{2, 4, 6, 7, 8}, 5); + indexOutput.writeInt(-1); + indexOutput.writeLong(10); + indexOutput.writeInt(0); + indexOutput.writeInt(0); + indexOutput.writeBytes(test, 8); + indexOutput.writeBytes(test, 5); + + indexOutput.seek(0); + indexOutput.writeByte((byte) 8); + if (dir.bufferSizeInBytes() > 4) { + indexOutput.seek(2); + indexOutput.writeBytes(new byte[]{1, 2}, 2); + } + + indexOutput.close(); + } + + private void verifyData(ByteBufferDirectory dir) throws IOException { + byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8}; + assertThat(dir.fileExists("value1"), equalTo(true)); + assertThat(dir.fileLength("value1"), equalTo(38l)); + + IndexInput indexInput = dir.openInput("value1"); + indexInput.readBytes(test, 0, 5); + assertThat(test[0], equalTo((byte) 8)); + assertThat(indexInput.readInt(), equalTo(-1)); + assertThat(indexInput.readLong(), equalTo((long) 10)); + assertThat(indexInput.readInt(), equalTo(0)); + assertThat(indexInput.readInt(), equalTo(0)); + indexInput.readBytes(test, 0, 8); + assertThat(test[0], equalTo((byte) 1)); + assertThat(test[7], 
equalTo((byte) 8)); + indexInput.readBytes(test, 0, 5); + assertThat(test[0], equalTo((byte) 1)); + assertThat(test[4], equalTo((byte) 5)); + + indexInput.seek(28); + assertThat(indexInput.readByte(), equalTo((byte) 4)); + indexInput.seek(30); + assertThat(indexInput.readByte(), equalTo((byte) 6)); + + indexInput.close(); + + indexInput = dir.openInput("value1"); + // iterate over all the data + for (int i = 0; i < 38; i++) { + indexInput.readByte(); + } + indexInput.close(); + } + +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/store/memory/SimpleMemoryStoreTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/store/memory/SimpleMemoryStoreTests.java new file mode 100644 index 00000000000..71f2b3ef184 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/store/memory/SimpleMemoryStoreTests.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.memory; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.testng.annotations.Test; + +import java.io.IOException; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleMemoryStoreTests { + + @Test public void test1BufferNoCache() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(1, SizeUnit.BYTES), new SizeValue(0, SizeUnit.BYTES), false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test1Buffer() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(1, SizeUnit.BYTES), new SizeValue(10, SizeUnit.BYTES), false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test3Buffer() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(3, SizeUnit.BYTES), new SizeValue(10, SizeUnit.BYTES), false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test10Buffer() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(10, SizeUnit.BYTES), new SizeValue(20, SizeUnit.BYTES), false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test15Buffer() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(15, SizeUnit.BYTES), new SizeValue(30, SizeUnit.BYTES), false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void test40Buffer() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(40, SizeUnit.BYTES), new SizeValue(80, SizeUnit.BYTES), false); + insertData(dir); + verifyData(dir); + dir.close(); + } + + @Test public void 
testSimpeLocking() throws Exception { + MemoryDirectory dir = new MemoryDirectory(new SizeValue(40, SizeUnit.BYTES), new SizeValue(80, SizeUnit.BYTES), false); + + Lock lock = dir.makeLock("testlock"); + + assertThat(lock.isLocked(), equalTo(false)); + assertThat(lock.obtain(200), equalTo(true)); + assertThat(lock.isLocked(), equalTo(true)); + try { + assertThat(lock.obtain(200), equalTo(false)); + assertThat("lock should be thrown", false, equalTo(true)); + } catch (LockObtainFailedException e) { + // all is well + } + lock.release(); + assertThat(lock.isLocked(), equalTo(false)); + dir.close(); + } + + private void insertData(MemoryDirectory dir) throws IOException { + byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8}; + IndexOutput indexOutput = dir.createOutput("value1"); + indexOutput.writeBytes(new byte[]{2, 4, 6, 7, 8}, 5); + indexOutput.writeInt(-1); + indexOutput.writeLong(10); + indexOutput.writeInt(0); + indexOutput.writeInt(0); + indexOutput.writeBytes(test, 8); + indexOutput.writeBytes(test, 5); + + indexOutput.seek(0); + indexOutput.writeByte((byte) 8); + if (dir.bufferSizeInBytes() > 4) { + indexOutput.seek(2); + indexOutput.writeBytes(new byte[]{1, 2}, 2); + } + + indexOutput.close(); + } + + private void verifyData(MemoryDirectory dir) throws IOException { + byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8}; + assertThat(dir.fileExists("value1"), equalTo(true)); + assertThat(dir.fileLength("value1"), equalTo(38l)); + + IndexInput indexInput = dir.openInput("value1"); + indexInput.readBytes(test, 0, 5); + assertThat(test[0], equalTo((byte) 8)); + assertThat(indexInput.readInt(), equalTo(-1)); + assertThat(indexInput.readLong(), equalTo((long) 10)); + assertThat(indexInput.readInt(), equalTo(0)); + assertThat(indexInput.readInt(), equalTo(0)); + indexInput.readBytes(test, 0, 8); + assertThat(test[0], equalTo((byte) 1)); + assertThat(test[7], equalTo((byte) 8)); + indexInput.readBytes(test, 0, 5); + assertThat(test[0], equalTo((byte) 1)); + 
assertThat(test[4], equalTo((byte) 5)); + + indexInput.seek(28); + assertThat(indexInput.readByte(), equalTo((byte) 4)); + indexInput.seek(30); + assertThat(indexInput.readByte(), equalTo((byte) 6)); + + indexInput.close(); + + indexInput = dir.openInput("value1"); + // iterate over all the data + for (int i = 0; i < 38; i++) { + indexInput.readByte(); + } + indexInput.close(); + } + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java new file mode 100644 index 00000000000..da602505981 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java @@ -0,0 +1,149 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import org.apache.lucene.index.Term; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.hamcrest.Matchers; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.Iterator; + +import static org.elasticsearch.index.translog.TranslogSizeMatcher.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractSimpleTranslogTests { + + protected final ShardId shardId = new ShardId(new Index("index"), 1); + + protected Translog translog; + + @BeforeMethod public void setUp() { + translog = create(); + translog.newTranslog(); + } + + @AfterMethod public void tearDown() { + translog.close(); + } + + protected abstract Translog create(); + + @Test public void testSimpleOperations() { + Translog.Snapshot snapshot = translog.snapshot(); + + assertThat(snapshot, translogSize(0)); + snapshot.release(); + + translog.add(new Translog.Create("test", "1", "{1}")); + snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(1)); + snapshot.release(); + + translog.add(new Translog.Index("test", "2", "{2}")); + snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(2)); + snapshot.release(); + + translog.add(new Translog.Delete(newUid("3"))); + snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(3)); + snapshot.release(); + + translog.add(new Translog.DeleteByQuery("{4}", null)); + snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(4)); + snapshot.release(); + + snapshot = translog.snapshot(); + Iterator it = snapshot.iterator(); + Translog.Create create = (Translog.Create) it.next(); + assertThat(create.source(), equalTo("{1}")); + Translog.Index index = (Translog.Index) it.next(); + assertThat(index.source(), equalTo("{2}")); + Translog.Delete 
delete = (Translog.Delete) it.next(); + assertThat(delete.uid(), equalTo(newUid("3"))); + Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) it.next(); + assertThat(deleteByQuery.source(), equalTo("{4}")); + snapshot.release(); + + long firstId = translog.currentId(); + translog.newTranslog(); + assertThat(translog.currentId(), Matchers.not(equalTo(firstId))); + + snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(0)); + snapshot.release(); + } + + @Test public void testSnapshot() { + Translog.Snapshot snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(0)); + snapshot.release(); + + translog.add(new Translog.Create("test", "1", "{1}")); + snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(1)); + Translog.Create create = (Translog.Create) snapshot.iterator().next(); + assertThat(create.source(), equalTo("{1}")); + snapshot.release(); + + translog.add(new Translog.Index("test", "2", "{2}")); + snapshot = translog.snapshot(snapshot); + assertThat(snapshot, translogSize(1)); + Translog.Index index = (Translog.Index) snapshot.iterator().next(); + assertThat(index.source(), equalTo("{2}")); + snapshot.release(); + } + + @Test public void testSnapshotWithNewTranslog() { + Translog.Snapshot snapshot = translog.snapshot(); + assertThat(snapshot, translogSize(0)); + snapshot.release(); + + translog.add(new Translog.Create("test", "1", "{1}")); + Translog.Snapshot actualSnapshot = translog.snapshot(); + + translog.add(new Translog.Index("test", "2", "{2}")); + + translog.newTranslog(); + + translog.add(new Translog.Index("test", "3", "{3}")); + + snapshot = translog.snapshot(actualSnapshot); + assertThat(snapshot, translogSize(1)); + Translog.Index index = (Translog.Index) snapshot.iterator().next(); + assertThat(index.source(), equalTo("{3}")); + + actualSnapshot.release(); + snapshot.release(); + } + + private Term newUid(String id) { + return new Term("_uid", id); + } +} diff --git 
a/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java new file mode 100644 index 00000000000..a7c0ebba5e0 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +/** + * @author kimchy (Shay Banon) + */ +public class TranslogSizeMatcher extends TypeSafeMatcher { + + private final int size; + + public TranslogSizeMatcher(int size) { + this.size = size; + } + + @Override public boolean matchesSafely(Translog.Snapshot snapshot) { + int count = 0; + for (Translog.Operation op : snapshot) { + count++; + } + return size == count; + } + + @Override public void describeTo(Description description) { + description.appendText("a translog with size ").appendValue(size); + } + + public static Matcher translogSize(int size) { + return new TranslogSizeMatcher(size); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/memory/MemorySimpleTranslogTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/memory/MemorySimpleTranslogTests.java new file mode 100644 index 00000000000..b5a13731a36 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/index/translog/memory/MemorySimpleTranslogTests.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog.memory; + +import org.elasticsearch.index.translog.AbstractSimpleTranslogTests; +import org.elasticsearch.index.translog.Translog; + +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; + +/** + * @author kimchy (Shay Banon) + */ +public class MemorySimpleTranslogTests extends AbstractSimpleTranslogTests { + + @Override protected Translog create() { + return new MemoryTranslog(shardId, EMPTY_SETTINGS); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/monitor/jvm/DeadlockSimulator.java b/modules/elasticsearch/src/test/java/org/elasticsearch/monitor/jvm/DeadlockSimulator.java new file mode 100644 index 00000000000..da584e106a8 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/monitor/jvm/DeadlockSimulator.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.jvm; + +import org.elasticsearch.monitor.dump.DumpMonitorService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.dynamic.DynamicThreadPool; + +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; + +/** + * @author kimchy + */ +public class DeadlockSimulator { + + public static void main(String[] args) { + ThreadPool threadPool = new DynamicThreadPool(); + DumpMonitorService dumpMonitorService = new DumpMonitorService(); + JvmMonitorService jvmMonitorService = new JvmMonitorService(EMPTY_SETTINGS, threadPool, dumpMonitorService).start(); + + //These are the two resource objects + //we'll try to get locks for + final Object resource1 = "resource1"; + final Object resource2 = "resource2"; + //Here's the first thread. + //It tries to lock resource1 then resource2 + Thread t1 = new Thread() { + public void run() { + //Lock resource 1 + synchronized (resource1) { + System.out.println("Thread 1: locked resource 1"); + //Pause for a bit, simulating some file I/O or + //something. Basically, we just want to give the + //other thread a chance to run. Threads and deadlock + //are asynchronous things, but we're trying to force + //deadlock to happen here... + try { + Thread.sleep(50); + } catch (InterruptedException e) { + } + + //Now wait 'till we can get a lock on resource 2 + synchronized (resource2) { + System.out.println("Thread 1: locked resource 2"); + } + } + } + }; + + //Here's the second thread. + //It tries to lock resource2 then resource1 + Thread t2 = new Thread() { + public void run() { + //This thread locks resource 2 right away + synchronized (resource2) { + System.out.println("Thread 2: locked resource 2"); + //Then it pauses, for the same reason as the first + //thread does + try { + Thread.sleep(50); + } catch (InterruptedException e) { + } + + //Then it tries to lock resource1. + //But wait! 
Thread 1 locked resource1, and + //won't release it till it gets a lock on resource2. + //This thread holds the lock on resource2, and won't + //release it till it gets resource1. + //We're at an impasse. Neither thread can run, + //and the program freezes up. + synchronized (resource1) { + System.out.println("Thread 2: locked resource 1"); + } + } + } + }; + + //Start the two threads. + //If all goes as planned, deadlock will occur, + //and the program will never exit. + t1.start(); + t2.start(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java new file mode 100644 index 00000000000..b50f4f54118 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.netty; + +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.dynamic.DynamicThreadPool; +import org.elasticsearch.transport.*; +import org.elasticsearch.util.io.Streamable; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +public class SimpleNettyTransportTests { + + private ThreadPool threadPool; + + private TransportService serviceA; + private TransportService serviceB; + private Node serviceANode; + private Node serviceBNode; + + @BeforeClass public void setUp() { + threadPool = new DynamicThreadPool(); + + serviceA = new TransportService(new NettyTransport(threadPool)).start(); + serviceANode = new Node("A", serviceA.boundAddress().publishAddress()); + + serviceB = new TransportService(new NettyTransport(threadPool)).start(); + serviceBNode = new Node("B", serviceB.boundAddress().publishAddress()); + } + + @AfterClass public void tearDown() { + serviceA.close(); + serviceB.close(); + + threadPool.shutdown(); + } + + @Test public void testHelloWorld() { + serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { + @Override public StringMessage newInstance() { + return new StringMessage(); + } + + @Override public void messageReceived(StringMessage request, TransportChannel channel) { + System.out.println("got message: " + request.message); + assertThat("moshe", equalTo(request.message)); + try { + channel.sendResponse(new StringMessage("hello " + request.message)); + } catch (IOException e) { + e.printStackTrace(); + assertThat(e.getMessage(), false, equalTo(true)); + } + } + }); + + TransportFuture res = serviceB.submitRequest(serviceANode, "sayHello", + new 
StringMessage("moshe"), new BaseTransportResponseHandler() { + @Override public StringMessage newInstance() { + return new StringMessage(); + } + + @Override public void handleResponse(StringMessage response) { + System.out.println("got response: " + response.message); + assertThat("hello moshe", equalTo(response.message)); + } + + @Override public void handleException(RemoteTransportException exp) { + exp.printStackTrace(); + assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + } + }); + + try { + StringMessage message = res.get(); + assertThat("hello moshe", equalTo(message.message)); + } catch (Exception e) { + assertThat(e.getMessage(), false, equalTo(true)); + } + + System.out.println("after ..."); + } + + @Test public void testErrorMessage() { + serviceA.registerHandler("sayHelloException", new BaseTransportRequestHandler() { + @Override public StringMessage newInstance() { + return new StringMessage(); + } + + @Override public void messageReceived(StringMessage request, TransportChannel channel) throws Exception { + System.out.println("got message: " + request.message); + assertThat("moshe", equalTo(request.message)); + throw new RuntimeException("bad message !!!"); + } + }); + + TransportFuture res = serviceB.submitRequest(serviceANode, "sayHelloException", + new StringMessage("moshe"), new BaseTransportResponseHandler() { + @Override public StringMessage newInstance() { + return new StringMessage(); + } + + @Override public void handleResponse(StringMessage response) { + assertThat("got response instead of exception", false, equalTo(true)); + } + + @Override public void handleException(RemoteTransportException exp) { + assertThat("bad message !!!", equalTo(exp.getCause().getMessage())); + } + }); + + try { + res.txGet(); + assertThat("exception should be thrown", false, equalTo(true)); + } catch (Exception e) { + assertThat("bad message !!!", equalTo(e.getCause().getMessage())); + } + + System.out.println("after 
..."); + + } + + private class StringMessage implements Streamable { + + private String message; + + private StringMessage(String message) { + this.message = message; + } + + private StringMessage() { + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + message = in.readUTF(); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeUTF(message); + } + } + +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkMessage.java b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkMessage.java new file mode 100644 index 00000000000..0e0b5ff9316 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkMessage.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.netty.benchmark; + +import org.elasticsearch.util.io.Streamable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * @author kimchy (Shay Banon) + */ +public class BenchmarkMessage implements Streamable { + + long id; + + byte[] payload; + + public BenchmarkMessage(long id, byte[] payload) { + this.id = id; + this.payload = payload; + } + + public BenchmarkMessage() { + } + + @Override public void readFrom(DataInput in) throws IOException, ClassNotFoundException { + id = in.readLong(); + payload = new byte[in.readInt()]; + in.readFully(payload); + } + + @Override public void writeTo(DataOutput out) throws IOException { + out.writeLong(id); + out.writeInt(payload.length); + out.write(payload); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkNettyClient.java b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkNettyClient.java new file mode 100644 index 00000000000..fe4070341c6 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkNettyClient.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.netty.benchmark; + +import com.google.common.collect.Lists; +import org.elasticsearch.cluster.node.Node; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.cached.CachedThreadPool; +import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.netty.NettyTransport; +import org.elasticsearch.util.SizeUnit; +import org.elasticsearch.util.SizeValue; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.transport.InetSocketTransportAddress; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author kimchy (Shay Banon) + */ +public class BenchmarkNettyClient { + + + public static void main(String[] args) { + final SizeValue payloadSize = new SizeValue(100, SizeUnit.BYTES); + final int NUMBER_OF_CLIENTS = 1; + final int NUMBER_OF_ITERATIONS = 500000; + final byte[] payload = new byte[(int) payloadSize.bytes()]; + final AtomicLong idGenerator = new AtomicLong(); + final boolean waitForRequest = false; + final boolean spawn = true; + + Settings settings = ImmutableSettings.settingsBuilder() + .putBoolean("network.server", false) + .putInt("transport.netty.connectionsPerNode", 5) + .build(); + + final ThreadPool threadPool = new CachedThreadPool(); + final TransportService transportService = new TransportService(new NettyTransport(settings, threadPool)).start(); + + final Node node = new Node("server", new InetSocketTransportAddress("localhost", 9999)); + + transportService.nodesAdded(Lists.newArrayList(node)); + + + Thread[] clients = new 
Thread[NUMBER_OF_CLIENTS]; + final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS); + for (int i = 0; i < NUMBER_OF_CLIENTS; i++) { + clients[i] = new Thread(new Runnable() { + @Override public void run() { + for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) { + final long id = idGenerator.incrementAndGet(); + BenchmarkMessage message = new BenchmarkMessage(id, payload); + BaseTransportResponseHandler handler = new BaseTransportResponseHandler() { + @Override public BenchmarkMessage newInstance() { + return new BenchmarkMessage(); + } + + @Override public void handleResponse(BenchmarkMessage response) { + if (response.id != id) { + System.out.println("NO ID MATCH [" + response.id + "] and [" + id + "]"); + } + latch.countDown(); + } + + @Override public void handleException(RemoteTransportException exp) { + exp.printStackTrace(); + latch.countDown(); + } + + @Override public boolean spawn() { + return spawn; + } + }; + + if (waitForRequest) { + transportService.submitRequest(node, "benchmark", message, handler).txGet(); + } else { + transportService.sendRequest(node, "benchmark", message, handler); + } + } + } + }); + } + + StopWatch stopWatch = new StopWatch().start(); + for (int i = 0; i < NUMBER_OF_CLIENTS; i++) { + clients[i].start(); + } + + try { + latch.await(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + stopWatch.stop(); + + System.out.println("Ran [" + NUMBER_OF_CLIENTS + "], each with [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + stopWatch.totalTime() + "], TPS: " + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac()); + + transportService.close(); + threadPool.shutdownNow(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkNettyServer.java b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkNettyServer.java new file mode 100644 index 
00000000000..f9ebc8dd77d --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/transport/netty/benchmark/BenchmarkNettyServer.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.netty.benchmark; + +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.cached.CachedThreadPool; +import org.elasticsearch.transport.BaseTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.netty.NettyTransport; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.settings.Settings; + +/** + * @author kimchy (Shay Banon) + */ +public class BenchmarkNettyServer { + + public static void main(String[] args) { + final boolean spawn = true; + + Settings settings = ImmutableSettings.settingsBuilder() + .putInt("transport.netty.port", 9999) + .build(); + + final ThreadPool threadPool = new CachedThreadPool(); + final TransportService transportService = new TransportService(new NettyTransport(settings, threadPool)).start(); + + transportService.registerHandler("benchmark", new 
BaseTransportRequestHandler() { + @Override public BenchmarkMessage newInstance() { + return new BenchmarkMessage(); + } + + @Override public void messageReceived(BenchmarkMessage request, TransportChannel channel) throws Exception { + channel.sendResponse(request); + } + + @Override public boolean spawn() { + return spawn; + } + }); + + final Object mutex = new Object(); + + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override public void run() { + transportService.close(); + threadPool.shutdownNow(); + synchronized (mutex) { + mutex.notifyAll(); + } + } + }); + + synchronized (mutex) { + try { + mutex.wait(); + } catch (InterruptedException e) { + // ok? + } + } + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/SizeUnitTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/SizeUnitTests.java new file mode 100644 index 00000000000..8526ca421c8 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/SizeUnitTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util; + +import org.testng.annotations.Test; + +import static org.elasticsearch.util.SizeUnit.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SizeUnitTests { + + @Test public void testBytes() { + assertThat(BYTES.toBytes(1), equalTo(1l)); + assertThat(BYTES.toKB(1024), equalTo(1l)); + assertThat(BYTES.toMB(1024 * 1024), equalTo(1l)); + assertThat(BYTES.toGB(1024 * 1024 * 1024), equalTo(1l)); + } + + @Test public void testKB() { + assertThat(KB.toBytes(1), equalTo(1024l)); + assertThat(KB.toKB(1), equalTo(1l)); + assertThat(KB.toMB(1024), equalTo(1l)); + assertThat(KB.toGB(1024 * 1024), equalTo(1l)); + } + + @Test public void testMB() { + assertThat(MB.toBytes(1), equalTo(1024l * 1024)); + assertThat(MB.toKB(1), equalTo(1024l)); + assertThat(MB.toMB(1), equalTo(1l)); + assertThat(MB.toGB(1024), equalTo(1l)); + } + + @Test public void testGB() { + assertThat(GB.toBytes(1), equalTo(1024l * 1024 * 1024)); + assertThat(GB.toKB(1), equalTo(1024l * 1024)); + assertThat(GB.toMB(1), equalTo(1024l)); + assertThat(GB.toGB(1), equalTo(1l)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/SizeValueTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/SizeValueTests.java new file mode 100644 index 00000000000..7c7712c4bbb --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/SizeValueTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import org.testng.annotations.Test; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SizeValueTests { + + @Test public void testSimple() { + assertThat(SizeUnit.BYTES.toBytes(10), is(new SizeValue(10, SizeUnit.BYTES).bytes())); + assertThat(SizeUnit.KB.toKB(10), is(new SizeValue(10, SizeUnit.KB).kb())); + assertThat(SizeUnit.MB.toMB(10), is(new SizeValue(10, SizeUnit.MB).mb())); + assertThat(SizeUnit.GB.toGB(10), is(new SizeValue(10, SizeUnit.GB).gb())); + } + + @Test public void testToString() { + assertThat("10b", is(new SizeValue(10, SizeUnit.BYTES).toString())); + assertThat("1.5kb", is(new SizeValue((long) (1024 * 1.5), SizeUnit.BYTES).toString())); + assertThat("1.5mb", is(new SizeValue((long) (1024 * 1.5), SizeUnit.KB).toString())); + assertThat("1.5gb", is(new SizeValue((long) (1024 * 1.5), SizeUnit.MB).toString())); + assertThat("1536gb", is(new SizeValue((long) (1024 * 1.5), SizeUnit.GB).toString())); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/StopWatchTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/StopWatchTests.java new file mode 100644 index 00000000000..ac4818da777 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/StopWatchTests.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import org.testng.annotations.Test; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * Test for {@link StopWatch}. + * + * @author kimchy (Shay Banon) + */ +public class StopWatchTests { + + /** + * Are timings off in JUnit? + */ + @Test public void testValidUsage() throws Exception { + StopWatch sw = new StopWatch(); + long int1 = 166L; + long int2 = 45L; + String name1 = "Task 1"; + String name2 = "Task 2"; + + long fudgeFactor = 5L; + assertThat(sw.isRunning(), equalTo(false)); + sw.start(name1); + Thread.sleep(int1); + assertThat(sw.isRunning(), equalTo(true)); + sw.stop(); + + // TODO are timings off in JUnit? Why do these assertions sometimes fail + // under both Ant and Eclipse? 
+ + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() >= int1); + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() <= int1 + fudgeFactor); + sw.start(name2); + Thread.sleep(int2); + sw.stop(); + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() >= int1 + int2); + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() <= int1 + int2 + fudgeFactor); + + assertThat(sw.taskCount(), equalTo(2)); + String pp = sw.prettyPrint(); + assertThat(pp.indexOf(name1) != -1, equalTo(true)); + assertThat(pp.indexOf(name2) != -1, equalTo(true)); + + StopWatch.TaskInfo[] tasks = sw.taskInfo(); + assertThat(tasks.length, equalTo(2)); + assertThat(tasks[0].getTaskName(), equalTo(name1)); + assertThat(tasks[1].getTaskName(), equalTo(name2)); + sw.toString(); + } + + @Test public void testValidUsageNotKeepingTaskList() throws Exception { + StopWatch sw = new StopWatch().keepTaskList(false); + long int1 = 166L; + long int2 = 45L; + String name1 = "Task 1"; + String name2 = "Task 2"; + + long fudgeFactor = 5L; + assertThat(sw.isRunning(), equalTo(false)); + sw.start(name1); + Thread.sleep(int1); + assertThat(sw.isRunning(), equalTo(true)); + sw.stop(); + + // TODO are timings off in JUnit? Why do these assertions sometimes fail + // under both Ant and Eclipse? 
+ + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() >= int1); + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() <= int1 + fudgeFactor); + sw.start(name2); + Thread.sleep(int2); + sw.stop(); + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() >= int1 + int2); + //assertTrue("Unexpected timing " + sw.getTotalTime(), sw.getTotalTime() <= int1 + int2 + fudgeFactor); + + assertThat(sw.taskCount(), equalTo(2)); + String pp = sw.prettyPrint(); + assertThat(pp.indexOf("kept"), not(equalTo(-1))); + sw.toString(); + + try { + sw.taskInfo(); + assertThat(false, equalTo(true)); + } catch (UnsupportedOperationException ex) { + // Ok + } + } + + @Test public void testFailureToStartBeforeGettingTimings() { + StopWatch sw = new StopWatch(); + try { + sw.lastTaskTime(); + assertThat("Can't get last interval if no tests run", false, equalTo(true)); + } catch (IllegalStateException ex) { + // Ok + } + } + + @Test public void testFailureToStartBeforeStop() { + StopWatch sw = new StopWatch(); + try { + sw.stop(); + assertThat("Can't stop without starting", false, equalTo(true)); + } catch (IllegalStateException ex) { + // Ok + } + } + + @Test public void testRejectsStartTwice() { + StopWatch sw = new StopWatch(); + try { + sw.start(""); + sw.stop(); + sw.start(""); + assertThat(sw.isRunning(), equalTo(true)); + sw.start(""); + assertThat("Can't start twice", false, equalTo(true)); + } catch (IllegalStateException ex) { + // Ok + } + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/StringsTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/StringsTests.java new file mode 100644 index 00000000000..e9bf0b885e9 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/StringsTests.java @@ -0,0 +1,596 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +/** + * Tests for {@link Strings}. + * + * @author kimchy (Shay Banon) + */ +public class StringsTests { + +// @Test public void testHasTextBlank() throws Exception { +// String blank = " "; +// assertEquals(false, Strings.hasText(blank)); +// } +// +// @Test public void testHasTextNullEmpty() throws Exception { +// assertEquals(false, Strings.hasText(null)); +// assertEquals(false, Strings.hasText("")); +// } +// +// @Test public void testHasTextValid() throws Exception { +// assertEquals(true, Strings.hasText("t")); +// } +// +// @Test public void testContainsWhitespace() throws Exception { +// assertFalse(Strings.containsWhitespace(null)); +// assertFalse(Strings.containsWhitespace("")); +// assertFalse(Strings.containsWhitespace("a")); +// assertFalse(Strings.containsWhitespace("abc")); +// assertTrue(Strings.containsWhitespace(" ")); +// assertTrue(Strings.containsWhitespace(" a")); +// assertTrue(Strings.containsWhitespace("abc ")); +// assertTrue(Strings.containsWhitespace("a b")); +// assertTrue(Strings.containsWhitespace("a b")); +// } +// +// @Test public void testTrimWhitespace() throws Exception { +// assertEquals(null, Strings.trimWhitespace(null)); +// assertEquals("", Strings.trimWhitespace("")); +// 
assertEquals("", Strings.trimWhitespace(" ")); +// assertEquals("", Strings.trimWhitespace("\t")); +// assertEquals("a", Strings.trimWhitespace(" a")); +// assertEquals("a", Strings.trimWhitespace("a ")); +// assertEquals("a", Strings.trimWhitespace(" a ")); +// assertEquals("a b", Strings.trimWhitespace(" a b ")); +// assertEquals("a b c", Strings.trimWhitespace(" a b c ")); +// } +// +// @Test public void testTrimAllWhitespace() throws Exception { +// assertEquals("", Strings.trimAllWhitespace("")); +// assertEquals("", Strings.trimAllWhitespace(" ")); +// assertEquals("", Strings.trimAllWhitespace("\t")); +// assertEquals("a", Strings.trimAllWhitespace(" a")); +// assertEquals("a", Strings.trimAllWhitespace("a ")); +// assertEquals("a", Strings.trimAllWhitespace(" a ")); +// assertEquals("ab", Strings.trimAllWhitespace(" a b ")); +// assertEquals("abc", Strings.trimAllWhitespace(" a b c ")); +// } +// +// @Test public void testTrimLeadingWhitespace() throws Exception { +// assertEquals(null, Strings.trimLeadingWhitespace(null)); +// assertEquals("", Strings.trimLeadingWhitespace("")); +// assertEquals("", Strings.trimLeadingWhitespace(" ")); +// assertEquals("", Strings.trimLeadingWhitespace("\t")); +// assertEquals("a", Strings.trimLeadingWhitespace(" a")); +// assertEquals("a ", Strings.trimLeadingWhitespace("a ")); +// assertEquals("a ", Strings.trimLeadingWhitespace(" a ")); +// assertEquals("a b ", Strings.trimLeadingWhitespace(" a b ")); +// assertEquals("a b c ", Strings.trimLeadingWhitespace(" a b c ")); +// } +// +// @Test public void testTrimTrailingWhitespace() throws Exception { +// assertEquals(null, Strings.trimTrailingWhitespace(null)); +// assertEquals("", Strings.trimTrailingWhitespace("")); +// assertEquals("", Strings.trimTrailingWhitespace(" ")); +// assertEquals("", Strings.trimTrailingWhitespace("\t")); +// assertEquals("a", Strings.trimTrailingWhitespace("a ")); +// assertEquals(" a", Strings.trimTrailingWhitespace(" a")); +// 
assertEquals(" a", Strings.trimTrailingWhitespace(" a ")); +// assertEquals(" a b", Strings.trimTrailingWhitespace(" a b ")); +// assertEquals(" a b c", Strings.trimTrailingWhitespace(" a b c ")); +// } +// +// @Test public void testTrimLeadingCharacter() throws Exception { +// assertEquals(null, Strings.trimLeadingCharacter(null, ' ')); +// assertEquals("", Strings.trimLeadingCharacter("", ' ')); +// assertEquals("", Strings.trimLeadingCharacter(" ", ' ')); +// assertEquals("\t", Strings.trimLeadingCharacter("\t", ' ')); +// assertEquals("a", Strings.trimLeadingCharacter(" a", ' ')); +// assertEquals("a ", Strings.trimLeadingCharacter("a ", ' ')); +// assertEquals("a ", Strings.trimLeadingCharacter(" a ", ' ')); +// assertEquals("a b ", Strings.trimLeadingCharacter(" a b ", ' ')); +// assertEquals("a b c ", Strings.trimLeadingCharacter(" a b c ", ' ')); +// } +// +// @Test public void testTrimTrailingCharacter() throws Exception { +// assertEquals(null, Strings.trimTrailingCharacter(null, ' ')); +// assertEquals("", Strings.trimTrailingCharacter("", ' ')); +// assertEquals("", Strings.trimTrailingCharacter(" ", ' ')); +// assertEquals("\t", Strings.trimTrailingCharacter("\t", ' ')); +// assertEquals("a", Strings.trimTrailingCharacter("a ", ' ')); +// assertEquals(" a", Strings.trimTrailingCharacter(" a", ' ')); +// assertEquals(" a", Strings.trimTrailingCharacter(" a ", ' ')); +// assertEquals(" a b", Strings.trimTrailingCharacter(" a b ", ' ')); +// assertEquals(" a b c", Strings.trimTrailingCharacter(" a b c ", ' ')); +// } +// +// @Test public void testCountOccurrencesOf() { +// assertTrue("nullx2 = 0", +// Strings.countOccurrencesOf(null, null) == 0); +// assertTrue("null string = 0", +// Strings.countOccurrencesOf("s", null) == 0); +// assertTrue("null substring = 0", +// Strings.countOccurrencesOf(null, "s") == 0); +// String s = "erowoiueoiur"; +// assertTrue("not found = 0", +// Strings.countOccurrencesOf(s, "WERWER") == 0); +// assertTrue("not found char 
= 0", +// Strings.countOccurrencesOf(s, "x") == 0); +// assertTrue("not found ws = 0", +// Strings.countOccurrencesOf(s, " ") == 0); +// assertTrue("not found empty string = 0", +// Strings.countOccurrencesOf(s, "") == 0); +// assertTrue("found char=2", Strings.countOccurrencesOf(s, "e") == 2); +// assertTrue("found substring=2", +// Strings.countOccurrencesOf(s, "oi") == 2); +// assertTrue("found substring=2", +// Strings.countOccurrencesOf(s, "oiu") == 2); +// assertTrue("found substring=3", +// Strings.countOccurrencesOf(s, "oiur") == 1); +// assertTrue("test last", Strings.countOccurrencesOf(s, "r") == 2); +// } +// +// @Test public void testReplace() throws Exception { +// String inString = "a6AazAaa77abaa"; +// String oldPattern = "aa"; +// String newPattern = "foo"; +// +// // Simple replace +// String s = Strings.replace(inString, oldPattern, newPattern); +// assertTrue("Replace 1 worked", s.equals("a6AazAfoo77abfoo")); +// +// // Non match: no change +// s = Strings.replace(inString, "qwoeiruqopwieurpoqwieur", newPattern); +// assertTrue("Replace non matched is equal", s.equals(inString)); +// +// // Null new pattern: should ignore +// s = Strings.replace(inString, oldPattern, null); +// assertTrue("Replace non matched is equal", s.equals(inString)); +// +// // Null old pattern: should ignore +// s = Strings.replace(inString, null, newPattern); +// assertTrue("Replace non matched is equal", s.equals(inString)); +// } +// +// @Test public void testDelete() throws Exception { +// String inString = "The quick brown fox jumped over the lazy dog"; +// +// String noThe = Strings.delete(inString, "the"); +// assertTrue("Result has no the [" + noThe + "]", +// noThe.equals("The quick brown fox jumped over lazy dog")); +// +// String nohe = Strings.delete(inString, "he"); +// assertTrue("Result has no he [" + nohe + "]", +// nohe.equals("T quick brown fox jumped over t lazy dog")); +// +// String nosp = Strings.delete(inString, " "); +// assertTrue("Result has no 
spaces", +// nosp.equals("Thequickbrownfoxjumpedoverthelazydog")); +// +// String killEnd = Strings.delete(inString, "dog"); +// assertTrue("Result has no dog", +// killEnd.equals("The quick brown fox jumped over the lazy ")); +// +// String mismatch = Strings.delete(inString, "dxxcxcxog"); +// assertTrue("Result is unchanged", mismatch.equals(inString)); +// +// String nochange = Strings.delete(inString, ""); +// assertTrue("Result is unchanged", nochange.equals(inString)); +// } +// +// @Test public void testDeleteAny() throws Exception { +// String inString = "Able was I ere I saw Elba"; +// +// String res = Strings.deleteAny(inString, "I"); +// assertTrue("Result has no Is [" + res + "]", res.equals("Able was ere saw Elba")); +// +// res = Strings.deleteAny(inString, "AeEba!"); +// assertTrue("Result has no Is [" + res + "]", res.equals("l ws I r I sw l")); +// +// String mismatch = Strings.deleteAny(inString, "#@$#$^"); +// assertTrue("Result is unchanged", mismatch.equals(inString)); +// +// String whitespace = "This is\n\n\n \t a messagy string with whitespace\n"; +// assertTrue("Has CR", whitespace.indexOf("\n") != -1); +// assertTrue("Has tab", whitespace.indexOf("\t") != -1); +// assertTrue("Has sp", whitespace.indexOf(" ") != -1); +// String cleaned = Strings.deleteAny(whitespace, "\n\t "); +// assertTrue("Has no CR", cleaned.indexOf("\n") == -1); +// assertTrue("Has no tab", cleaned.indexOf("\t") == -1); +// assertTrue("Has no sp", cleaned.indexOf(" ") == -1); +// assertTrue("Still has chars", cleaned.length() > 10); +// } +// +// +// @Test public void testQuote() { +// assertEquals("'myString'", Strings.quote("myString")); +// assertEquals("''", Strings.quote("")); +// assertNull(Strings.quote(null)); +// } +// +// @Test public void testQuoteIfString() { +// assertEquals("'myString'", Strings.quoteIfString("myString")); +// assertEquals("''", Strings.quoteIfString("")); +// assertEquals(5, Strings.quoteIfString(5)); +// 
assertNull(Strings.quoteIfString(null)); +// } +// +// @Test public void testUnqualify() { +// String qualified = "i.am.not.unqualified"; +// assertEquals("unqualified", Strings.unqualify(qualified)); +// } +// +// @Test public void testCapitalize() { +// String capitalized = "i am not capitalized"; +// assertEquals("I am not capitalized", Strings.capitalize(capitalized)); +// } +// +// @Test public void testUncapitalize() { +// String capitalized = "I am capitalized"; +// assertEquals("i am capitalized", Strings.uncapitalize(capitalized)); +// } +// +// @Test public void testGetFilename() { +// assertEquals(null, Strings.getFilename(null)); +// assertEquals("", Strings.getFilename("")); +// assertEquals("myfile", Strings.getFilename("myfile")); +// assertEquals("myfile", Strings.getFilename("mypath/myfile")); +// assertEquals("myfile.", Strings.getFilename("myfile.")); +// assertEquals("myfile.", Strings.getFilename("mypath/myfile.")); +// assertEquals("myfile.txt", Strings.getFilename("myfile.txt")); +// assertEquals("myfile.txt", Strings.getFilename("mypath/myfile.txt")); +// } +// +// @Test public void testGetFilenameExtension() { +// assertEquals(null, Strings.getFilenameExtension(null)); +// assertEquals(null, Strings.getFilenameExtension("")); +// assertEquals(null, Strings.getFilenameExtension("myfile")); +// assertEquals(null, Strings.getFilenameExtension("myPath/myfile")); +// assertEquals("", Strings.getFilenameExtension("myfile.")); +// assertEquals("", Strings.getFilenameExtension("myPath/myfile.")); +// assertEquals("txt", Strings.getFilenameExtension("myfile.txt")); +// assertEquals("txt", Strings.getFilenameExtension("mypath/myfile.txt")); +// } +// +// @Test public void testStripFilenameExtension() { +// assertEquals(null, Strings.stripFilenameExtension(null)); +// assertEquals("", Strings.stripFilenameExtension("")); +// assertEquals("myfile", Strings.stripFilenameExtension("myfile")); +// assertEquals("mypath/myfile", 
Strings.stripFilenameExtension("mypath/myfile")); +// assertEquals("myfile", Strings.stripFilenameExtension("myfile.")); +// assertEquals("mypath/myfile", Strings.stripFilenameExtension("mypath/myfile.")); +// assertEquals("myfile", Strings.stripFilenameExtension("myfile.txt")); +// assertEquals("mypath/myfile", Strings.stripFilenameExtension("mypath/myfile.txt")); +// } +// +// @Test public void testCleanPath() { +// assertEquals("mypath/myfile", Strings.cleanPath("mypath/myfile")); +// assertEquals("mypath/myfile", Strings.cleanPath("mypath\\myfile")); +// assertEquals("mypath/myfile", Strings.cleanPath("mypath/../mypath/myfile")); +// assertEquals("mypath/myfile", Strings.cleanPath("mypath/myfile/../../mypath/myfile")); +// assertEquals("../mypath/myfile", Strings.cleanPath("../mypath/myfile")); +// assertEquals("../mypath/myfile", Strings.cleanPath("../mypath/../mypath/myfile")); +// assertEquals("../mypath/myfile", Strings.cleanPath("mypath/../../mypath/myfile")); +// assertEquals("/../mypath/myfile", Strings.cleanPath("/../mypath/myfile")); +// } +// +// @Test public void testPathEquals() { +// assertTrue("Must be true for the same strings", +// Strings.pathEquals("/dummy1/dummy2/dummy3", +// "/dummy1/dummy2/dummy3")); +// assertTrue("Must be true for the same win strings", +// Strings.pathEquals("C:\\dummy1\\dummy2\\dummy3", +// "C:\\dummy1\\dummy2\\dummy3")); +// assertTrue("Must be true for one top path on 1", +// Strings.pathEquals("/dummy1/bin/../dummy2/dummy3", +// "/dummy1/dummy2/dummy3")); +// assertTrue("Must be true for one win top path on 2", +// Strings.pathEquals("C:\\dummy1\\dummy2\\dummy3", +// "C:\\dummy1\\bin\\..\\dummy2\\dummy3")); +// assertTrue("Must be true for two top paths on 1", +// Strings.pathEquals("/dummy1/bin/../dummy2/bin/../dummy3", +// "/dummy1/dummy2/dummy3")); +// assertTrue("Must be true for two win top paths on 2", +// Strings.pathEquals("C:\\dummy1\\dummy2\\dummy3", +// "C:\\dummy1\\bin\\..\\dummy2\\bin\\..\\dummy3")); +// 
assertTrue("Must be true for double top paths on 1", +// Strings.pathEquals("/dummy1/bin/tmp/../../dummy2/dummy3", +// "/dummy1/dummy2/dummy3")); +// assertTrue("Must be true for double top paths on 2 with similarity", +// Strings.pathEquals("/dummy1/dummy2/dummy3", +// "/dummy1/dum/dum/../../dummy2/dummy3")); +// assertTrue("Must be true for current paths", +// Strings.pathEquals("./dummy1/dummy2/dummy3", +// "dummy1/dum/./dum/../../dummy2/dummy3")); +// assertFalse("Must be false for relative/absolute paths", +// Strings.pathEquals("./dummy1/dummy2/dummy3", +// "/dummy1/dum/./dum/../../dummy2/dummy3")); +// assertFalse("Must be false for different strings", +// Strings.pathEquals("/dummy1/dummy2/dummy3", +// "/dummy1/dummy4/dummy3")); +// assertFalse("Must be false for one false path on 1", +// Strings.pathEquals("/dummy1/bin/tmp/../dummy2/dummy3", +// "/dummy1/dummy2/dummy3")); +// assertFalse("Must be false for one false win top path on 2", +// Strings.pathEquals("C:\\dummy1\\dummy2\\dummy3", +// "C:\\dummy1\\bin\\tmp\\..\\dummy2\\dummy3")); +// assertFalse("Must be false for top path on 1 + difference", +// Strings.pathEquals("/dummy1/bin/../dummy2/dummy3", +// "/dummy1/dummy2/dummy4")); +// } +// +// @Test public void testConcatenateStringArrays() { +// String[] input1 = new String[]{"myString2"}; +// String[] input2 = new String[]{"myString1", "myString2"}; +// String[] result = Strings.concatenateStringArrays(input1, input2); +// assertEquals(3, result.length); +// assertEquals("myString2", result[0]); +// assertEquals("myString1", result[1]); +// assertEquals("myString2", result[2]); +// +// assertArrayEquals(input1, Strings.concatenateStringArrays(input1, null)); +// assertArrayEquals(input2, Strings.concatenateStringArrays(null, input2)); +// assertNull(Strings.concatenateStringArrays(null, null)); +// } +// +// @Test public void testMergeStringArrays() { +// String[] input1 = new String[]{"myString2"}; +// String[] input2 = new String[]{"myString1", 
"myString2"}; +// String[] result = Strings.mergeStringArrays(input1, input2); +// assertEquals(2, result.length); +// assertEquals("myString2", result[0]); +// assertEquals("myString1", result[1]); +// +// assertArrayEquals(input1, Strings.mergeStringArrays(input1, null)); +// assertArrayEquals(input2, Strings.mergeStringArrays(null, input2)); +// assertNull(Strings.mergeStringArrays(null, null)); +// } +// +// @Test public void testSortStringArray() { +// String[] input = new String[]{"myString2"}; +// input = Strings.addStringToArray(input, "myString1"); +// assertEquals("myString2", input[0]); +// assertEquals("myString1", input[1]); +// +// Strings.sortStringArray(input); +// assertEquals("myString1", input[0]); +// assertEquals("myString2", input[1]); +// } +// +// @Test public void testRemoveDuplicateStrings() { +// String[] input = new String[]{"myString2", "myString1", "myString2"}; +// input = Strings.removeDuplicateStrings(input); +// assertEquals("myString1", input[0]); +// assertEquals("myString2", input[1]); +// } +// +// @Test public void testSplitArrayElementsIntoProperties() { +// String[] input = new String[]{"key1=value1 ", "key2 =\"value2\""}; +// Properties result = Strings.splitArrayElementsIntoProperties(input, "="); +// assertEquals("value1", result.getProperty("key1")); +// assertEquals("\"value2\"", result.getProperty("key2")); +// } +// +// @Test public void testSplitArrayElementsIntoPropertiesAndDeletedChars() { +// String[] input = new String[]{"key1=value1 ", "key2 =\"value2\""}; +// Properties result = Strings.splitArrayElementsIntoProperties(input, "=", "\""); +// assertEquals("value1", result.getProperty("key1")); +// assertEquals("value2", result.getProperty("key2")); +// } +// +// @Test public void testTokenizeToStringArray() { +// String[] sa = Strings.tokenizeToStringArray("a,b , ,c", ","); +// assertEquals(3, sa.length); +// assertTrue("components are correct", +// sa[0].equals("a") && sa[1].equals("b") && sa[2].equals("c")); 
+// } +// +// @Test public void testTokenizeToStringArrayWithNotIgnoreEmptyTokens() { +// String[] sa = Strings.tokenizeToStringArray("a,b , ,c", ",", true, false); +// assertEquals(4, sa.length); +// assertTrue("components are correct", +// sa[0].equals("a") && sa[1].equals("b") && sa[2].equals("") && sa[3].equals("c")); +// } +// +// @Test public void testTokenizeToStringArrayWithNotTrimTokens() { +// String[] sa = Strings.tokenizeToStringArray("a,b ,c", ",", false, true); +// assertEquals(3, sa.length); +// assertTrue("components are correct", +// sa[0].equals("a") && sa[1].equals("b ") && sa[2].equals("c")); +// } +// +// @Test public void testCommaDelimitedListToStringArrayWithNullProducesEmptyArray() { +// String[] sa = Strings.commaDelimitedListToStringArray(null); +// assertTrue("String array isn't null with null input", sa != null); +// assertTrue("String array length == 0 with null input", sa.length == 0); +// } +// +// @Test public void testCommaDelimitedListToStringArrayWithEmptyStringProducesEmptyArray() { +// String[] sa = Strings.commaDelimitedListToStringArray(""); +// assertTrue("String array isn't null with null input", sa != null); +// assertTrue("String array length == 0 with null input", sa.length == 0); +// } +// +// private void testStringArrayReverseTransformationMatches(String[] sa) { +// String[] reverse = +// Strings.commaDelimitedListToStringArray(Strings.arrayToCommaDelimitedString(sa)); +// assertEquals("Reverse transformation is equal", +// Arrays.asList(sa), +// Arrays.asList(reverse)); +// } +// +// @Test public void testDelimitedListToStringArrayWithComma() { +// String[] sa = Strings.delimitedListToStringArray("a,b", ","); +// assertEquals(2, sa.length); +// assertEquals("a", sa[0]); +// assertEquals("b", sa[1]); +// } +// +// @Test public void testDelimitedListToStringArrayWithSemicolon() { +// String[] sa = Strings.delimitedListToStringArray("a;b", ";"); +// assertEquals(2, sa.length); +// assertEquals("a", sa[0]); +// 
assertEquals("b", sa[1]); +// } +// +// @Test public void testDelimitedListToStringArrayWithEmptyString() { +// String[] sa = Strings.delimitedListToStringArray("a,b", ""); +// assertEquals(3, sa.length); +// assertEquals("a", sa[0]); +// assertEquals(",", sa[1]); +// assertEquals("b", sa[2]); +// } +// +// @Test public void testDelimitedListToStringArrayWithNullDelimiter() { +// String[] sa = Strings.delimitedListToStringArray("a,b", null); +// assertEquals(1, sa.length); +// assertEquals("a,b", sa[0]); +// } +// +// @Test public void testCommaDelimitedListToStringArrayMatchWords() { +// // Could read these from files +// String[] sa = new String[]{"foo", "bar", "big"}; +// doTestCommaDelimitedListToStringArrayLegalMatch(sa); +// testStringArrayReverseTransformationMatches(sa); +// +// sa = new String[]{"a", "b", "c"}; +// doTestCommaDelimitedListToStringArrayLegalMatch(sa); +// testStringArrayReverseTransformationMatches(sa); +// +// // Test same words +// sa = new String[]{"AA", "AA", "AA", "AA", "AA"}; +// doTestCommaDelimitedListToStringArrayLegalMatch(sa); +// testStringArrayReverseTransformationMatches(sa); +// } +// +// @Test public void testCommaDelimitedListToStringArraySingleString() { +// // Could read these from files +// String s = "woeirqupoiewuropqiewuorpqiwueopriquwopeiurqopwieur"; +// String[] sa = Strings.commaDelimitedListToStringArray(s); +// assertTrue("Found one String with no delimiters", sa.length == 1); +// assertTrue("Single array entry matches input String with no delimiters", +// sa[0].equals(s)); +// } +// +// @Test public void testCommaDelimitedListToStringArrayWithOtherPunctuation() { +// // Could read these from files +// String[] sa = new String[]{"xcvwert4456346&*.", "///", ".!", ".", ";"}; +// doTestCommaDelimitedListToStringArrayLegalMatch(sa); +// } +// +// /** +// * We expect to see the empty Strings in the output. 
+// */ +// @Test public void testCommaDelimitedListToStringArrayEmptyStrings() { +// // Could read these from files +// String[] sa = Strings.commaDelimitedListToStringArray("a,,b"); +// assertEquals("a,,b produces array length 3", 3, sa.length); +// assertTrue("components are correct", +// sa[0].equals("a") && sa[1].equals("") && sa[2].equals("b")); +// +// sa = new String[]{"", "", "a", ""}; +// doTestCommaDelimitedListToStringArrayLegalMatch(sa); +// } +// +// private void doTestCommaDelimitedListToStringArrayLegalMatch(String[] components) { +// StringBuffer sbuf = new StringBuffer(); +// for (int i = 0; i < components.length; i++) { +// if (i != 0) { +// sbuf.append(","); +// } +// sbuf.append(components[i]); +// } +// String[] sa = Strings.commaDelimitedListToStringArray(sbuf.toString()); +// assertTrue("String array isn't null with legal match", sa != null); +// assertEquals("String array length is correct with legal match", components.length, sa.length); +// assertTrue("Output equals input", Arrays.equals(sa, components)); +// } +// +// @Test public void testEndsWithIgnoreCase() { +// String suffix = "fOo"; +// assertTrue(Strings.endsWithIgnoreCase("foo", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("Foo", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barfoo", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barbarfoo", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barFoo", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barBarFoo", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barfoO", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barFOO", suffix)); +// assertTrue(Strings.endsWithIgnoreCase("barfOo", suffix)); +// assertFalse(Strings.endsWithIgnoreCase(null, suffix)); +// assertFalse(Strings.endsWithIgnoreCase("barfOo", null)); +// assertFalse(Strings.endsWithIgnoreCase("b", suffix)); +// } +// +// @Test public void testParseLocaleStringSunnyDay() throws Exception { +// Locale expectedLocale = Locale.UK; +// 
Locale locale = Strings.parseLocaleString(expectedLocale.toString()); +// assertNotNull("When given a bona-fide Locale string, must not return null.", locale); +// assertEquals(expectedLocale, locale); +// } +// +// @Test public void testParseLocaleStringWithMalformedLocaleString() throws Exception { +// Locale locale = Strings.parseLocaleString("_banjo_on_my_knee"); +// assertNotNull("When given a malformed Locale string, must not return null.", locale); +// } +// +// @Test public void testParseLocaleStringWithEmptyLocaleStringYieldsNullLocale() throws Exception { +// Locale locale = Strings.parseLocaleString(""); +// assertNull("When given an empty Locale string, must return null.", locale); +// } +// +// @Test public void testParseLocaleWithMultiValuedVariant() throws Exception { +// final String variant = "proper_northern"; +// final String localeString = "en_GB_" + variant; +// Locale locale = Strings.parseLocaleString(localeString); +// assertEquals("Multi-valued variant portion of the Locale not extracted correctly.", variant, locale.getVariant()); +// } +// +// @Test public void testParseLocaleWithMultiValuedVariantUsingSpacesAsSeparators() throws Exception { +// final String variant = "proper northern"; +// final String localeString = "en GB " + variant; +// Locale locale = Strings.parseLocaleString(localeString); +// assertEquals("Multi-valued variant portion of the Locale not extracted correctly.", variant, locale.getVariant()); +// } +// +// @Test public void testParseLocaleWithMultiValuedVariantUsingMixtureOfUnderscoresAndSpacesAsSeparators() throws Exception { +// final String variant = "proper northern"; +// final String localeString = "en_GB_" + variant; +// Locale locale = Strings.parseLocaleString(localeString); +// assertEquals("Multi-valued variant portion of the Locale not extracted correctly.", variant, locale.getVariant()); +// } +// +// @Test public void 
testParseLocaleWithMultiValuedVariantUsingSpacesAsSeparatorsWithLotsOfLeadingWhitespace() throws Exception { +// final String variant = "proper northern"; +// final String localeString = "en GB " + variant; // lots of whitespace +// Locale locale = Strings.parseLocaleString(localeString); +// assertEquals("Multi-valued variant portion of the Locale not extracted correctly.", variant, locale.getVariant()); +// } +// +// @Test public void testParseLocaleWithMultiValuedVariantUsingUnderscoresAsSeparatorsWithLotsOfLeadingWhitespace() throws Exception { +// final String variant = "proper_northern"; +// final String localeString = "en_GB_____" + variant; // lots of underscores +// Locale locale = Strings.parseLocaleString(localeString); +// assertEquals("Multi-valued variant portion of the Locale not extracted correctly.", variant, locale.getVariant()); +// } + +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/TimeValueTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/TimeValueTests.java new file mode 100644 index 00000000000..cddacd94b8a --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/TimeValueTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util; + +import org.testng.annotations.Test; + +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TimeValueTests { + + @Test public void testSimple() { + assertThat(TimeUnit.MILLISECONDS.toMillis(10), equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).millis())); + assertThat(TimeUnit.MICROSECONDS.toMicros(10), equalTo(new TimeValue(10, TimeUnit.MICROSECONDS).micros())); + assertThat(TimeUnit.SECONDS.toSeconds(10), equalTo(new TimeValue(10, TimeUnit.SECONDS).seconds())); + assertThat(TimeUnit.MINUTES.toMinutes(10), equalTo(new TimeValue(10, TimeUnit.MINUTES).minutes())); + assertThat(TimeUnit.HOURS.toHours(10), equalTo(new TimeValue(10, TimeUnit.HOURS).hours())); + assertThat(TimeUnit.DAYS.toDays(10), equalTo(new TimeValue(10, TimeUnit.DAYS).days())); + } + + @Test public void testToString() { + assertThat("10ms", equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).toString())); + assertThat("1.5s", equalTo(new TimeValue(1533, TimeUnit.MILLISECONDS).toString())); + assertThat("1.5m", equalTo(new TimeValue(90, TimeUnit.SECONDS).toString())); + assertThat("1.5h", equalTo(new TimeValue(90, TimeUnit.MINUTES).toString())); + assertThat("1.5d", equalTo(new TimeValue(36, TimeUnit.HOURS).toString())); + assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).toString())); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/BlockingThreadPoolTest.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/BlockingThreadPoolTest.java new file mode 100644 index 00000000000..25285ee71f3 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/BlockingThreadPoolTest.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elastic 
Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.concurrent; + +import org.testng.annotations.Test; + +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ThreadPoolExecutor; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class BlockingThreadPoolTest { + + @Test public void testBlocking() throws Exception { + final int min = 2; + final int max = 4; + final long waitTime = 1000; //1 second + final ThreadBarrier barrier = new ThreadBarrier(max + 1); + + ThreadPoolExecutor pool = (ThreadPoolExecutor) DynamicExecutors.newBlockingThreadPool(min, max, 60000, 1, waitTime); + assertThat("Min property", pool.getCorePoolSize(), equalTo(min)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max)); + + for (int i = 0; i < max; ++i) { + pool.execute(new Runnable() { + public void run() { + try { + barrier.await(); + barrier.await(); + } + catch (Throwable e) { + barrier.reset(e); + } + } + }); + + //wait until thread executes this task + //otherwise, a task might be queued + Thread.sleep(100); + } + + barrier.await(); + assertThat("wrong pool size", pool.getPoolSize(), 
equalTo(max)); + assertThat("wrong active size", pool.getActiveCount(), equalTo(max)); + + //Queue should be empty, lets occupy it's only free space + assertThat("queue isn't empty", pool.getQueue().size(), equalTo(0)); + pool.execute(new Runnable() { + public void run() { + //dummy task + } + }); + assertThat("queue isn't full", pool.getQueue().size(), equalTo(1)); + + //request should block since queue is full + try { + pool.execute(new Runnable() { + public void run() { + //dummy task + } + }); + assertThat("Should have thrown RejectedExecutionException", false, equalTo(true)); + } catch (RejectedExecutionException e) { + //caught expected exception + } + + barrier.await(); + pool.shutdown(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/ScalingThreadPoolTest.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/ScalingThreadPoolTest.java new file mode 100644 index 00000000000..7750bde76ce --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/ScalingThreadPoolTest.java @@ -0,0 +1,148 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent; + +import org.testng.annotations.Test; + +import java.util.concurrent.ThreadPoolExecutor; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ScalingThreadPoolTest { + + @Test public void testScaleUp() throws Exception { + final int min = 2; + final int max = 4; + final ThreadBarrier barrier = new ThreadBarrier(max + 1); + + ThreadPoolExecutor pool = (ThreadPoolExecutor) DynamicExecutors.newScalingThreadPool(min, max, Long.MAX_VALUE); + assertThat("Min property", pool.getCorePoolSize(), equalTo(min)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max)); + + for (int i = 0; i < max; ++i) { + pool.execute(new Runnable() { + public void run() { + try { + barrier.await(); + barrier.await(); + } + catch (Throwable e) { + barrier.reset(e); + } + } + }); + + //wait until thread executes this task + //otherwise, a task might be queued + Thread.sleep(100); + } + + barrier.await(); + assertThat("wrong pool size", pool.getPoolSize(), equalTo(max)); + assertThat("wrong active size", pool.getActiveCount(), equalTo(max)); + barrier.await(); + pool.shutdown(); + } + + @Test public void testScaleDown() throws Exception { + final int min = 2; + final int max = 4; + final ThreadBarrier barrier = new ThreadBarrier(max + 1); + + ThreadPoolExecutor pool = (ThreadPoolExecutor) DynamicExecutors.newScalingThreadPool(min, max, 0 /*keep alive*/); + assertThat("Min property", pool.getCorePoolSize(), equalTo(min)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max)); + + for (int i = 0; i < max; ++i) { + pool.execute(new Runnable() { + public void run() { + try { + barrier.await(); + barrier.await(); + } + catch (Throwable e) { + barrier.reset(e); + } + } + }); + + //wait until thread executes this task + //otherwise, a task might be queued + Thread.sleep(100); + } + + barrier.await(); + assertThat("wrong pool 
size", pool.getPoolSize(), equalTo(max)); + assertThat("wrong active size", pool.getActiveCount(), equalTo(max)); + barrier.await(); + Thread.sleep(1000); + + assertThat("not all tasks completed", pool.getCompletedTaskCount(), equalTo((long) max)); + assertThat("wrong active count", pool.getActiveCount(), equalTo(0)); + //Assert.assertEquals("wrong pool size. ", min, pool.getPoolSize()); //BUG in ThreadPool - Bug ID: 6458662 + assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), greaterThan(0)); + assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), lessThan(max)); + } + + + @Test public void testScaleAbove() throws Exception { + final int min = 2; + final int max = 4; + final int ntasks = 16; + final ThreadBarrier barrier = new ThreadBarrier(max + 1); + + ThreadPoolExecutor pool = (ThreadPoolExecutor) DynamicExecutors.newScalingThreadPool(min, max, Long.MAX_VALUE); + assertThat("Min property", pool.getCorePoolSize(), equalTo(min)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max)); + + for (int i = 0; i < ntasks; ++i) { + final int id = i; + pool.execute(new Runnable() { + public void run() { + try { + if (id < max) { + barrier.await(); + } + } + catch (Throwable e) { + barrier.reset(e); + } + } + }); + + //wait until thread executes this task + //otherwise, a task might be queued + Thread.sleep(100); + } + + assertThat("wrong number of pooled tasks", pool.getQueue().size(), equalTo(ntasks - max)); + barrier.await(); + + //wait around for one second + Thread.sleep(1000); + assertThat("tasks not complete", pool.getCompletedTaskCount(), equalTo((long) ntasks)); + assertThat("didn't scale above core pool size. (" + pool.getLargestPoolSize() + ")", pool.getLargestPoolSize(), greaterThan(min)); + assertThat("Largest pool size exceeds max. 
(" + pool.getLargestPoolSize() + ")", pool.getLargestPoolSize(), lessThanOrEqualTo(max)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/AbstractAcquirableResourceTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/AbstractAcquirableResourceTests.java new file mode 100644 index 00000000000..4e73ceb2c1d --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/AbstractAcquirableResourceTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.util.StopWatch; +import org.elasticsearch.util.lease.Releasable; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.*; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy + */ +public abstract class AbstractAcquirableResourceTests { + + protected abstract AcquirableResource createInstance(T resource); + + @Test public void testSimple() throws Exception { + ExecutorService executorService = Executors.newCachedThreadPool(); + + final AcquirableResource acquirableResource = createInstance(new Resource()); + + List results = new ArrayList(); + + + final int cycles = 50; + final int operationsWithinCycle = 100000; + final CyclicBarrier barrier1 = new CyclicBarrier(cycles * 2 + 1); + final CyclicBarrier barrier2 = new CyclicBarrier(cycles * 2 + 1); + + for (int i = 0; i < cycles; i++) { + results.add(executorService.submit(new Callable() { + @Override public Object call() throws Exception { + barrier1.await(); + barrier2.await(); + for (int j = 0; j < operationsWithinCycle; j++) { + assertThat(acquirableResource.acquire(), equalTo(true)); + } + return null; + } + })); + results.add(executorService.submit(new Callable() { + @Override public Object call() throws Exception { + barrier1.await(); + barrier2.await(); + for (int j = 0; j < operationsWithinCycle; j++) { + acquirableResource.release(); + } + return null; + } + })); + } + barrier1.await(); + + StopWatch stopWatch = new StopWatch("Acquirable"); + stopWatch.start(); + + barrier2.await(); + + for (Future f : results) { + f.get(); + } + + assertThat(acquirableResource.resource().isReleased(), equalTo(false)); + acquirableResource.markForClose(); + assertThat(acquirableResource.resource().isReleased(), equalTo(true)); + + stopWatch.stop(); + 
System.out.println("Took: " + stopWatch.shortSummary()); + } + + private static class Resource implements Releasable { + + private volatile boolean released = false; + + @Override public boolean release() throws ElasticSearchException { + released = true; + return true; + } + + public boolean isReleased() { + return released; + } + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/BlockingAcquirableResourceTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/BlockingAcquirableResourceTests.java new file mode 100644 index 00000000000..0efe3f4db16 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/BlockingAcquirableResourceTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.util.lease.Releasable; + +/** + * @author kimchy + */ +public class BlockingAcquirableResourceTests extends AbstractAcquirableResourceTests { + + @Override protected AcquirableResource createInstance(T resource) { + return new BlockingAcquirableResource(resource); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/NonBlockingAcquirableResourceTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/NonBlockingAcquirableResourceTests.java new file mode 100644 index 00000000000..124322d1181 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/concurrent/resource/NonBlockingAcquirableResourceTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.concurrent.resource; + +import org.elasticsearch.util.lease.Releasable; + +/** + * @author kimchy + */ +public class NonBlockingAcquirableResourceTests extends AbstractAcquirableResourceTests { + + @Override protected AcquirableResource createInstance(T resource) { + return new NonBlockingAcquirableResource(resource); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/guice/InjectorsTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/guice/InjectorsTests.java new file mode 100644 index 00000000000..00a9e9f7900 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/guice/InjectorsTests.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.guice; + +import com.google.inject.AbstractModule; +import com.google.inject.BindingAnnotation; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.matcher.Matchers; +import org.testng.annotations.Test; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.*; +import static java.lang.annotation.RetentionPolicy.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class InjectorsTests { + + @Test public void testMatchers() throws Exception { + Injector injector = Guice.createInjector(new MyModule()); + + assertThat(Injectors.getInstancesOf(injector, A.class).size(), equalTo(2)); + assertThat(Injectors.getInstancesOf(injector, B.class).size(), equalTo(1)); + assertThat(Injectors.getInstancesOf(injector, C.class).size(), equalTo(1)); + + assertThat(Injectors.getInstancesOf(injector, + Matchers.subclassesOf(C.class).and(Matchers.annotatedWith(Blue.class))).size(), equalTo(1)); + } + + public static class MyModule extends AbstractModule { + protected void configure() { + bind(C.class); + bind(B.class); + } + } + + public static class A { + public String name = "A"; + } + + public static class B extends A { + public B() { + name = "B"; + } + } + + @Blue + public static class C extends A { + public C() { + name = "C"; + } + } + + @Target({METHOD, CONSTRUCTOR, FIELD, TYPE}) + @Retention(RUNTIME) + @Documented + @BindingAnnotation + public @interface Blue { + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/StreamsTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/StreamsTests.java new file mode 100644 index 00000000000..baac4c0b203 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/StreamsTests.java @@ -0,0 
+1,85 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io; + +import org.testng.annotations.Test; + +import java.io.*; +import java.util.Arrays; + +import static org.elasticsearch.util.io.Streams.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * Unit tests for {@link Streams}. 
+ * + * @author kimchy (Shay Banon) + */ +public class StreamsTests { + + @Test public void testCopyFromInputStream() throws IOException { + byte[] content = "content".getBytes(); + ByteArrayInputStream in = new ByteArrayInputStream(content); + ByteArrayOutputStream out = new ByteArrayOutputStream(content.length); + int count = copy(in, out); + + assertThat(count, equalTo(content.length)); + assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true)); + } + + @Test public void testCopyFromByteArray() throws IOException { + byte[] content = "content".getBytes(); + ByteArrayOutputStream out = new ByteArrayOutputStream(content.length); + copy(content, out); + assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true)); + } + + @Test public void testCopyToByteArray() throws IOException { + byte[] content = "content".getBytes(); + ByteArrayInputStream in = new ByteArrayInputStream(content); + byte[] result = copyToByteArray(in); + assertThat(Arrays.equals(content, result), equalTo(true)); + } + + @Test public void testCopyFromReader() throws IOException { + String content = "content"; + StringReader in = new StringReader(content); + StringWriter out = new StringWriter(); + int count = copy(in, out); + assertThat(content.length(), equalTo(count)); + assertThat(out.toString(), equalTo(content)); + } + + @Test public void testCopyFromString() throws IOException { + String content = "content"; + StringWriter out = new StringWriter(); + copy(content, out); + assertThat(out.toString(), equalTo(content)); + } + + @Test public void testCopyToString() throws IOException { + String content = "content"; + StringReader in = new StringReader(content); + String result = copyToString(in); + assertThat(result, equalTo(content)); + } + +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/AbstractCompressorTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/AbstractCompressorTests.java new file mode 
100644 index 00000000000..38776b1d0c0 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/AbstractCompressorTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.io.compressor; + +import org.elasticsearch.util.io.compression.Compressor; +import org.testng.annotations.Test; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractCompressorTests { + + private static final String TEST_STRING = "aaaaaaaaaaaa bbbbbbbbbb aa aa aa cccccccccc"; + + @Test public void testSimpleOperations() throws Exception { + Compressor compressor = createCompressor(); + byte[] compressed = compressor.compressString(TEST_STRING); + System.out.println("" + TEST_STRING.length()); + System.out.println("" + compressed.length); + + assertThat(compressed.length, lessThan(TEST_STRING.length())); + + String decompressed = compressor.decompressString(compressed); +// System.out.println("" + TEST_STRING.length()); +// System.out.println("" + compressed.length); + assertThat(decompressed, equalTo(TEST_STRING)); + + decompressed = 
compressor.decompressString(compressed); + assertThat(decompressed, equalTo(TEST_STRING)); + + compressed = compressor.compressString(TEST_STRING); +// System.out.println("" + TEST_STRING.length()); +// System.out.println("" + compressed.length); + assertThat(compressed.length, lessThan(TEST_STRING.length())); + + decompressed = compressor.decompressString(compressed); + assertThat(decompressed, equalTo(TEST_STRING)); + + decompressed = compressor.decompressString(compressed); + assertThat(decompressed, equalTo(TEST_STRING)); + } + + protected abstract Compressor createCompressor(); +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/GZIPCompressorTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/GZIPCompressorTests.java new file mode 100644 index 00000000000..dd981e8c723 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/GZIPCompressorTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io.compressor; + +import org.elasticsearch.util.io.compression.Compressor; +import org.elasticsearch.util.io.compression.GZIPCompressor; + +/** + * @author kimchy (Shay Banon) + */ +public class GZIPCompressorTests extends AbstractCompressorTests { + + @Override protected Compressor createCompressor() { + return new GZIPCompressor(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/LzpCompressorTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/LzpCompressorTests.java new file mode 100644 index 00000000000..89d54cc157a --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/LzpCompressorTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io.compressor; + +import org.elasticsearch.util.io.compression.Compressor; +import org.elasticsearch.util.io.compression.LzfCompressor; + +/** + * @author kimchy (Shay Banon) + */ +public class LzpCompressorTests extends AbstractCompressorTests { + + @Override protected Compressor createCompressor() { + return new LzfCompressor(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/ZipCompressorTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/ZipCompressorTests.java new file mode 100644 index 00000000000..afe5a8a6086 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/io/compressor/ZipCompressorTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.io.compressor; + +import org.elasticsearch.util.io.compression.Compressor; +import org.elasticsearch.util.io.compression.ZipCompressor; + +/** + * @author kimchy (Shay Banon) + */ +public class ZipCompressorTests extends AbstractCompressorTests { + + @Override protected Compressor createCompressor() { + return new ZipCompressor(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/json/JsonBuilderTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/json/JsonBuilderTests.java new file mode 100644 index 00000000000..45f4e9e3006 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/json/JsonBuilderTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.json; + +import org.elasticsearch.util.io.FastCharArrayWriter; +import org.testng.annotations.Test; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonBuilderTests { + + @Test public void verifyReuseJsonGenerator() throws Exception { + FastCharArrayWriter writer = new FastCharArrayWriter(); + org.codehaus.jackson.JsonGenerator generator = Jackson.defaultJsonFactory().createJsonGenerator(writer); + generator.writeStartObject(); + generator.writeStringField("test", "value"); + generator.writeEndObject(); + generator.flush(); + + assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}")); + + // try again... + writer.reset(); + generator.writeStartObject(); + generator.writeStringField("test", "value"); + generator.writeEndObject(); + generator.flush(); + // we get a space at the start here since it thinks we are not in the root object (fine, we will ignore it in the real code we use) + assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}")); + } + + @Test public void testSimpleJacksonGenerator() throws Exception { + JsonBuilder builder = new JsonBuilder(); + assertThat(builder.startObject().field("test", "value").endObject().string(), equalTo("{\"test\":\"value\"}")); + builder.reset(); + assertThat(builder.startObject().field("test", "value").endObject().string(), equalTo("{\"test\":\"value\"}")); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/IndexWritersTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/IndexWritersTests.java new file mode 100644 index 00000000000..bbc1e0e7726 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/IndexWritersTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.RAMDirectory; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.lucene.DocumentBuilder.*; +import static org.elasticsearch.util.lucene.IndexWriters.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexWritersTests { + + @Test public void testEstimateSize() throws Exception { + Directory dir = new RAMDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, Lucene.STANDARD_ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + indexWriter.commit(); + assertThat("Index is empty after creation and commit", estimateRamSize(indexWriter), equalTo(0l)); + + + indexWriter.addDocument(doc().add(field("_id", "1")).add(new NumericField("test", Field.Store.YES, true).setIntValue(2)).build()); + + long size = estimateRamSize(indexWriter); + assertThat("After indexing a small document, should be higher", size, greaterThan(100000l)); + + 
indexWriter.deleteDocuments(new Term("_id", "1")); + assertThat(estimateRamSize(indexWriter), greaterThan(size)); + + indexWriter.commit(); + assertThat(estimateRamSize(indexWriter), equalTo(0l)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/AbstractVersionedMapTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/AbstractVersionedMapTests.java new file mode 100644 index 00000000000..996f684479f --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/AbstractVersionedMapTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.lucene.versioned; + +import org.testng.annotations.Test; + +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractVersionedMapTests { + + protected abstract VersionedMap create(); + + @Test public void testSimple() { + VersionedMap versionedMap = create(); + + assertThat(true, equalTo(versionedMap.beforeVersion(1, 1))); + assertThat(true, equalTo(versionedMap.beforeVersion(2, 2))); + + versionedMap.putVersion(1, 2); + assertThat(true, equalTo(versionedMap.beforeVersion(1, 1))); + assertThat(false, equalTo(versionedMap.beforeVersion(1, 2))); + assertThat(true, equalTo(versionedMap.beforeVersion(2, 2))); + + versionedMap.putVersionIfAbsent(1, 0); + assertThat(true, equalTo(versionedMap.beforeVersion(1, 1))); + assertThat(true, equalTo(versionedMap.beforeVersion(2, 2))); + + versionedMap.putVersion(2, 1); + assertThat(true, equalTo(versionedMap.beforeVersion(2, 0))); + assertThat(false, equalTo(versionedMap.beforeVersion(2, 1))); + assertThat(false, equalTo(versionedMap.beforeVersion(2, 2))); + assertThat(false, equalTo(versionedMap.beforeVersion(2, 3))); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/NativeVersionedMapTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/NativeVersionedMapTests.java new file mode 100644 index 00000000000..51ddf008455 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/NativeVersionedMapTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.lucene.versioned; + +/** + * @author kimchy (Shay Banon) + */ +public class NativeVersionedMapTests extends AbstractVersionedMapTests { + + @Override protected VersionedMap create() { + return new NativeVersionedMap(); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/NonBlockingVersionedMapTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/NonBlockingVersionedMapTests.java new file mode 100644 index 00000000000..cbb7bc2cf38 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/NonBlockingVersionedMapTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.lucene.versioned; + +/** + * @author kimchy (Shay Banon) + */ +public class NonBlockingVersionedMapTests extends AbstractVersionedMapTests { + + @Override protected VersionedMap create() { + return new NonBlockingVersionedMap(); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/VersionedIndexReaderTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/VersionedIndexReaderTests.java new file mode 100644 index 00000000000..a5210e61976 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/lucene/versioned/VersionedIndexReaderTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.lucene.versioned; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermDocs; +import org.apache.lucene.store.RAMDirectory; +import org.elasticsearch.util.lucene.Lucene; +import org.testng.annotations.AfterTest; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.lucene.DocumentBuilder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class VersionedIndexReaderTests { + + private RAMDirectory dir; + private IndexReader indexReader; + private IndexWriter indexWriter; + private VersionedMap versionedMap; + + @BeforeTest public void setUp() throws Exception { + versionedMap = new NonBlockingVersionedMap(); + dir = new RAMDirectory(); + indexWriter = new IndexWriter(dir, Lucene.STANDARD_ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + indexWriter.addDocument(doc().add(field("value", "0")).build()); + indexWriter.addDocument(doc().add(field("value", "1")).build()); + indexWriter.addDocument(doc().add(field("value", "2")).build()); + indexWriter.addDocument(doc().add(field("value", "3")).build()); + indexWriter.commit(); + indexReader = IndexReader.open(dir, true); + } + + @AfterTest public void tearDown() throws Exception { + indexWriter.close(); + indexReader.close(); + dir.close(); + } + + @Test public void verifyExpected() throws Exception { + TermDocs termDocs; + Document doc = indexReader.document(0); + + assertThat(doc.getField("value").stringValue(), equalTo("0")); + termDocs = indexReader.termDocs(new Term("value", "0")); + assertThat(termDocs.next(), equalTo(true)); + assertThat(termDocs.next(), equalTo(false)); + + doc = indexReader.document(1); + assertThat(doc.getField("value").stringValue(), 
equalTo("1")); + termDocs = indexReader.termDocs(new Term("value", "1")); + assertThat(termDocs.next(), equalTo(true)); + assertThat(termDocs.next(), equalTo(false)); + + doc = indexReader.document(2); + assertThat(doc.getField("value").stringValue(), equalTo("2")); + termDocs = indexReader.termDocs(new Term("value", "2")); + assertThat(termDocs.next(), equalTo(true)); + assertThat(termDocs.next(), equalTo(false)); + + doc = indexReader.document(3); + assertThat(doc.getField("value").stringValue(), equalTo("3")); + termDocs = indexReader.termDocs(new Term("value", "3")); + assertThat(termDocs.next(), equalTo(true)); + assertThat(termDocs.next(), equalTo(false)); + } + + @Test public void testSimple() throws Exception { + TermDocs termDocs; + // open a versioned index reader in version 0 + VersionedIndexReader versionedIndexReader = new VersionedIndexReader(indexReader, 0, versionedMap); + // delete doc 0 in version 1 + versionedMap.putVersion(0, 1); + + // we can see doc 0 still (versioned reader is on version 0) + termDocs = versionedIndexReader.termDocs(new Term("value", "0")); + assertThat(termDocs.next(), equalTo(true)); + assertThat(termDocs.next(), equalTo(false)); + // make sure we see doc 1, it was never deleted + termDocs = versionedIndexReader.termDocs(new Term("value", "1")); + assertThat(termDocs.next(), equalTo(true)); + assertThat(termDocs.next(), equalTo(false)); + + // delete doc 1 in version 2, we still + versionedMap.putVersion(1, 2); + // we can see doc 0 still (versioned reader is on version 0) + termDocs = versionedIndexReader.termDocs(new Term("value", "0")); + assertThat(termDocs.next(), equalTo(true)); + // we can see doc 1 still (versioned reader is on version 0) + termDocs = versionedIndexReader.termDocs(new Term("value", "1")); + assertThat(termDocs.next(), equalTo(true)); + + // move the versioned reader to 1 + versionedIndexReader = new VersionedIndexReader(indexReader, 1, versionedMap); + // we now can't see the deleted version 0 + 
termDocs = versionedIndexReader.termDocs(new Term("value", "0")); + assertThat(termDocs.next(), equalTo(false)); + // we can still see deleted version 1 + termDocs = versionedIndexReader.termDocs(new Term("value", "1")); + assertThat(termDocs.next(), equalTo(true)); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/JsonSettingsLoaderTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/JsonSettingsLoaderTests.java new file mode 100644 index 00000000000..0d3d0469ca6 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/JsonSettingsLoaderTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.settings.loader; + +import org.elasticsearch.util.settings.Settings; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.settings.ImmutableSettings.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class JsonSettingsLoaderTests { + + @Test public void testSimpleJsonSettings() throws Exception { + Settings settings = settingsBuilder() + .loadFromClasspath("org/elasticsearch/util/settings/loader/test-settings.json") + .build(); + + assertThat(settings.get("test1.value1"), equalTo("value1")); + assertThat(settings.get("test1.test2.value2"), equalTo("value2")); + assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2)); + + // check array + assertThat(settings.get("test1.test3.0"), equalTo("test3-1")); + assertThat(settings.get("test1.test3.1"), equalTo("test3-2")); + assertThat(settings.getAsArray("test1.test3").length, equalTo(2)); + assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1")); + assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2")); + } +} diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/YamlSettingsLoaderTests.java b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/YamlSettingsLoaderTests.java new file mode 100644 index 00000000000..830eb41355c --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/YamlSettingsLoaderTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.settings.loader; + +import org.elasticsearch.util.settings.Settings; +import org.testng.annotations.Test; + +import static org.elasticsearch.util.settings.ImmutableSettings.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class YamlSettingsLoaderTests { + + @Test public void testSimpleYamlSettings() throws Exception { + Settings settings = settingsBuilder() + .loadFromClasspath("org/elasticsearch/util/settings/loader/test-settings.yml") + .build(); + + assertThat(settings.get("test1.value1"), equalTo("value1")); + assertThat(settings.get("test1.test2.value2"), equalTo("value2")); + assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2)); + + // check array + assertThat(settings.get("test1.test3.0"), equalTo("test3-1")); + assertThat(settings.get("test1.test3.1"), equalTo("test3-2")); + assertThat(settings.getAsArray("test1.test3").length, equalTo(2)); + assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1")); + assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2")); + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/test-settings.json b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/test-settings.json new file mode 100644 index 00000000000..e64b882a98e --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/test-settings.json @@ -0,0 +1,10 @@ 
+{ + test1 : { + value1 : "value1", + test2 : { + value2 : "value2", + value3 : 2 + }, + test3 : ["test3-1", "test3-2"] + } +} \ No newline at end of file diff --git a/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/test-settings.yml b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/test-settings.yml new file mode 100644 index 00000000000..b533ae036e7 --- /dev/null +++ b/modules/elasticsearch/src/test/java/org/elasticsearch/util/settings/loader/test-settings.yml @@ -0,0 +1,8 @@ +test1: + value1: value1 + test2: + value2: value2 + value3: 2 + test3: + - test3-1 + - test3-2 diff --git a/modules/test/integration/build.gradle b/modules/test/integration/build.gradle new file mode 100644 index 00000000000..c34c424995f --- /dev/null +++ b/modules/test/integration/build.gradle @@ -0,0 +1,31 @@ +dependsOn(':elasticsearch') + +usePlugin 'java' + +archivesBaseName = "$rootProject.archivesBaseName-$project.archivesBaseName" + +configurations.compile.transitive = true +configurations.testCompile.transitive = true + +// no need to use the resource dir +sourceSets.main.resources.srcDir 'src/main/java' +sourceSets.test.resources.srcDir 'src/test/java' + +dependencies { + compile project(':elasticsearch') + + testCompile project(':test-testng') + testCompile('org.testng:testng:5.10:jdk15') { transitive = false } + testCompile 'org.hamcrest:hamcrest-all:1.1' +} + +test { + useTestNG() + jmvArgs = ["-ea", "-Xmx1024m"] + options.suiteName = project.name + options.listeners = ["org.elasticsearch.util.testng.Listeners"] + options.systemProperties = [ + "es.test.log.conf": System.getProperty("es.test.log.conf", "log4j-gradle.properties"), + "java.net.preferIPv4Stack": "true" + ] +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/AbstractServersTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/AbstractServersTests.java new file mode 100644 index 
00000000000..a4909bf6239 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/AbstractServersTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration; + +import org.elasticsearch.client.Client; +import org.elasticsearch.server.Server; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.settings.Settings; +import org.slf4j.Logger; + +import java.util.Map; + +import static com.google.common.collect.Maps.*; +import static org.elasticsearch.server.ServerBuilder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.Builder.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; + +public abstract class AbstractServersTests { + + protected final Logger logger = Loggers.getLogger(getClass()); + + private Map servers = newHashMap(); + + private Map clients = newHashMap(); + + public Server startServer(String id) { + return buildServer(id).start(); + } + + public Server startServer(String id, Settings settings) { + return buildServer(id, settings).start(); + } + + public Server buildServer(String id) { + return buildServer(id, EMPTY_SETTINGS); + } + + public 
Server buildServer(String id, Settings settings) { + String settingsSource = getClass().getName().replace('.', '/') + ".yml"; + Settings finalSettings = settingsBuilder() + .loadFromClasspath(settingsSource) + .putAll(settings) + .put("name", id) + .build(); + Server server = serverBuilder() + .settings(finalSettings) + .build(); + servers.put(id, server); + clients.put(id, server.client()); + return server; + } + + public void closeServer(String id) { + Client client = clients.remove(id); + if (client != null) { + client.close(); + } + Server server = servers.remove(id); + if (server != null) { + server.close(); + } + } + + public Server server(String id) { + return servers.get(id); + } + + public Client client(String id) { + return clients.get(id); + } + + public void closeAllServers() { + for (Client client : clients.values()) { + client.close(); + } + clients.clear(); + for (Server server : servers.values()) { + server.close(); + } + servers.clear(); + } +} \ No newline at end of file diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/ClientTransportTwoServersSearchTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/ClientTransportTwoServersSearchTests.java new file mode 100644 index 00000000000..73df466ea92 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/ClientTransportTwoServersSearchTests.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.client.transport; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.transport.TransportAddress; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import static org.elasticsearch.action.search.SearchType.*; +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.elasticsearch.search.builder.SearchSourceBuilder.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class ClientTransportTwoServersSearchTests extends AbstractServersTests { + + private TransportClient client; + + @BeforeClass public void createServers() throws Exception { + startServer("server1"); + startServer("server2"); + + TransportAddress server1Address = ((InternalServer) 
server("server1")).injector().getInstance(TransportService.class).boundAddress().publishAddress(); + client = new TransportClient(); + client.addTransportAddress(server1Address); + + + client.admin().indices().create(createIndexRequest("test")).actionGet(); + + for (int i = 0; i < 100; i++) { + index(client, Integer.toString(i), "test", i); + } + client.admin().indices().refresh(refreshRequest("test")).actionGet(); + } + + @AfterClass public void closeServers() { + closeAllServers(); + if (client != null) { + client.close(); + } + } + + @Test public void testDfsQueryThenFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true); + + SearchResponse searchResponse = client.search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + searchResponse = client.searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i))); + } + } + + // + + @Test public void testDfsQueryThenFetchWithSort() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true).sort("age", false); + + SearchResponse searchResponse = 
client.search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i))); + } + + searchResponse = client.searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60))); + } + } + + @Test public void testQueryThenFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true); + + SearchResponse searchResponse = client.search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + searchResponse = client.searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), 
equalTo(Integer.toString(100 - 60 - 1 - i))); + } + } + + @Test public void testQueryThenFetchWithSort() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true).sort("age", false); + + SearchResponse searchResponse = client.search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i))); + } + + searchResponse = client.searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60))); + } + } + + @Test public void testQueryAndFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true); + + SearchResponse searchResponse = client.search(searchRequest("test").source(source).searchType(QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); // 20 per shard + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // TODO support scrolling +// searchResponse = 
searchScrollAction.submit(new SearchScrollRequest(searchResponse.scrollId())).actionGet(); +// +// assertEquals(100, searchResponse.hits().totalHits()); +// assertEquals(40, searchResponse.hits().hits().length); +// for (int i = 0; i < 40; i++) { +// SearchHit hit = searchResponse.hits().hits()[i]; +// assertEquals("id[" + hit.id() + "]", Integer.toString(100 - 60 - 1 - i), hit.id()); +// } + } + + @Test public void testDfsQueryAndFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true); + + SearchResponse searchResponse = client.search(searchRequest("test").source(source).searchType(DFS_QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); // 20 per shard + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // TODO support scrolling +// searchResponse = searchScrollAction.submit(new SearchScrollRequest(searchResponse.scrollId())).actionGet(); +// +// assertEquals(100, searchResponse.hits().totalHits()); +// assertEquals(40, searchResponse.hits().hits().length); +// for (int i = 0; i < 40; i++) { +// SearchHit hit = searchResponse.hits().hits()[i]; +// assertEquals("id[" + hit.id() + "]", Integer.toString(100 - 60 - 1 - i), hit.id()); +// } + } + + + private void index(Client client, String id, String nameValue, int age) { + client.index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet(); + } + + private String source(String id, String nameValue, int age) { + StringBuilder multi = new StringBuilder().append(nameValue); + for (int i = 0; i < age; i++) { + multi.append(" ").append(nameValue); + } + 
return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + (nameValue + id) + "\", age : " + age + ", multi : \"" + multi.toString() + "\", _boost : " + (age * 10) + " } }"; + } +} \ No newline at end of file diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/ClientTransportTwoServersSearchTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/ClientTransportTwoServersSearchTests.yml new file mode 100644 index 00000000000..fb4173efb1c --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/ClientTransportTwoServersSearchTests.yml @@ -0,0 +1,9 @@ +cluster: + routing: + schedule: 100ms +index: + numberOfShards: 3 + numberOfReplicas: 0 + routing : + # Use simple hashing since we want even distribution and our ids are simple incremented number based + hash.type : simple diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/DiscoveryTransportClientTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/DiscoveryTransportClientTests.java new file mode 100644 index 00000000000..8180af61237 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/DiscoveryTransportClientTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.client.transport; + +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.testng.annotations.AfterMethod; + +import static org.elasticsearch.client.Requests.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DiscoveryTransportClientTests extends AbstractServersTests { + + private TransportClient client; + + @AfterMethod public void closeServers() { + if (client != null) { + client.close(); + } + closeAllServers(); + } + + /*@Test*/ + + public void testWithDiscovery() throws Exception { + startServer("server1"); + client = new TransportClient(ImmutableSettings.settingsBuilder().putBoolean("discovery.enabled", true).build()); + // wait a bit so nodes will be discovered + Thread.sleep(1000); + client.admin().indices().create(createIndexRequest("test")).actionGet(); + Thread.sleep(500); + + client.admin().cluster().ping(pingSingleRequest("test").type("person").id("1")).actionGet(); + startServer("server2"); + Thread.sleep(1000); + client.admin().cluster().ping(pingSingleRequest("test").type("person").id("1")).actionGet(); + closeServer("server1"); + Thread.sleep(10000); + client.admin().cluster().ping(pingSingleRequest("test").type("person").id("1")).actionGet(); + closeServer("server2"); + client.admin().cluster().ping(pingSingleRequest("test").type("person").id("1")).actionGet(); + } + +} diff --git 
a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/DiscoveryTransportClientTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/DiscoveryTransportClientTests.yml new file mode 100644 index 00000000000..9c86b8ac2fb --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/DiscoveryTransportClientTests.yml @@ -0,0 +1,6 @@ +cluster: + routing: + schedule: 100ms +index: + numberOfShards: 5 + numberOfReplicas: 1 diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/SimpleSingleTransportClientTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/SimpleSingleTransportClientTests.java new file mode 100644 index 00000000000..acfdf5fe156 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/SimpleSingleTransportClientTests.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.integration.client.transport; + +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.util.settings.ImmutableSettings; +import org.elasticsearch.util.transport.TransportAddress; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleSingleTransportClientTests extends AbstractServersTests { + + private TransportClient client; + + @AfterMethod public void closeServers() { + closeAllServers(); + if (client != null) { + client.close(); + } + } + + @Test public void testOnlyWithTransportAddress() throws Exception { + startServer("server1"); + TransportAddress server1Address = ((InternalServer) server("server1")).injector().getInstance(TransportService.class).boundAddress().publishAddress(); + client = new TransportClient(ImmutableSettings.settingsBuilder().putBoolean("discovery.enabled", 
false).build()); + client.addTransportAddress(server1Address); + testSimpleActions(client); + } + + /*@Test*/ + + public void testWithDiscovery() throws Exception { + startServer("server1"); + client = new TransportClient(ImmutableSettings.settingsBuilder().putBoolean("discovery.enabled", true).build()); + // wait a bit so nodes will be discovered + Thread.sleep(1000); + testSimpleActions(client); + } + + private void testSimpleActions(Client client) { + IndexResponse indexResponse = client.index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + assertThat(indexResponse.id(), equalTo("1")); + assertThat(indexResponse.type(), equalTo("type1")); + RefreshResponse refreshResult = client.admin().indices().refresh(refreshRequest("test")).actionGet(); + assertThat(refreshResult.index("test").successfulShards(), equalTo(5)); + assertThat(refreshResult.index("test").failedShards(), equalTo(0)); + + IndicesStatusResponse indicesStatusResponse = client.admin().indices().status(indicesStatus()).actionGet(); + assertThat(indicesStatusResponse.indices().size(), equalTo(1)); + assertThat(indicesStatusResponse.index("test").shards().size(), equalTo(5)); // 5 index shards (1 with 1 backup) + assertThat(indicesStatusResponse.index("test").docs().numDocs(), equalTo(1)); + + GetResponse getResult; + + for (int i = 0; i < 5; i++) { + getResult = client.get(getRequest("test").type("type1").id("1").threadedOperation(false)).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + getResult = client.get(getRequest("test").type("type1").id("1").threadedOperation(true)).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + } + + for (int i = 0; i < 5; i++) { + getResult = client.get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat(getResult.empty(), equalTo(true)); + } + + DeleteResponse deleteResponse = 
client.delete(deleteRequest("test").type("type1").id("1")).actionGet(); + assertThat(deleteResponse.id(), equalTo("1")); + assertThat(deleteResponse.type(), equalTo("type1")); + client.admin().indices().refresh(refreshRequest("test")).actionGet(); + + for (int i = 0; i < 5; i++) { + getResult = client.get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat(getResult.empty(), equalTo(true)); + } + + client.index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + client.index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet(); + + FlushResponse flushResult = client.admin().indices().flush(flushRequest("test")).actionGet(); + assertThat(flushResult.index("test").successfulShards(), equalTo(5)); + assertThat(flushResult.index("test").failedShards(), equalTo(0)); + client.admin().indices().refresh(refreshRequest("test")).actionGet(); + + for (int i = 0; i < 5; i++) { + getResult = client.get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + getResult = client.get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("2", "test"))); + } + + // check count + for (int i = 0; i < 5; i++) { + // test successful + CountResponse countResponse = client.count(countRequest("test").querySource(termQuery("_type", "type1"))).actionGet(); + assertThat(countResponse.count(), equalTo(2l)); + assertThat(countResponse.successfulShards(), equalTo(5)); + assertThat(countResponse.failedShards(), equalTo(0)); + // test failed (simply query that can't be parsed) + countResponse = client.count(countRequest("test").querySource("{ term : { _type : \"type1 } }")).actionGet(); + + assertThat(countResponse.count(), equalTo(0l)); + assertThat(countResponse.successfulShards(), equalTo(0)); + assertThat(countResponse.failedShards(), equalTo(5)); + } + + 
DeleteByQueryResponse queryResponse = client.deleteByQuery(deleteByQueryRequest("test").querySource(termQuery("name", "test2"))).actionGet(); + assertThat(queryResponse.index("test").successfulShards(), equalTo(5)); + assertThat(queryResponse.index("test").failedShards(), equalTo(0)); + client.admin().indices().refresh(refreshRequest("test")).actionGet(); + + for (int i = 0; i < 5; i++) { + getResult = client.get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + getResult = client.get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat("cycle #" + i, getResult.empty(), equalTo(false)); + } + + + // stop the server + closeServer("server1"); + + // it should try and reconnect + try { + client.index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + assert false : "should fail..."; + } catch (ConnectTransportException e) { + // all is well + } + } + + private String source(String id, String nameValue) { + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }"; + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/SimpleTransportClientTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/SimpleTransportClientTests.yml new file mode 100644 index 00000000000..ffa9e152ec5 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/client/transport/SimpleTransportClientTests.yml @@ -0,0 +1,12 @@ +cluster: + routing: + schedule: 100ms +index: + numberOfShards: 5 + numberOfReplicas: 1 + +# use large interval node sampler +client: + transport: + nodesSamplerInterval: 30s + diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/datanode/SimpleDataNodesTests.java 
b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/datanode/SimpleDataNodesTests.java new file mode 100644 index 00000000000..923b5605480 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/datanode/SimpleDataNodesTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.integration.datanode; + +import org.elasticsearch.action.PrimaryNotStartedActionException; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Requests; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleDataNodesTests extends AbstractServersTests { + + @AfterMethod public void closeServers() { + closeAllServers(); + } + + @Test public void testDataNodes() throws Exception { + startServer("nonData1", settingsBuilder().putBoolean("node.data", false).build()); + client("nonData1").admin().indices().create(createIndexRequest("test")).actionGet(); + try { + client("nonData1").index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet(); + assert false : "no allocation should happen"; + } catch (PrimaryNotStartedActionException e) { + // all is well + } + + startServer("nonData2", settingsBuilder().putBoolean("node.data", false).build()); + Thread.sleep(500); + + // still no shard should be allocated + try { + client("nonData2").index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet(); + assert false : "no allocation should happen"; + } catch (PrimaryNotStartedActionException e) { + // all is well + } + + // now, start a node data, and see that it gets with shards + startServer("data1", settingsBuilder().putBoolean("node.data", true).build()); + Thread.sleep(500); + + IndexResponse indexResponse = 
client("nonData2").index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + assertThat(indexResponse.id(), equalTo("1")); + assertThat(indexResponse.type(), equalTo("type1")); + } + + private String source(String id, String nameValue) { + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }"; + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/datanode/SimpleDataNodesTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/datanode/SimpleDataNodesTests.yml new file mode 100644 index 00000000000..9c86b8ac2fb --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/datanode/SimpleDataNodesTests.yml @@ -0,0 +1,6 @@ +cluster: + routing: + schedule: 100ms +index: + numberOfShards: 5 + numberOfReplicas: 1 diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/document/DocumentActionsTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/document/DocumentActionsTests.java new file mode 100644 index 00000000000..005bd9699a6 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/document/DocumentActionsTests.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.document; + +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class DocumentActionsTests extends AbstractServersTests { + + @AfterMethod public void closeServers() { + closeAllServers(); + } + + @Test public void testIndexActions() throws Exception { + startServer("server1"); + startServer("server2"); + + logger.info("Creating index test"); + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + + logger.info("Indexing [type1/1]"); + IndexResponse indexResponse = client("server1").index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + assertThat(indexResponse.id(), equalTo("1")); + 
assertThat(indexResponse.type(), equalTo("type1")); + logger.info("Refreshing"); + RefreshResponse refreshResult = client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + assertThat(refreshResult.index("test").successfulShards(), equalTo(5)); + assertThat(refreshResult.index("test").failedShards(), equalTo(0)); + + GetResponse getResult; + + logger.info("Get [type1/1]"); + for (int i = 0; i < 5; i++) { + getResult = client("server1").get(getRequest("test").type("type1").id("1").threadedOperation(false)).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + getResult = client("server1").get(getRequest("test").type("type1").id("1").threadedOperation(true)).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + } + + logger.info("Get [type1/2] (should be empty)"); + for (int i = 0; i < 5; i++) { + getResult = client("server1").get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat(getResult.empty(), equalTo(true)); + } + + logger.info("Delete [type1/1]"); + DeleteResponse deleteResponse = client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet(); + assertThat(deleteResponse.id(), equalTo("1")); + assertThat(deleteResponse.type(), equalTo("type1")); + logger.info("Refreshing"); + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + + logger.info("Get [type1/1] (should be empty)"); + for (int i = 0; i < 5; i++) { + getResult = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat(getResult.empty(), equalTo(true)); + } + + logger.info("Index [type1/1]"); + client("server1").index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + logger.info("Index [type1/2]"); + client("server1").index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet(); + + logger.info("Flushing"); + FlushResponse flushResult 
= client("server1").admin().indices().flush(flushRequest("test")).actionGet(); + assertThat(flushResult.index("test").successfulShards(), equalTo(5)); + assertThat(flushResult.index("test").failedShards(), equalTo(0)); + logger.info("Refreshing"); + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + + logger.info("Get [type1/1] and [type1/2]"); + for (int i = 0; i < 5; i++) { + getResult = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + getResult = client("server1").get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("2", "test"))); + } + + logger.info("Count"); + // check count + for (int i = 0; i < 5; i++) { + // test successful + CountResponse countResponse = client("server1").count(countRequest("test").querySource(termQuery("_type", "type1")).operationThreading(BroadcastOperationThreading.NO_THREADS)).actionGet(); + assertThat(countResponse.count(), equalTo(2l)); + assertThat(countResponse.successfulShards(), equalTo(5)); + assertThat(countResponse.failedShards(), equalTo(0)); + + countResponse = client("server1").count(countRequest("test").querySource(termQuery("_type", "type1")).operationThreading(BroadcastOperationThreading.SINGLE_THREAD)).actionGet(); + assertThat(countResponse.count(), equalTo(2l)); + assertThat(countResponse.successfulShards(), equalTo(5)); + assertThat(countResponse.failedShards(), equalTo(0)); + + countResponse = client("server1").count(countRequest("test").querySource(termQuery("_type", "type1")).operationThreading(BroadcastOperationThreading.THREAD_PER_SHARD)).actionGet(); + assertThat(countResponse.count(), equalTo(2l)); + assertThat(countResponse.successfulShards(), equalTo(5)); + assertThat(countResponse.failedShards(), equalTo(0)); + + // test failed (simply query that can't be parsed) + countResponse = 
client("server1").count(countRequest("test").querySource("{ term : { _type : \"type1 } }")).actionGet(); + + assertThat(countResponse.count(), equalTo(0l)); + assertThat(countResponse.successfulShards(), equalTo(0)); + assertThat(countResponse.failedShards(), equalTo(5)); + } + + logger.info("Delete by query"); + DeleteByQueryResponse queryResponse = client("server2").deleteByQuery(deleteByQueryRequest("test").querySource(termQuery("name", "test2"))).actionGet(); + assertThat(queryResponse.index("test").successfulShards(), equalTo(5)); + assertThat(queryResponse.index("test").failedShards(), equalTo(0)); + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + + logger.info("Get [type1/1] and [type1/2], should be empty"); + for (int i = 0; i < 5; i++) { + getResult = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat("cycle #" + i, getResult.source(), equalTo(source("1", "test"))); + getResult = client("server1").get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat("cycle #" + i, getResult.empty(), equalTo(false)); + } + } + + private String source(String id, String nameValue) { + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }"; + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/document/DocumentActionsTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/document/DocumentActionsTests.yml new file mode 100644 index 00000000000..e3ec780afcc --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/document/DocumentActionsTests.yml @@ -0,0 +1,6 @@ +cluster: + routing: + schedule: 200ms +index: + numberOfShards: 5 + numberOfReplicas: 1 diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/AbstractSimpleIndexGatewayTests.java 
b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/AbstractSimpleIndexGatewayTests.java new file mode 100644 index 00000000000..9a79830cea8 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/AbstractSimpleIndexGatewayTests.java @@ -0,0 +1,146 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.integration.gateway; + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.client.Requests; +import org.elasticsearch.gateway.Gateway; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public abstract class AbstractSimpleIndexGatewayTests extends AbstractServersTests { + + @AfterMethod public void closeServers() { + server("server1").stop(); + // since we store (by default) the index snapshot under the gateway, resetting it will reset the index data as well + ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset(); + closeAllServers(); + } + + @BeforeMethod public void buildServer1() { + buildServer("server1"); + // since we store (by default) the index snapshot under the gateway, resetting it will reset the index data as well + ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset(); + } + + @Test public void testSnapshotOperations() throws Exception { + server("server1").start(); + + // Translog tests + + logger.info("Creating index [{}]", "test"); + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + // create two and delete the first + logger.info("Indexing #1"); + client("server1").index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + logger.info("Indexing #2"); + client("server1").index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet(); + logger.info("Deleting #1"); + 
client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet(); + + // perform snapshot to the index + logger.info("Gateway Snapshot"); + client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet(); + logger.info("Gateway Snapshot (should be a no op)"); + // do it again, it should be a no op + client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet(); + + logger.info("Closing the server"); + closeServer("server1"); + Thread.sleep(500); + logger.info("Starting the server, should recover from the gateway (only translog should be populated)"); + startServer("server1"); + Thread.sleep(1000); + + logger.info("Getting #1, should not exists"); + GetResponse getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat(getResponse.empty(), equalTo(true)); + logger.info("Getting #2"); + getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat(getResponse.source(), equalTo(source("2", "test"))); + + // Now flush and add some data (so we have index recovery as well) + logger.info("Flushing, so we have actual content in the index files (#2 should be in the index)"); + client("server1").admin().indices().flush(flushRequest("test")).actionGet(); + logger.info("Indexing #3, so we have something in the translog as well"); + client("server1").index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test"))).actionGet(); + + logger.info("Gateway Snapshot"); + client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet(); + logger.info("Gateway Snapshot (should be a no op)"); + client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet(); + + logger.info("Closing the server"); + closeServer("server1"); + Thread.sleep(500); + logger.info("Starting the server, should recover from the gateway (both index and 
translog)"); + startServer("server1"); + Thread.sleep(1000); + + logger.info("Getting #1, should not exists"); + getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat(getResponse.empty(), equalTo(true)); + logger.info("Getting #2 (not from the translog, but from the index)"); + getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat(getResponse.source(), equalTo(source("2", "test"))); + logger.info("Getting #3 (from the translog)"); + getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet(); + assertThat(getResponse.source(), equalTo(source("3", "test"))); + + logger.info("Flushing, so we have actual content in the index files (#3 should be in the index now as well)"); + client("server1").admin().indices().flush(flushRequest("test")).actionGet(); + + logger.info("Gateway Snapshot"); + client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet(); + logger.info("Gateway Snapshot (should be a no op)"); + client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet(); + + logger.info("Closing the server"); + closeServer("server1"); + Thread.sleep(500); + logger.info("Starting the server, should recover from the gateway (just from the index, nothing in the translog)"); + startServer("server1"); + Thread.sleep(1000); + + logger.info("Getting #1, should not exists"); + getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(); + assertThat(getResponse.empty(), equalTo(true)); + logger.info("Getting #2 (not from the translog, but from the index)"); + getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet(); + assertThat(getResponse.source(), equalTo(source("2", "test"))); + logger.info("Getting #3 (not from the translog, but from the index)"); + getResponse = 
client("server1").get(getRequest("test").type("type1").id("3")).actionGet(); + assertThat(getResponse.source(), equalTo(source("3", "test"))); + } + + private String source(String id, String nameValue) { + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }"; + } +} \ No newline at end of file diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/FsMetaDataGatewayTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/FsMetaDataGatewayTests.java new file mode 100644 index 00000000000..8aeedd0f3fc --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/FsMetaDataGatewayTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.integration.gateway.fs; + +import org.elasticsearch.gateway.Gateway; +import org.elasticsearch.indices.IndexAlreadyExistsException; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; + +/** + * @author kimchy (Shay Banon) + */ +public class FsMetaDataGatewayTests extends AbstractServersTests { + + @AfterMethod void closeServers() { + server("server1").stop(); + // since we store (by default) the index snapshot under the gateway, resetting it will reset the index data as well + ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset(); + closeAllServers(); + } + + @BeforeMethod void buildServer1() { + buildServer("server1"); + // since we store (by default) the index snapshot under the gateway, resetting it will reset the index data as well + ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset(); + } + + @Test public void testIndexActions() throws Exception { + + buildServer("server1"); + ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset(); + server("server1").start(); + + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + + closeServer("server1"); + + Thread.sleep(1000); + + startServer("server1"); + Thread.sleep(3000); + try { + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + assert false : "index should exists"; + } catch (IndexAlreadyExistsException e) { + // all is well + } + + ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset(); + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/FsMetaDataGatewayTests.yml 
b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/FsMetaDataGatewayTests.yml new file mode 100644 index 00000000000..9c9507075d5 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/FsMetaDataGatewayTests.yml @@ -0,0 +1,8 @@ +cluster: + routing: + schedule: 200ms +gateway: + type: fs +index: + numberOfShards: 5 + numberOfReplicas: 1 diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/SimpleFsIndexGatewayTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/SimpleFsIndexGatewayTests.java new file mode 100644 index 00000000000..905d0f6e9bf --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/gateway/fs/SimpleFsIndexGatewayTests.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.test.integration.gateway.fs;

import org.elasticsearch.test.integration.gateway.AbstractSimpleIndexGatewayTests;

/**
 * Runs the inherited simple index gateway test suite against the fs gateway;
 * the concrete gateway/index configuration comes from the matching
 * SimpleFsIndexGatewayTests.yml file, so no test methods are defined here.
 *
 * @author kimchy (Shay Banon)
 */
public class SimpleFsIndexGatewayTests extends AbstractSimpleIndexGatewayTests {

}
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test.integration.gateway.fs;

import org.elasticsearch.test.integration.gateway.AbstractSimpleIndexGatewayTests;

/**
 * Runs the inherited simple index gateway test suite against the fs gateway
 * with a ram index store; the configuration (gateway type fs, store type ram)
 * comes from the matching SimpleFsIndexInRamIndexGatewayTests.yml file, so no
 * test methods are defined here.
 *
 * @author kimchy (Shay Banon)
 */
public class SimpleFsIndexInRamIndexGatewayTests extends AbstractSimpleIndexGatewayTests {

}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.indexlifecycle; + +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.util.logging.Loggers; +import org.elasticsearch.util.settings.Settings; +import org.slf4j.Logger; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; +import static org.elasticsearch.util.settings.ImmutableSettings.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class IndexLifecycleActionTests extends AbstractServersTests { + + private final Logger logger = Loggers.getLogger(IndexLifecycleActionTests.class); + + @AfterMethod public void closeServers() { + closeAllServers(); + } + + @Test public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { + Settings settings = 
settingsBuilder() + .putInt(SETTING_NUMBER_OF_SHARDS, 11) + .putInt(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + // start one server + logger.info("Starting sever1"); + startServer("server1", settings); + + ClusterService clusterService1 = ((InternalServer) server("server1")).injector().getInstance(ClusterService.class); + + logger.info("Creating index [test]"); + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + + Thread.sleep(1000); + + ClusterState clusterState1 = clusterService1.state(); + RoutingNode routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11)); + + clusterState1 = client("server1").admin().cluster().state(clusterState()).actionGet().state(); + routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11)); + + logger.info("Starting server2"); + // start another server + startServer("server2", settings); + + ClusterService clusterService2 = ((InternalServer) server("server2")).injector().getInstance(ClusterService.class); + + Thread.sleep(1500); + + clusterState1 = clusterService1.state(); + routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11)); + + ClusterState clusterState2 = clusterService2.state(); + RoutingNode routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11)); + + logger.info("Starting server3"); + // start another server + startServer("server3", settings); + + ClusterService clusterService3 = ((InternalServer) server("server3")).injector().getInstance(ClusterService.class); + + Thread.sleep(1500); 
+ + clusterState1 = clusterService1.state(); + routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8))); + + clusterState2 = clusterService2.state(); + routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8))); + + ClusterState clusterState3 = clusterService3.state(); + RoutingNode routingNodeEntry3 = clusterState3.routingNodes().nodesToShards().get(clusterState3.nodes().localNodeId()); + assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(7)); + + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22)); + + logger.info("Closing server1"); + // kill the first server + closeServer("server1"); + + Thread.sleep(1500); + + clusterState2 = clusterService2.state(); + routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11)); + + clusterState3 = clusterService3.state(); + routingNodeEntry3 = clusterState3.routingNodes().nodesToShards().get(clusterState3.nodes().localNodeId()); + assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11)); + + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22)); + + logger.info("Deleting index [test]"); + // last, lets delete the index + client("server2").admin().indices().delete(deleteIndexRequest("test")).actionGet(); + + Thread.sleep(1500); + clusterState2 = clusterService2.state(); + routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + 
assertThat(routingNodeEntry2, nullValue()); + + clusterState3 = clusterService3.state(); + routingNodeEntry3 = clusterState3.routingNodes().nodesToShards().get(clusterState3.nodes().localNodeId()); + assertThat(routingNodeEntry3, nullValue()); + } + + @Test public void testIndexLifecycleActionsWith11Shards0Backup() throws Exception { + + Settings settings = settingsBuilder() + .putInt(SETTING_NUMBER_OF_SHARDS, 11) + .putInt(SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + // start one server + logger.info("Starting server1"); + startServer("server1", settings); + + ClusterService clusterService1 = ((InternalServer) server("server1")).injector().getInstance(ClusterService.class); + + logger.info("Creating index [test]"); + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + + Thread.sleep(1000); + + ClusterState clusterState1 = clusterService1.state(); + RoutingNode routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11)); + + // start another server + logger.info("Starting server2"); + startServer("server2", settings); + + ClusterService clusterService2 = ((InternalServer) server("server2")).injector().getInstance(ClusterService.class); + + Thread.sleep(2000); + + clusterState1 = clusterService1.state(); + routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(6), equalTo(5))); + + ClusterState clusterState2 = clusterService2.state(); + RoutingNode routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6))); + + // start another server + logger.info("Starting server3"); + startServer("server3"); + + ClusterService clusterService3 = 
((InternalServer) server("server3")).injector().getInstance(ClusterService.class); + + Thread.sleep(1500); + + clusterState1 = clusterService1.state(); + routingNodeEntry1 = clusterState1.routingNodes().nodesToShards().get(clusterState1.nodes().localNodeId()); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(3))); + + clusterState2 = clusterService2.state(); + routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(3))); + + ClusterState clusterState3 = clusterService3.state(); + RoutingNode routingNodeEntry3 = clusterState3.routingNodes().nodesToShards().get(clusterState3.nodes().localNodeId()); + assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(3)); + + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11)); + + logger.info("Closing server1"); + // kill the first server + closeServer("server1"); + + Thread.sleep(2000); + + clusterState2 = clusterService2.state(); + routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6))); + + clusterState3 = clusterService3.state(); + routingNodeEntry3 = clusterState3.routingNodes().nodesToShards().get(clusterState3.nodes().localNodeId()); + assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6))); + + assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11)); + + logger.info("Deleting index [test]"); + // last, lets delete the index + client("server2").admin().indices().delete(deleteIndexRequest("test")).actionGet(); + + Thread.sleep(2000); + 
clusterState2 = clusterService2.state(); + routingNodeEntry2 = clusterState2.routingNodes().nodesToShards().get(clusterState2.nodes().localNodeId()); + assertThat(routingNodeEntry2, nullValue()); + + clusterState3 = clusterService3.state(); + routingNodeEntry3 = clusterState3.routingNodes().nodesToShards().get(clusterState3.nodes().localNodeId()); + assertThat(routingNodeEntry3, nullValue()); + } + + @Test public void testTwoIndicesCreation() throws Exception { + + Settings settings = settingsBuilder() + .putInt(SETTING_NUMBER_OF_SHARDS, 11) + .putInt(SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + // start one server + startServer("server1", settings); + client("server1").admin().indices().create(createIndexRequest("test1")).actionGet(); + client("server1").admin().indices().create(createIndexRequest("test2")).actionGet(); + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/indexlifecycle/IndexLifecycleActionTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/indexlifecycle/IndexLifecycleActionTests.yml new file mode 100644 index 00000000000..1852901f452 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/indexlifecycle/IndexLifecycleActionTests.yml @@ -0,0 +1,6 @@ +cluster: + routing: + schedule: 200ms +index: + numberOfShards: 11 + numberOfReplicas: 1 diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/nodesinfo/SimpleNodesInfoTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/nodesinfo/SimpleNodesInfoTests.java new file mode 100644 index 00000000000..540878d5f2d --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/nodesinfo/SimpleNodesInfoTests.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.nodesinfo; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import static org.elasticsearch.client.Requests.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SimpleNodesInfoTests extends AbstractServersTests { + + @AfterMethod public void closeServers() { + closeAllServers(); + } + + @Test public void testNodesInfos() { + startServer("server1"); + startServer("server2"); + String server1NodeId = ((InternalServer) server("server1")).injector().getInstance(ClusterService.class).state().nodes().localNodeId(); + String server2NodeId = ((InternalServer) server("server2")).injector().getInstance(ClusterService.class).state().nodes().localNodeId(); + + NodesInfoResponse response = client("server1").admin().cluster().nodesInfo(nodesInfo()).actionGet(); + assertThat(response.nodes().length, equalTo(2)); + 
assertThat(response.nodesMap().get(server1NodeId), notNullValue()); + assertThat(response.nodesMap().get(server2NodeId), notNullValue()); + + response = client("server2").admin().cluster().nodesInfo(nodesInfo()).actionGet(); + assertThat(response.nodes().length, equalTo(2)); + assertThat(response.nodesMap().get(server1NodeId), notNullValue()); + assertThat(response.nodesMap().get(server2NodeId), notNullValue()); + + response = client("server1").admin().cluster().nodesInfo(nodesInfo(server1NodeId)).actionGet(); + assertThat(response.nodes().length, equalTo(1)); + assertThat(response.nodesMap().get(server1NodeId), notNullValue()); + + response = client("server2").admin().cluster().nodesInfo(nodesInfo(server1NodeId)).actionGet(); + assertThat(response.nodes().length, equalTo(1)); + assertThat(response.nodesMap().get(server1NodeId), notNullValue()); + + response = client("server1").admin().cluster().nodesInfo(nodesInfo(server2NodeId)).actionGet(); + assertThat(response.nodes().length, equalTo(1)); + assertThat(response.nodesMap().get(server2NodeId), notNullValue()); + + response = client("server2").admin().cluster().nodesInfo(nodesInfo(server2NodeId)).actionGet(); + assertThat(response.nodes().length, equalTo(1)); + assertThat(response.nodesMap().get(server2NodeId), notNullValue()); + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/ping/PingActionTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/ping/PingActionTests.java new file mode 100644 index 00000000000..585e59ff4ea --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/ping/PingActionTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
 * Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test.integration.ping;

import org.elasticsearch.action.admin.cluster.ping.broadcast.BroadcastPingResponse;
import org.elasticsearch.action.admin.cluster.ping.replication.ReplicationPingResponse;
import org.elasticsearch.action.admin.cluster.ping.single.SinglePingResponse;
import org.elasticsearch.test.integration.AbstractServersTests;
import org.elasticsearch.util.logging.Loggers;
import org.slf4j.Logger;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import static org.elasticsearch.client.Requests.*;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;

/**
 * Exercises the three cluster ping APIs (single, broadcast, replication) against
 * two indices on a two-server cluster. The shard-count assertions match the
 * 5-shard / 1-replica configuration in PingActionTests.yml.
 *
 * @author kimchy (Shay Banon)
 */
public class PingActionTests extends AbstractServersTests {

    private final Logger logger = Loggers.getLogger(PingActionTests.class);

    @BeforeMethod public void startServers() {
        startServer("server1");
        startServer("server2");
    }

    @AfterMethod public void closeServers() {
        closeAllServers();
    }

    @Test public void testIndexActions() throws Exception {
        logger.info("Creating index [test1]");
        client("server1").admin().indices().create(createIndexRequest("test1")).actionGet();
        logger.info("Creating index [test2]");
        client("server1").admin().indices().create(createIndexRequest("test2")).actionGet();

        // let the 200ms routing schedule allocate and start the shards
        logger.info("Sleeping to shards allocate and start");
        Thread.sleep(500);

        logger.info("Pinging single person with id 1");
        // NOTE(review): singleResponse is captured but never asserted on — this only
        // verifies the call does not throw; confirm whether assertions are missing.
        SinglePingResponse singleResponse = client("server1").admin().cluster().ping(pingSingleRequest("test1").type("person").id("1")).actionGet();

        logger.info("Broadcast pinging test1 and test2");
        // two indices x 5 shards = 10 successful shard-level pings expected
        BroadcastPingResponse broadcastResponse = client("server1").admin().cluster().ping(pingBroadcastRequest("test1", "test2")).actionGet();
        assertThat(broadcastResponse.successfulShards(), equalTo(10));
        assertThat(broadcastResponse.failedShards(), equalTo(0));

        logger.info("Broadcast pinging test1");
        // one index x 5 shards = 5 successful shard-level pings expected
        broadcastResponse = client("server1").admin().cluster().ping(pingBroadcastRequest("test1")).actionGet();
        assertThat(broadcastResponse.successfulShards(), equalTo(5));
        assertThat(broadcastResponse.failedShards(), equalTo(0));

        logger.info("Replication pinging test1 and test2");
        // replication ping reports per-index results; only test1's shards are asserted here
        ReplicationPingResponse replicationResponse = client("server1").admin().cluster().ping(pingReplicationRequest("test1", "test2")).actionGet();
        assertThat(replicationResponse.indices().size(), equalTo(2));
        assertThat(replicationResponse.index("test1").successfulShards(), equalTo(5));
        assertThat(replicationResponse.index("test1").failedShards(), equalTo(0));
    }
}
b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/recovery/SimpleRecoveryTests.java new file mode 100644 index 00000000000..5eb21d8835e --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/recovery/SimpleRecoveryTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
 */

package org.elasticsearch.test.integration.recovery;

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.test.integration.AbstractServersTests;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;

import static org.elasticsearch.client.Requests.*;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;

/**
 * Verifies shard recovery: documents indexed on one server (one flushed to the
 * index, one still only in the translog) must be retrievable from every server
 * after new servers join and shards are recovered/relocated onto them.
 *
 * @author kimchy (Shay Banon)
 */
public class SimpleRecoveryTests extends AbstractServersTests {

    @AfterMethod public void closeServers() {
        closeAllServers();
    }

    @Test public void testSimpleRecovery() throws Exception {
        startServer("server1");

        client("server1").admin().indices().create(createIndexRequest("test")).actionGet(5000);

        // doc 1 is flushed into the index; doc 2 stays in the translog (only refreshed),
        // so recovery must replay both the index files and the translog
        client("server1").index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
        client("server1").admin().indices().flush(flushRequest("test")).actionGet();
        client("server1").index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
        client("server1").admin().indices().refresh(refreshRequest("test")).actionGet();

        startServer("server2");
        // sleep so we recover properly
        Thread.sleep(5000);

        GetResponse getResult;

        // both documents must be readable from both servers; repeated to exercise
        // both threaded and non-threaded get operations
        for (int i = 0; i < 5; i++) {
            getResult = client("server1").get(getRequest("test").type("type1").id("1").threadedOperation(false)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("1", "test")));
            getResult = client("server2").get(getRequest("test").type("type1").id("1").threadedOperation(false)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("1", "test")));
            getResult = client("server1").get(getRequest("test").type("type1").id("2").threadedOperation(true)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("2", "test")));
            getResult = client("server2").get(getRequest("test").type("type1").id("2").threadedOperation(true)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("2", "test")));
        }

        // now start another one so we move some primaries
        startServer("server3");
        Thread.sleep(5000);

        // after relocation, all three servers must still serve both documents
        for (int i = 0; i < 5; i++) {
            getResult = client("server1").get(getRequest("test").type("type1").id("1")).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("1", "test")));
            getResult = client("server2").get(getRequest("test").type("type1").id("1")).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("1", "test")));
            getResult = client("server3").get(getRequest("test").type("type1").id("1")).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("1", "test")));
            getResult = client("server1").get(getRequest("test").type("type1").id("2").threadedOperation(true)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("2", "test")));
            getResult = client("server2").get(getRequest("test").type("type1").id("2").threadedOperation(true)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("2", "test")));
            getResult = client("server3").get(getRequest("test").type("type1").id("2").threadedOperation(true)).actionGet(1000);
            assertThat(getResult.source(), equalTo(source("2", "test")));
        }
    }

    // builds the JSON source used both for indexing and for the exact-match assertions above
    private String source(String id, String nameValue) {
        return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
    }
}
a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/SingleInstanceEmbeddedSearchTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/SingleInstanceEmbeddedSearchTests.java new file mode 100644 index 00000000000..ddf05acb9ac --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/SingleInstanceEmbeddedSearchTests.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.integration.search; + +import org.elasticsearch.client.Client; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.controller.ShardDoc; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.util.trove.ExtTIntArrayList; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.util.Map; + +import static com.google.common.collect.Lists.*; +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.elasticsearch.search.builder.SearchSourceBuilder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class SingleInstanceEmbeddedSearchTests extends AbstractServersTests { + + private SearchService searchService; + + private SearchPhaseController searchPhaseController; + + @BeforeClass public void createServerAndInitWithData() throws Exception { + startServer("server1"); + + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + index(client("server1"), "1", "test1", 1); + index(client("server1"), 
"2", "test2", 2); + index(client("server1"), "3", "test3", 2); + index(client("server1"), "4", "test4", 2); + index(client("server1"), "5", "test5", 2); + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + + searchService = ((InternalServer) server("server1")).injector().getInstance(SearchService.class); + searchPhaseController = ((InternalServer) server("server1")).injector().getInstance(SearchPhaseController.class); + } + + @AfterClass public void closeServer() { + closeAllServers(); + } + + @Test public void testDirectDfs() throws Exception { + DfsSearchResult dfsResult = searchService.executeDfsPhase(searchRequest(searchSource().query(termQuery("name", "test1")))); + + assertThat(dfsResult.terms().length, equalTo(1)); + assertThat(dfsResult.freqs().length, equalTo(1)); + assertThat(dfsResult.terms()[0].field(), equalTo("name")); + assertThat(dfsResult.terms()[0].text(), equalTo("test1")); + assertThat(dfsResult.freqs()[0], equalTo(1)); + } + + @Test public void testDirectQuery() throws Exception { + QuerySearchResult queryResult = searchService.executeQueryPhase(searchRequest(searchSource().query(termQuery("name", "test1")))); + assertThat(queryResult.topDocs().totalHits, equalTo(1)); + } + + @Test public void testDirectFetch() throws Exception { + QueryFetchSearchResult queryFetchResult = searchService.executeFetchPhase(searchRequest(searchSource().query(termQuery("name", "test1")))); + assertThat(queryFetchResult.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(queryFetchResult.fetchResult().hits().hits().length, equalTo(1)); + assertThat(queryFetchResult.fetchResult().hits().hits()[0].source(), equalTo(source("1", "test1", 1))); + assertThat(queryFetchResult.fetchResult().hits().hits()[0].id(), equalTo("1")); + assertThat(queryFetchResult.fetchResult().hits().hits()[0].type(), equalTo("type1")); + } + + @Test public void testQueryFetch() throws Exception { + QuerySearchResult queryResult = 
searchService.executeQueryPhase(searchRequest(searchSource().query(termQuery("name", "test1")))); + assertThat(queryResult.topDocs().totalHits, equalTo(1)); + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(newArrayList(queryResult)); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + assertThat(docIdsToLoad.size(), equalTo(1)); + assertThat(docIdsToLoad.values().iterator().next().size(), equalTo(1)); + + FetchSearchResult fetchResult = searchService.executeFetchPhase(new FetchSearchRequest(queryResult.id(), docIdsToLoad.values().iterator().next())); + assertThat(fetchResult.hits().hits()[0].source(), equalTo(source("1", "test1", 1))); + assertThat(fetchResult.hits().hits()[0].id(), equalTo("1")); + assertThat(fetchResult.hits().hits()[0].type(), equalTo("type1")); + } + + @Test public void testQueryFetchInOneGo() throws Exception { + QueryFetchSearchResult result = searchService.executeFetchPhase(searchRequest(searchSource().query(termQuery("name", "test1")))); + FetchSearchResult fetchResult = result.fetchResult(); + assertThat(fetchResult.hits().hits()[0].source(), equalTo(source("1", "test1", 1))); + assertThat(fetchResult.hits().hits()[0].id(), equalTo("1")); + assertThat(fetchResult.hits().hits()[0].type(), equalTo("type1")); + } + + @Test public void testDfsQueryFetch() throws Exception { + DfsSearchResult dfsResult = searchService.executeDfsPhase(searchRequest(searchSource().query(termQuery("name", "test1")))); + AggregatedDfs dfs = searchPhaseController.aggregateDfs(newArrayList(dfsResult)); + + QuerySearchResult queryResult = searchService.executeQueryPhase(new QuerySearchRequest(dfsResult.id(), dfs)); + assertThat(queryResult.topDocs().totalHits, equalTo(1)); + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(newArrayList(queryResult)); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + assertThat(docIdsToLoad.size(), equalTo(1)); + 
assertThat(docIdsToLoad.values().iterator().next().size(), equalTo(1)); + + FetchSearchResult fetchResult = searchService.executeFetchPhase(new FetchSearchRequest(queryResult.id(), docIdsToLoad.values().iterator().next())); + assertThat(fetchResult.hits().hits()[0].source(), equalTo(source("1", "test1", 1))); + assertThat(fetchResult.hits().hits()[0].id(), equalTo("1")); + assertThat(fetchResult.hits().hits()[0].type(), equalTo("type1")); + } + + @Test public void testSimpleQueryFacetsNoExecutionType() throws Exception { + QuerySearchResult queryResult = searchService.executeQueryPhase(searchRequest( + searchSource().query(wildcardQuery("name", "te*")) + .facets(facets().facet("age2", termQuery("age", 2)).facet("age1", termQuery("age", 1))) + )); + assertThat(queryResult.facets().countFacet("age2").count(), equalTo(4l)); + assertThat(queryResult.facets().countFacet("age1").count(), equalTo(1l)); + } + + @Test public void testSimpleQueryFacetsQueryExecutionCollect() throws Exception { + QuerySearchResult queryResult = searchService.executeQueryPhase(searchRequest( + searchSource().query(wildcardQuery("name", "te*")) + .facets(facets().queryExecution("collect").facet("age2", termQuery("age", 2)).facet("age1", termQuery("age", 1))) + )); + assertThat(queryResult.facets().countFacet("age2").count(), equalTo(4l)); + assertThat(queryResult.facets().countFacet("age1").count(), equalTo(1l)); + } + + @Test public void testSimpleQueryFacetsQueryExecutionIdset() throws Exception { + QuerySearchResult queryResult = searchService.executeQueryPhase(searchRequest( + searchSource().query(wildcardQuery("name", "te*")) + .facets(facets().queryExecution("idset").facet("age2", termQuery("age", 2)).facet("age1", termQuery("age", 1))) + )); + assertThat(queryResult.facets().countFacet("age2").count(), equalTo(4l)); + assertThat(queryResult.facets().countFacet("age1").count(), equalTo(1l)); + } + + private InternalSearchRequest searchRequest(SearchSourceBuilder builder) { + return new 
InternalSearchRequest("test", 0, builder.build()); + } + + private void index(Client client, String id, String nameValue, int age) { + client.index(indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet(); + } + + private String source(String id, String nameValue, int age) { + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\", age : " + age + " } }"; + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/SingleInstanceEmbeddedSearchTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/SingleInstanceEmbeddedSearchTests.yml new file mode 100644 index 00000000000..be2e218456a --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/SingleInstanceEmbeddedSearchTests.yml @@ -0,0 +1,6 @@ +cluster: + routing: + schedule: 200ms +index: + numberOfShards: 1 + numberOfReplicas: 0 diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TransportTwoServersSearchTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TransportTwoServersSearchTests.java new file mode 100644 index 00000000000..4c4f556f9eb --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TransportTwoServersSearchTests.java @@ -0,0 +1,223 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.search; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import static org.elasticsearch.action.search.SearchType.*; +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.elasticsearch.search.builder.SearchSourceBuilder.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TransportTwoServersSearchTests extends AbstractServersTests { + + @BeforeClass public void createServers() throws Exception { + startServer("server1"); + startServer("server2"); + + client("server1").admin().indices().create(createIndexRequest("test")).actionGet(); + + for (int i = 0; i < 100; i++) { + index(client("server1"), Integer.toString(i), "test", i); + } + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + } + + @AfterClass public void closeServers() { + closeAllServers(); + } + + @Test public void testDfsQueryThenFetch() throws Exception 
{ + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true); + + SearchResponse searchResponse = client("server1").search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + searchResponse = client("server1").searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i))); + } + } + + // + + @Test public void testDfsQueryThenFetchWithSort() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true).sort("age", false); + + SearchResponse searchResponse = client("server1").search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i))); + } + + searchResponse = client("server1").searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + 
assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60))); + } + } + + @Test public void testQueryThenFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true); + + SearchResponse searchResponse = client("server1").search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + searchResponse = client("server1").searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i))); + } + } + + @Test public void testQueryThenFetchWithSort() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true).sort("age", false); + + SearchResponse searchResponse = client("server1").search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); + for (int i = 0; i < 
60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i))); + } + + searchResponse = client("server1").searchScroll(searchScrollRequest(searchResponse.scrollId())).actionGet(); + + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60))); + } + } + + @Test public void testQueryAndFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true); + + SearchResponse searchResponse = client("server1").search(searchRequest("test").source(source).searchType(QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); // 20 per shard + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // TODO support scrolling +// searchResponse = searchScrollAction.submit(new SearchScrollRequest(searchResponse.scrollId())).actionGet(); +// +// assertEquals(100, searchResponse.hits().totalHits()); +// assertEquals(40, searchResponse.hits().hits().length); +// for (int i = 0; i < 40; i++) { +// SearchHit hit = searchResponse.hits().hits()[i]; +// assertEquals("id[" + hit.id() + "]", Integer.toString(100 - 60 - 1 - i), hit.id()); +// } + } + + @Test public void testDfsQueryAndFetch() throws Exception { + SearchSourceBuilder source = searchSource() + .query(termQuery("multi", "test")) + 
.from(0).size(20).explain(true); + + SearchResponse searchResponse = client("server1").search(searchRequest("test").source(source).searchType(DFS_QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet(); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + assertThat(searchResponse.hits().hits().length, equalTo(60)); // 20 per shard + for (int i = 0; i < 60; i++) { + SearchHit hit = searchResponse.hits().hits()[i]; +// System.out.println(hit.target() + ": " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // TODO support scrolling +// searchResponse = searchScrollAction.submit(new SearchScrollRequest(searchResponse.scrollId())).actionGet(); +// +// assertEquals(100, searchResponse.hits().totalHits()); +// assertEquals(40, searchResponse.hits().hits().length); +// for (int i = 0; i < 40; i++) { +// SearchHit hit = searchResponse.hits().hits()[i]; +// assertEquals("id[" + hit.id() + "]", Integer.toString(100 - 60 - 1 - i), hit.id()); +// } + } + + + private void index(Client client, String id, String nameValue, int age) { + client.index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet(); + } + + private String source(String id, String nameValue, int age) { + StringBuilder multi = new StringBuilder().append(nameValue); + for (int i = 0; i < age; i++) { + multi.append(" ").append(nameValue); + } + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + (nameValue + id) + "\", age : " + age + ", multi : \"" + multi.toString() + "\", _boost : " + (age * 10) + " } }"; + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TransportTwoServersSearchTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TransportTwoServersSearchTests.yml new file mode 100644 index 00000000000..b8f11fa524f --- /dev/null +++ 
b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TransportTwoServersSearchTests.yml @@ -0,0 +1,9 @@ +cluster: + routing: + schedule: 200ms +index: + numberOfShards: 3 + numberOfReplicas: 0 + routing : + # Use simple hashing since we want even distribution and our ids are simple incremented number based + hash.type : simple diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceEmbeddedSearchTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceEmbeddedSearchTests.java new file mode 100644 index 00000000000..e33661a1586 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceEmbeddedSearchTests.java @@ -0,0 +1,361 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.integration.search; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.*; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.controller.ShardDoc; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.trove.ExtTIntArrayList; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.google.common.collect.Lists.*; +import static com.google.common.collect.Maps.*; +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static 
org.elasticsearch.search.builder.SearchSourceBuilder.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TwoInstanceEmbeddedSearchTests extends AbstractServersTests { + + private IndicesService indicesService; + + private ClusterService clusterService; + + private Map nodeToSearchService; + + private SearchPhaseController searchPhaseController; + + @BeforeClass public void createServerAndInitWithData() throws Exception { + startServer("server1"); + startServer("server2"); + + clusterService = ((InternalServer) server("server1")).injector().getInstance(ClusterService.class); + indicesService = ((InternalServer) server("server1")).injector().getInstance(IndicesService.class); + client("server1").admin().indices().create(Requests.createIndexRequest("test")).actionGet(); + + for (int i = 0; i < 100; i++) { + index(client("server1"), Integer.toString(i), "test", i); + } + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + + SearchService searchService1 = ((InternalServer) server("server1")).injector().getInstance(SearchService.class); + SearchService searchService2 = ((InternalServer) server("server2")).injector().getInstance(SearchService.class); + + nodeToSearchService = ImmutableMap.builder() + .put(((InternalServer) server("server1")).injector().getInstance(ClusterService.class).state().nodes().localNodeId(), searchService1) + .put(((InternalServer) server("server2")).injector().getInstance(ClusterService.class).state().nodes().localNodeId(), searchService2) + .build(); + + searchPhaseController = ((InternalServer) server("server1")).injector().getInstance(SearchPhaseController.class); + } + + @AfterClass public void closeServers() { + closeAllServers(); + } + + @Test public void testDfsQueryFetch() throws Exception { + SearchSourceBuilder sourceBuilder = searchSource() + 
.query(termQuery("multi", "test")) + .from(0).size(60).explain(true); + + List dfsResults = newArrayList(); + for (ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + dfsResults.add(nodeToSearchService.get(shardRouting.currentNodeId()).executeDfsPhase(searchRequest)); + } + } + + AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + Map queryResults = newHashMap(); + for (DfsSearchResult dfsResult : dfsResults) { + queryResults.put(dfsResult.shardTarget(), nodeToSearchService.get(dfsResult.shardTarget().nodeId()).executeQueryPhase(new QuerySearchRequest(dfsResult.id(), dfs))); + } + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + Map fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + SearchHits hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = hits.hits()[i]; +// System.out.println(hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // now try and scroll to the next batch of results + Map scollQueryResults = newHashMap(); + for (QuerySearchResultProvider queryResult 
: queryResults.values()) { + scollQueryResults.put(queryResult.queryResult().shardTarget(), nodeToSearchService.get(queryResult.shardTarget().nodeId()).executeQueryPhase(new InternalScrollSearchRequest(queryResult.id()))); + } + queryResults = scollQueryResults; + + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = hits.hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i))); + } + } + + @Test public void testDfsQueryFetchWithSort() throws Exception { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true).sort("age", false); + + List dfsResults = newArrayList(); + for (ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + dfsResults.add(nodeToSearchService.get(shardRouting.currentNodeId()).executeDfsPhase(searchRequest)); + } + } + + AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + Map queryResults = newHashMap(); + for 
(DfsSearchResult dfsResult : dfsResults) { + queryResults.put(dfsResult.shardTarget(), nodeToSearchService.get(dfsResult.shardTarget().nodeId()).executeQueryPhase(new QuerySearchRequest(dfsResult.id(), dfs))); + } + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + Map fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + SearchHits hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = hits.hits()[i]; +// System.out.println(hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i))); + } + + // now try and scroll to the next batch of results + Map scollQueryResults = newHashMap(); + for (QuerySearchResultProvider queryResult : queryResults.values()) { + scollQueryResults.put(queryResult.queryResult().shardTarget(), nodeToSearchService.get(queryResult.shardTarget().nodeId()).executeQueryPhase(new InternalScrollSearchRequest(queryResult.id()).scroll(new Scroll(timeValueMinutes(10))))); + } + queryResults = scollQueryResults; + + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + 
FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = hits.hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60))); + } + + // now try and scroll to the next next batch of results + scollQueryResults = newHashMap(); + for (QuerySearchResultProvider queryResult : queryResults.values()) { + scollQueryResults.put(queryResult.queryResult().shardTarget(), nodeToSearchService.get(queryResult.shardTarget().nodeId()).executeQueryPhase(new InternalScrollSearchRequest(queryResult.id()))); + } + queryResults = scollQueryResults; + + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(0)); + } + + @Test public void testQueryFetchInOneGo() { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true); + + Map queryFetchResults = newHashMap(); + for 
(ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + QueryFetchSearchResult queryFetchResult = nodeToSearchService.get(shardRouting.currentNodeId()).executeFetchPhase(searchRequest); + queryFetchResults.put(queryFetchResult.shardTarget(), queryFetchResult); + } + } + + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryFetchResults.values()); + SearchHits hits = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults).hits(); + + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(60)); // 60 results, with size 20, since we have 3 shards + for (int i = 0; i < 60; i++) { + SearchHit hit = hits.hits()[i]; +// System.out.println(hit.id() + " " + hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // TODO we need to support scrolling for query+fetch +// Map scollQueryFetchResults = newHashMap(); +// for (QueryFetchSearchResult searchResult : queryFetchResults.values()) { +// QueryFetchSearchResult queryFetchResult = nodeToSearchService.get(searchResult.shardTarget().nodeId()).executeFetchPhase(new InternalScrollSearchRequest(searchResult.id()).scroll(new Scroll(timeValueMinutes(10)))); +// scollQueryFetchResults.put(queryFetchResult.shardTarget(), queryFetchResult); +// } +// queryFetchResults = scollQueryFetchResults; +// +// sortedShardList = searchPhaseController.sortDocs(queryFetchResults.values()); +// hits = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults).hits(); +// assertEquals(100, hits.totalHits()); +// assertEquals(40, hits.hits().length); +// for (int i = 0; i < 40; i++) { +// SearchHit hit = hits.hits()[i]; +// assertEquals("id[" + hit.id() 
+ "]", Integer.toString(100 - 60 - 1 - i), hit.id()); +// } + } + + @Test public void testSimpleFacets() { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true).sort("age", false) + .facets(facets().facet("all", termQuery("multi", "test")).facet("test1", termQuery("name", "test1"))); + + Map queryResults = newHashMap(); + for (ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + QuerySearchResult queryResult = nodeToSearchService.get(shardRouting.currentNodeId()).executeQueryPhase(searchRequest); + queryResults.put(queryResult.shardTarget(), queryResult); + } + } + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + Map fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + InternalSearchResponse searchResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + + assertThat(searchResponse.facets().countFacet("test1").count(), equalTo(1l)); + assertThat(searchResponse.facets().countFacet("all").count(), equalTo(100l)); + } + + @Test public void testSimpleFacetsTwice() { + testSimpleFacets(); + testSimpleFacets(); + } + + private InternalSearchRequest searchRequest(ShardRouting 
shardRouting, SearchSourceBuilder builder) { + return new InternalSearchRequest(shardRouting, builder.build()); + } + + private void index(Client client, String id, String nameValue, int age) { + client.index(indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet(); + } + + private String source(String id, String nameValue, int age) { + StringBuilder multi = new StringBuilder().append(nameValue); + for (int i = 0; i < age; i++) { + multi.append(" ").append(nameValue); + } + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + (nameValue + id) + "\", age : " + age + ", multi : \"" + multi.toString() + "\", _boost : " + (age * 10) + " } }"; + } +} diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceEmbeddedSearchTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceEmbeddedSearchTests.yml new file mode 100644 index 00000000000..b8f11fa524f --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceEmbeddedSearchTests.yml @@ -0,0 +1,9 @@ +cluster: + routing: + schedule: 200ms +index: + numberOfShards: 3 + numberOfReplicas: 0 + routing : + # Use simple hashing since we want even distribution and our ids are simple incremented number based + hash.type : simple diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceUnbalancedShardsEmbeddedSearchTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceUnbalancedShardsEmbeddedSearchTests.java new file mode 100644 index 00000000000..ff25cd636d2 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceUnbalancedShardsEmbeddedSearchTests.java @@ -0,0 +1,406 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.integration.search; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.AbstractModule; +import com.google.inject.Inject; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.routing.OperationRouting; +import org.elasticsearch.index.routing.plain.PlainOperationRouting; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.*; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.controller.SearchPhaseController; +import org.elasticsearch.search.controller.ShardDoc; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.dfs.DfsSearchResult; +import org.elasticsearch.search.fetch.FetchSearchRequest; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import 
org.elasticsearch.search.internal.InternalSearchRequest; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.server.internal.InternalServer; +import org.elasticsearch.test.integration.AbstractServersTests; +import org.elasticsearch.util.TimeValue; +import org.elasticsearch.util.settings.Settings; +import org.elasticsearch.util.trove.ExtTIntArrayList; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.google.common.collect.Lists.*; +import static com.google.common.collect.Maps.*; +import static org.elasticsearch.client.Requests.*; +import static org.elasticsearch.index.query.json.JsonQueryBuilders.*; +import static org.elasticsearch.search.builder.SearchSourceBuilder.*; +import static org.elasticsearch.util.TimeValue.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; + +/** + * @author kimchy (Shay Banon) + */ +public class TwoInstanceUnbalancedShardsEmbeddedSearchTests extends AbstractServersTests { + + private IndicesService indicesService; + + private ClusterService clusterService; + + private Map nodeToSearchService; + + private SearchPhaseController searchPhaseController; + + @BeforeClass public void createServerAndInitWithData() throws Exception { + startServer("server1"); + startServer("server2"); + + clusterService = ((InternalServer) server("server1")).injector().getInstance(ClusterService.class); + indicesService = ((InternalServer) server("server1")).injector().getInstance(IndicesService.class); + + client("server1").admin().indices().create(Requests.createIndexRequest("test")).actionGet(); + + for (int i = 0; i < 100; 
i++) { + index(client("server1"), Integer.toString(i), "test", i); + } + client("server1").admin().indices().refresh(refreshRequest("test")).actionGet(); + + SearchService searchService1 = ((InternalServer) server("server1")).injector().getInstance(SearchService.class); + SearchService searchService2 = ((InternalServer) server("server2")).injector().getInstance(SearchService.class); + + nodeToSearchService = ImmutableMap.builder() + .put(((InternalServer) server("server1")).injector().getInstance(ClusterService.class).state().nodes().localNodeId(), searchService1) + .put(((InternalServer) server("server2")).injector().getInstance(ClusterService.class).state().nodes().localNodeId(), searchService2) + .build(); + + searchPhaseController = ((InternalServer) server("server1")).injector().getInstance(SearchPhaseController.class); + } + + @AfterClass public void closeServers() { + closeAllServers(); + } + + @Test public void testDfsQueryFetch() throws Exception { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true); + + List dfsResults = newArrayList(); + for (ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + dfsResults.add(nodeToSearchService.get(shardRouting.currentNodeId()).executeDfsPhase(searchRequest)); + } + } + + AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + Map queryResults = newHashMap(); + for (DfsSearchResult dfsResult : dfsResults) { + queryResults.put(dfsResult.shardTarget(), nodeToSearchService.get(dfsResult.shardTarget().nodeId()).executeQueryPhase(new QuerySearchRequest(dfsResult.id(), dfs))); + } + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = 
searchPhaseController.docIdsToLoad(sortedShardList); + + Map fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + SearchHits hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = hits.hits()[i]; +// System.out.println(hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1))); + } + + // now try and scroll to the next batch of results + Map scollQueryResults = newHashMap(); + for (QuerySearchResultProvider queryResult : queryResults.values()) { + scollQueryResults.put(queryResult.queryResult().shardTarget(), nodeToSearchService.get(queryResult.shardTarget().nodeId()).executeQueryPhase(new InternalScrollSearchRequest(queryResult.id()))); + } + queryResults = scollQueryResults; + + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + 
assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = hits.hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i))); + } + } + + @Test public void testDfsQueryFetchWithSort() throws Exception { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(60).explain(true).sort("age", false); + + List dfsResults = newArrayList(); + for (ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + dfsResults.add(nodeToSearchService.get(shardRouting.currentNodeId()).executeDfsPhase(searchRequest)); + } + } + + AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + Map queryResults = newHashMap(); + for (DfsSearchResult dfsResult : dfsResults) { + queryResults.put(dfsResult.shardTarget(), nodeToSearchService.get(dfsResult.shardTarget().nodeId()).executeQueryPhase(new QuerySearchRequest(dfsResult.id(), dfs))); + } + + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + Map fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + SearchHits hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), 
equalTo(100l)); + assertThat(hits.hits().length, equalTo(60)); + for (int i = 0; i < 60; i++) { + SearchHit hit = hits.hits()[i]; +// System.out.println(hit.explanation()); + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i))); + } + + // now try and scroll to the next batch of results + Map scollQueryResults = newHashMap(); + for (QuerySearchResultProvider queryResult : queryResults.values()) { + scollQueryResults.put(queryResult.queryResult().shardTarget(), nodeToSearchService.get(queryResult.shardTarget().nodeId()).executeQueryPhase(new InternalScrollSearchRequest(queryResult.id()).scroll(new Scroll(timeValueMinutes(10))))); + } + queryResults = scollQueryResults; + + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(40)); + for (int i = 0; i < 40; i++) { + SearchHit hit = hits.hits()[i]; + assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60))); + } + + // now try and scroll to the next next batch of results + scollQueryResults = newHashMap(); + for (QuerySearchResultProvider queryResult : queryResults.values()) { + scollQueryResults.put(queryResult.queryResult().shardTarget(), nodeToSearchService.get(queryResult.shardTarget().nodeId()).executeQueryPhase(new InternalScrollSearchRequest(queryResult.id()))); + } + 
queryResults = scollQueryResults; + + sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + hits = searchPhaseController.merge(sortedShardList, queryResults, fetchResults).hits(); + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(0)); + } + + @Test public void testQueryFetchInOneGo() { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true); + + // do this with dfs, since we have uneven distribution of docs between shards + List dfsResults = newArrayList(); + for (ShardsIterator shardsIt : indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + dfsResults.add(nodeToSearchService.get(shardRouting.currentNodeId()).executeDfsPhase(searchRequest)); + } + } + + AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults); + Map queryFetchResults = newHashMap(); + for (DfsSearchResult dfsResult : dfsResults) { + QueryFetchSearchResult queryFetchResult = nodeToSearchService.get(dfsResult.shardTarget().nodeId()).executeFetchPhase(new QuerySearchRequest(dfsResult.id(), dfs)); + queryFetchResults.put(queryFetchResult.shardTarget(), queryFetchResult); + } + + + ShardDoc[] sortedShardList = 
searchPhaseController.sortDocs(queryFetchResults.values()); + SearchHits hits = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults).hits(); + + assertThat(hits.totalHits(), equalTo(100l)); + assertThat(hits.hits().length, equalTo(50)); // 50 results, 20 from first shard, 20 from second shard, but 3rh shard only has total of 10 docs + for (int i = 0; i < 50; i++) { + SearchHit hit = hits.hits()[i]; +// System.out.println(hit.id() + " " + hit.explanation()); +// System.out.println(hit.id()); +// long lId = Long.parseLong(hit.id()); +// assertTrue("id[" + hit.id() + "]", lId >= 49 ); + } + + // TODO we need to support scrolling for query+fetch +// Map scollQueryFetchResults = newHashMap(); +// for (QueryFetchSearchResult searchResult : queryFetchResults.values()) { +// QueryFetchSearchResult queryFetchResult = nodeToSearchService.get(searchResult.shardTarget().nodeId()).executeFetchPhase(new InternalScrollSearchRequest(searchResult.id()).scroll(new Scroll(timeValueMinutes(10)))); +// scollQueryFetchResults.put(queryFetchResult.shardTarget(), queryFetchResult); +// } +// queryFetchResults = scollQueryFetchResults; +// +// sortedShardList = searchPhaseController.sortDocs(queryFetchResults.values()); +// hits = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults).hits(); +// assertEquals(100, hits.totalHits()); +// assertEquals(40, hits.hits().length); +// for (int i = 0; i < 40; i++) { +// SearchHit hit = hits.hits()[i]; +// assertEquals("id[" + hit.id() + "]", Integer.toString(100 - 60 - 1 - i), hit.id()); +// } + } + + @Test public void testSimpleFacets() { + SearchSourceBuilder sourceBuilder = searchSource() + .query(termQuery("multi", "test")) + .from(0).size(20).explain(true).sort("age", false) + .facets(facets().facet("all", termQuery("multi", "test")).facet("test1", termQuery("name", "test1"))); + + Map queryResults = newHashMap(); + for (ShardsIterator shardsIt : 
indicesService.searchShards(clusterService.state(), new String[]{"test"}, null)) { + for (ShardRouting shardRouting : shardsIt) { + InternalSearchRequest searchRequest = searchRequest(shardRouting, sourceBuilder) + .scroll(new Scroll(new TimeValue(10, TimeUnit.MINUTES))); + QuerySearchResult queryResult = nodeToSearchService.get(shardRouting.currentNodeId()).executeQueryPhase(searchRequest); + queryResults.put(queryResult.shardTarget(), queryResult); + } + } + ShardDoc[] sortedShardList = searchPhaseController.sortDocs(queryResults.values()); + Map docIdsToLoad = searchPhaseController.docIdsToLoad(sortedShardList); + + Map fetchResults = newHashMap(); + for (Map.Entry entry : docIdsToLoad.entrySet()) { + SearchShardTarget shardTarget = entry.getKey(); + ExtTIntArrayList docIds = entry.getValue(); + FetchSearchResult fetchResult = nodeToSearchService.get(shardTarget.nodeId()).executeFetchPhase(new FetchSearchRequest(queryResults.get(shardTarget).queryResult().id(), docIds)); + fetchResults.put(fetchResult.shardTarget(), fetchResult.initCounter()); + } + + InternalSearchResponse searchResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + assertThat(searchResponse.hits().totalHits(), equalTo(100l)); + + assertThat(searchResponse.facets().countFacet("test1").count(), equalTo(1l)); + assertThat(searchResponse.facets().countFacet("all").count(), equalTo(100l)); + } + + @Test public void testSimpleFacetsTwice() { + testSimpleFacets(); + testSimpleFacets(); + } + + private static InternalSearchRequest searchRequest(ShardRouting shardRouting, SearchSourceBuilder builder) { + return new InternalSearchRequest(shardRouting, builder.build()); + } + + private void index(Client client, String id, String nameValue, int age) { + client.index(indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet(); + } + + private String source(String id, String nameValue, int age) { + StringBuilder multi = new 
StringBuilder().append(nameValue); + for (int i = 0; i < age; i++) { + multi.append(" ").append(nameValue); + } + return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + (nameValue + id) + "\", age : " + age + ", multi : \"" + multi.toString() + "\", _boost : " + (age * 10) + " } }"; + } + + public static class UnevenOperationRoutingModule extends AbstractModule { + @Override protected void configure() { + bind(OperationRouting.class).to(UnevenOperationRoutingStrategy.class).asEagerSingleton(); + } + } + + /** + * First 60 go to the first shard, + * Next 30 go to the second shard, + * Next 10 go to the third shard + */ + public static class UnevenOperationRoutingStrategy extends PlainOperationRouting { + + @Inject public UnevenOperationRoutingStrategy(Index index, @IndexSettings Settings indexSettings) { + super(index, indexSettings, null); + } + + @Override protected int hash(String type, String id) { + long lId = Long.parseLong(id); + if (lId < 60) { + return 0; + } + if (lId >= 60 && lId < 90) { + return 1; + } + return 2; + } + } +} \ No newline at end of file diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceUnbalancedShardsEmbeddedSearchTests.yml b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceUnbalancedShardsEmbeddedSearchTests.yml new file mode 100644 index 00000000000..2ffff78f279 --- /dev/null +++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/search/TwoInstanceUnbalancedShardsEmbeddedSearchTests.yml @@ -0,0 +1,8 @@ +cluster: + routing: + schedule: 200ms +index: + numberOfShards: 3 + numberOfReplicas: 0 + routing: + type: org.elasticsearch.test.integration.search.TwoInstanceUnbalancedShardsEmbeddedSearchTests$UnevenOperationRoutingModule diff --git a/modules/test/testng/build.gradle b/modules/test/testng/build.gradle new file mode 100644 index 00000000000..c089c4d2119 --- /dev/null +++ b/modules/test/testng/build.gradle 
@@ -0,0 +1,22 @@ +usePlugin 'java' + +archivesBaseName = "$rootProject.archivesBaseName-$project.archivesBaseName" + +configurations.compile.transitive = true +configurations.testCompile.transitive = true + +// no need to use the resource dir +sourceSets.main.resources.srcDir 'src/main/java' +sourceSets.test.resources.srcDir 'src/test/java' + +dependencies { + compile('org.testng:testng:5.10:jdk15') { transitive = false } + compile 'org.slf4j:slf4j-api:1.5.8' + compile('org.slf4j:slf4j-log4j12:1.5.8') { transitive = false } + compile('log4j:log4j:1.2.15') { transitive = false } +} + +test { + useTestNG() + options.systemProperties = ["es.test.log.conf": "log4j-gradle.properties"] +} diff --git a/modules/test/testng/src/main/java/log4j-gradle.properties b/modules/test/testng/src/main/java/log4j-gradle.properties new file mode 100644 index 00000000000..d4fa6250dc5 --- /dev/null +++ b/modules/test/testng/src/main/java/log4j-gradle.properties @@ -0,0 +1,13 @@ +log4j.rootLogger=INFO, out +log4j.logger.jgroups=WARN + +#log4j.logger.index=DEBUG +#log4j.logger.http=TRACE +#log4j.logger.monitor.memory=TRACE +#log4j.logger.gateway=TRACE + +log4j.appender.out=org.apache.log4j.FileAppender +log4j.appender.out.file=${test.log.dir}/${test.log.name}.log +log4j.appender.out.append=true +log4j.appender.out.layout=org.apache.log4j.PatternLayout +log4j.appender.out.layout.ConversionPattern=[%d{ABSOLUTE}][%-5p][%-25c] %m%n diff --git a/modules/test/testng/src/main/java/log4j.properties b/modules/test/testng/src/main/java/log4j.properties new file mode 100644 index 00000000000..199132b5b7f --- /dev/null +++ b/modules/test/testng/src/main/java/log4j.properties @@ -0,0 +1,19 @@ +log4j.rootLogger=INFO, out +log4j.logger.jgroups=WARN + +#log4j.logger.discovery=TRACE +#log4j.logger.cluster=TRACE +#log4j.logger.indices.cluster=DEBUG +#log4j.logger.index=TRACE +#log4j.logger.index.engine=DEBUG +#log4j.logger.index.shard.recovery=TRACE +#log4j.logger.index.cache=DEBUG +#log4j.logger.http=TRACE 
+#log4j.logger.monitor.memory=TRACE +#log4j.logger.monitor.memory=TRACE +#log4j.logger.cluster.action.shard=TRACE +#log4j.logger.index.gateway=TRACE + +log4j.appender.out=org.apache.log4j.ConsoleAppender +log4j.appender.out.layout=org.apache.log4j.PatternLayout +log4j.appender.out.layout.ConversionPattern=[%d{ABSOLUTE}][%-5p][%-25c] %m%n diff --git a/modules/test/testng/src/main/java/org/elasticsearch/util/testng/DotTestListener.java b/modules/test/testng/src/main/java/org/elasticsearch/util/testng/DotTestListener.java new file mode 100644 index 00000000000..c709fa7df7b --- /dev/null +++ b/modules/test/testng/src/main/java/org/elasticsearch/util/testng/DotTestListener.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.testng; + +import org.testng.ITestResult; +import org.testng.TestListenerAdapter; + +/** + * @author kimchy (Shay Banon) + */ +public class DotTestListener extends TestListenerAdapter { + + private int count = 1; + + @Override public void onTestFailure(ITestResult tr) { + log("F"); + } + + @Override public void onTestSkipped(ITestResult tr) { + log("S"); + } + + @Override public void onTestSuccess(ITestResult tr) { + log("."); + } + + private void log(String string) { + System.err.print(string); + if (count++ % 40 == 0) { + System.err.println(""); + } + } +} + diff --git a/modules/test/testng/src/main/java/org/elasticsearch/util/testng/Listeners.java b/modules/test/testng/src/main/java/org/elasticsearch/util/testng/Listeners.java new file mode 100644 index 00000000000..b487c8d2a29 --- /dev/null +++ b/modules/test/testng/src/main/java/org/elasticsearch/util/testng/Listeners.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.util.testng; + +import org.testng.ITestContext; +import org.testng.ITestListener; +import org.testng.ITestResult; + +/** + * @author kimchy (Shay Banon) + */ +public class Listeners implements ITestListener { + + private final ITestListener[] listeners = new ITestListener[]{new DotTestListener(), new LoggingListener()}; + + @Override public void onTestStart(ITestResult result) { + for (ITestListener listener : listeners) { + listener.onTestStart(result); + } + } + + @Override public void onTestSuccess(ITestResult result) { + for (ITestListener listener : listeners) { + listener.onTestSuccess(result); + } + } + + @Override public void onTestFailure(ITestResult result) { + for (ITestListener listener : listeners) { + listener.onTestFailure(result); + } + } + + @Override public void onTestSkipped(ITestResult result) { + for (ITestListener listener : listeners) { + listener.onTestSkipped(result); + } + } + + @Override public void onTestFailedButWithinSuccessPercentage(ITestResult result) { + for (ITestListener listener : listeners) { + listener.onTestFailedButWithinSuccessPercentage(result); + } + } + + @Override public void onStart(ITestContext context) { + for (ITestListener listener : listeners) { + listener.onStart(context); + } + } + + @Override public void onFinish(ITestContext context) { + for (ITestListener listener : listeners) { + listener.onFinish(context); + } + } +} diff --git a/modules/test/testng/src/main/java/org/elasticsearch/util/testng/LoggingListener.java b/modules/test/testng/src/main/java/org/elasticsearch/util/testng/LoggingListener.java new file mode 100644 index 00000000000..d8c68ef6284 --- /dev/null +++ b/modules/test/testng/src/main/java/org/elasticsearch/util/testng/LoggingListener.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elastic Search and Shay Banon under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Elastic Search licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.util.testng; + +import org.apache.log4j.PropertyConfigurator; +import org.slf4j.LoggerFactory; +import org.testng.ITestContext; +import org.testng.ITestResult; +import org.testng.TestListenerAdapter; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; + +/** + * @author kimchy (Shay Banon) + */ +public class LoggingListener extends TestListenerAdapter { + + @Override public void onStart(ITestContext context) { + String logsDir = context.getOutputDirectory() + "/logs"; + deleteRecursively(new File(logsDir), false); + System.setProperty("test.log.dir", logsDir); + } + + @Override public void onTestStart(ITestResult result) { + String logName = result.getTestClass().getName(); + if (logName.startsWith("org.elasticsearch.")) { + logName = logName.substring("org.elasticsearch.".length()); + } + System.setProperty("test.log.name", logName); + + Properties props = new Properties(); + try { + props.load(LoggingListener.class.getClassLoader().getResourceAsStream(System.getProperty("es.test.log.conf", "log4j.properties"))); + } catch (IOException e) { + e.printStackTrace(); + } + PropertyConfigurator.configure(props); + + LoggerFactory.getLogger("testng").info("========== Starting Test [" + result.getName() + "] 
=========="); + } + + @Override public void onTestSuccess(ITestResult result) { + LoggerFactory.getLogger("testng").info("========== Test Success [" + result.getName() + "] =========="); + } + + @Override public void onTestFailure(ITestResult result) { + LoggerFactory.getLogger("testng").info("========== Test Failure [" + result.getName() + "] =========="); + } + + @Override public void onTestSkipped(ITestResult result) { + LoggerFactory.getLogger("testng").info("========== Test Skipped [" + result.getName() + "] =========="); + } + + /** + * Delete the supplied {@link java.io.File} - for directories, + * recursively delete any nested directories or files as well. + * + * @param root the root File to delete + * @param deleteRoot whether or not to delete the root itself or just the content of the root. + * @return true if the File was deleted, + * otherwise false + */ + public static boolean deleteRecursively(File root, boolean deleteRoot) { + if (root != null && root.exists()) { + if (root.isDirectory()) { + File[] children = root.listFiles(); + if (children != null) { + for (File aChildren : children) { + deleteRecursively(aChildren, true); + } + } + } + + if (deleteRoot) { + return root.delete(); + } else { + return true; + } + } + return false; + } +} \ No newline at end of file diff --git a/settings.gradle b/settings.gradle new file mode 100644 index 00000000000..adf10ee1d74 --- /dev/null +++ b/settings.gradle @@ -0,0 +1,14 @@ +include 'test-testng' + +include 'elasticsearch' + +include 'test-integration' + +include 'benchmark-micro' + +rootProject.name = 'elasticsearch-root' +rootProject.children.each {project -> + String fileBaseName = project.name.replaceAll("\\p{Upper}") { "-${it.toLowerCase()}" } + fileBaseName = fileBaseName.replace('-', '/'); + project.projectDir = new File(settingsDir, "modules/$fileBaseName") +}

    This method always replaces malformed-input and unmappable-character + * sequences with this charset's default replacement string. The {@link + * java.nio.charset.CharsetDecoder} class should be used when more control + * over the decoding process is required. + * + * @param charsetName the name of a supported + * {@linkplain java.nio.charset.Charset charset} + * @return String decoded from the buffer's contents. + * @throws java.io.UnsupportedEncodingException + * If the named charset is not supported + * @since JDK1.1 + */ + public String toString(String charsetName) + throws UnsupportedEncodingException { + return new String(buf, 0, count, charsetName); + } + + /** + * Closing a ByteArrayOutputStream has no effect. The methods in + * this class can be called after the stream has been closed without + * generating an IOException. + *